linux/drivers/infiniband/ulp/srpt/ib_srpt.c
/*
 * Copyright (c) 2006 - 2009 Mellanox Technology Inc.  All rights reserved.
 * Copyright (C) 2008 - 2011 Bart Van Assche <bvanassche@acm.org>.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/kthread.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include "ib_srpt.h"

/* Name of this kernel module. */
#define DRV_NAME                "ib_srpt"
#define DRV_VERSION             "2.0.0"
#define DRV_RELDATE             "2011-02-14"

#define SRPT_ID_STRING  "Linux SRP target"

#undef pr_fmt
#define pr_fmt(fmt) DRV_NAME " " fmt

MODULE_AUTHOR("Vu Pham and Bart Van Assche");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol target "
                   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

/*
 * Global Variables
 */

static u64 srpt_service_guid;
static DEFINE_SPINLOCK(srpt_dev_lock);  /* Protects srpt_dev_list. */
static LIST_HEAD(srpt_dev_list);        /* List of srpt_device structures. */

static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
module_param(srp_max_req_size, int, 0444);
MODULE_PARM_DESC(srp_max_req_size,
                 "Maximum size of SRP request messages in bytes.");

static int srpt_srq_size = DEFAULT_SRPT_SRQ_SIZE;
module_param(srpt_srq_size, int, 0444);
MODULE_PARM_DESC(srpt_srq_size,
                 "Shared receive queue (SRQ) size.");

static int srpt_get_u64_x(char *buffer, struct kernel_param *kp)
{
        return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg);
}
module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
                  0444);
MODULE_PARM_DESC(srpt_service_guid,
                 "Use this value for ioc_guid, id_ext, and cm_listen_id"
                 " instead of the node_guid of the first HCA.");
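
/*
 * Editor's note (not from the original source): all three module parameters
 * above have 0444 permissions and therefore can only be set at module load
 * time, e.g. (hypothetical values):
 *
 *      modprobe ib_srpt srp_max_req_size=4096 srpt_srq_size=4095
 *      modprobe ib_srpt srpt_service_guid=0x0002c90300a0b0c0
 */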

static struct ib_client srpt_client;
static void srpt_release_cmd(struct se_cmd *se_cmd);
static void srpt_free_ch(struct kref *kref);
static int srpt_queue_status(struct se_cmd *cmd);
static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void srpt_process_wait_list(struct srpt_rdma_ch *ch);

/*
 * The only allowed channel state changes are those that change the channel
 * state into a state with a higher numerical value. Hence the new > prev test.
 */
static bool srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new)
{
        unsigned long flags;
        enum rdma_ch_state prev;
        bool changed = false;

        spin_lock_irqsave(&ch->spinlock, flags);
        prev = ch->state;
        if (new > prev) {
                ch->state = new;
                changed = true;
        }
        spin_unlock_irqrestore(&ch->spinlock, flags);

        return changed;
}
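
/*
 * Editor's illustration (not from the original source): because transitions
 * are only allowed towards numerically higher states, the return value lets
 * exactly one caller win a transition race, e.g.:
 *
 *      if (srpt_set_ch_state(ch, CH_DRAINING))
 *              srpt_ch_qp_err(ch);     (only the first caller gets here)
 *
 * A concurrent second call with the same argument returns false because
 * ch->state is then no longer lower than CH_DRAINING.
 */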

/**
 * srpt_event_handler() - Asynchronous IB event callback function.
 *
 * Callback function called by the InfiniBand core when an asynchronous IB
 * event occurs. This callback may occur in interrupt context. See also
 * section 11.5.2, Set Asynchronous Event Handler in the InfiniBand
 * Architecture Specification.
 */
static void srpt_event_handler(struct ib_event_handler *handler,
                               struct ib_event *event)
{
        struct srpt_device *sdev;
        struct srpt_port *sport;

        sdev = ib_get_client_data(event->device, &srpt_client);
        if (!sdev || sdev->device != event->device)
                return;

        pr_debug("ASYNC event=%d on device=%s\n", event->event,
                 sdev->device->name);

        switch (event->event) {
        case IB_EVENT_PORT_ERR:
                if (event->element.port_num <= sdev->device->phys_port_cnt) {
                        sport = &sdev->port[event->element.port_num - 1];
                        sport->lid = 0;
                        sport->sm_lid = 0;
                }
                break;
        case IB_EVENT_PORT_ACTIVE:
        case IB_EVENT_LID_CHANGE:
        case IB_EVENT_PKEY_CHANGE:
        case IB_EVENT_SM_CHANGE:
        case IB_EVENT_CLIENT_REREGISTER:
        case IB_EVENT_GID_CHANGE:
                /* Refresh port data asynchronously. */
                if (event->element.port_num <= sdev->device->phys_port_cnt) {
                        sport = &sdev->port[event->element.port_num - 1];
                        if (!sport->lid && !sport->sm_lid)
                                schedule_work(&sport->work);
                }
                break;
        default:
                pr_err("received unrecognized IB event %d\n",
                       event->event);
                break;
        }
}

/**
 * srpt_srq_event() - SRQ event callback function.
 */
static void srpt_srq_event(struct ib_event *event, void *ctx)
{
        pr_info("SRQ event %d\n", event->event);
}

static const char *get_ch_state_name(enum rdma_ch_state s)
{
        switch (s) {
        case CH_CONNECTING:
                return "connecting";
        case CH_LIVE:
                return "live";
        case CH_DISCONNECTING:
                return "disconnecting";
        case CH_DRAINING:
                return "draining";
        case CH_DISCONNECTED:
                return "disconnected";
        }
        return "???";
}

/**
 * srpt_qp_event() - QP event callback function.
 */
static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
{
        pr_debug("QP event %d on cm_id=%p sess_name=%s state=%d\n",
                 event->event, ch->cm_id, ch->sess_name, ch->state);

        switch (event->event) {
        case IB_EVENT_COMM_EST:
                ib_cm_notify(ch->cm_id, event->event);
                break;
        case IB_EVENT_QP_LAST_WQE_REACHED:
                pr_debug("%s-%d, state %s: received Last WQE event.\n",
                         ch->sess_name, ch->qp->qp_num,
                         get_ch_state_name(ch->state));
                break;
        default:
                pr_err("received unrecognized IB QP event %d\n", event->event);
                break;
        }
}

/**
 * srpt_set_ioc() - Helper function for initializing an IOUnitInfo structure.
 *
 * @c_list: controller list (array of four-bit elements).
 * @slot: one-based slot number.
 * @value: four-bit value.
 *
 * Copies the lowest four bits of value into element slot of the array of
 * four-bit elements called c_list (controller list). The slot index is
 * one-based.
 */
static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
{
        u16 id;
        u8 tmp;

        id = (slot - 1) / 2;
        if (slot & 0x1) {
                tmp = c_list[id] & 0xf;
                c_list[id] = (value << 4) | tmp;
        } else {
                tmp = c_list[id] & 0xf0;
                c_list[id] = (value & 0xf) | tmp;
        }
}
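
/*
 * Editor's illustration (not from the original source): controller slots are
 * packed two per byte, with odd one-based slots in the high nibble.
 * Starting from c_list[0] == 0x00:
 *
 *      srpt_set_ioc(c_list, 1, 0x5);    c_list[0] is now 0x50
 *      srpt_set_ioc(c_list, 2, 0x3);    c_list[0] is now 0x53
 */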

/**
 * srpt_get_class_port_info() - Copy ClassPortInfo to a management datagram.
 *
 * See also section 16.3.3.1 ClassPortInfo in the InfiniBand Architecture
 * Specification.
 */
static void srpt_get_class_port_info(struct ib_dm_mad *mad)
{
        struct ib_class_port_info *cif;

        cif = (struct ib_class_port_info *)mad->data;
        memset(cif, 0, sizeof(*cif));
        cif->base_version = 1;
        cif->class_version = 1;

        ib_set_cpi_resp_time(cif, 20);
        mad->mad_hdr.status = 0;
}

/**
 * srpt_get_iou() - Write IOUnitInfo to a management datagram.
 *
 * See also section 16.3.3.3 IOUnitInfo in the InfiniBand Architecture
 * Specification. See also section B.7, table B.6 in the SRP r16a document.
 */
static void srpt_get_iou(struct ib_dm_mad *mad)
{
        struct ib_dm_iou_info *ioui;
        u8 slot;
        int i;

        ioui = (struct ib_dm_iou_info *)mad->data;
        ioui->change_id = cpu_to_be16(1);
        ioui->max_controllers = 16;

        /* set present for slot 1 and empty for the rest */
        srpt_set_ioc(ioui->controller_list, 1, 1);
        for (i = 1, slot = 2; i < 16; i++, slot++)
                srpt_set_ioc(ioui->controller_list, slot, 0);

        mad->mad_hdr.status = 0;
}

/**
 * srpt_get_ioc() - Write IOControllerProfile to a management datagram.
 *
 * See also section 16.3.3.4 IOControllerProfile in the InfiniBand
 * Architecture Specification. See also section B.7, table B.7 in the SRP
 * r16a document.
 */
static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
                         struct ib_dm_mad *mad)
{
        struct srpt_device *sdev = sport->sdev;
        struct ib_dm_ioc_profile *iocp;

        iocp = (struct ib_dm_ioc_profile *)mad->data;

        if (!slot || slot > 16) {
                mad->mad_hdr.status
                        = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
                return;
        }

        if (slot > 2) {
                mad->mad_hdr.status
                        = cpu_to_be16(DM_MAD_STATUS_NO_IOC);
                return;
        }

        memset(iocp, 0, sizeof(*iocp));
        strcpy(iocp->id_string, SRPT_ID_STRING);
        iocp->guid = cpu_to_be64(srpt_service_guid);
        iocp->vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
        iocp->device_id = cpu_to_be32(sdev->device->attrs.vendor_part_id);
        iocp->device_version = cpu_to_be16(sdev->device->attrs.hw_ver);
        iocp->subsys_vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
        iocp->subsys_device_id = 0x0;
        iocp->io_class = cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
        iocp->io_subclass = cpu_to_be16(SRP_IO_SUBCLASS);
        iocp->protocol = cpu_to_be16(SRP_PROTOCOL);
        iocp->protocol_version = cpu_to_be16(SRP_PROTOCOL_VERSION);
        iocp->send_queue_depth = cpu_to_be16(sdev->srq_size);
        iocp->rdma_read_depth = 4;
        iocp->send_size = cpu_to_be32(srp_max_req_size);
        iocp->rdma_size = cpu_to_be32(min(sport->port_attrib.srp_max_rdma_size,
                                          1U << 24));
        iocp->num_svc_entries = 1;
        iocp->op_cap_mask = SRP_SEND_TO_IOC | SRP_SEND_FROM_IOC |
                SRP_RDMA_READ_FROM_IOC | SRP_RDMA_WRITE_FROM_IOC;

        mad->mad_hdr.status = 0;
}

/**
 * srpt_get_svc_entries() - Write ServiceEntries to a management datagram.
 *
 * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
 * Specification. See also section B.7, table B.8 in the SRP r16a document.
 */
static void srpt_get_svc_entries(u64 ioc_guid,
                                 u16 slot, u8 hi, u8 lo, struct ib_dm_mad *mad)
{
        struct ib_dm_svc_entries *svc_entries;

        WARN_ON(!ioc_guid);

        if (!slot || slot > 16) {
                mad->mad_hdr.status
                        = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
                return;
        }

        if (slot > 2 || lo > hi || hi > 1) {
                mad->mad_hdr.status
                        = cpu_to_be16(DM_MAD_STATUS_NO_IOC);
                return;
        }

        svc_entries = (struct ib_dm_svc_entries *)mad->data;
        memset(svc_entries, 0, sizeof(*svc_entries));
        svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid);
        snprintf(svc_entries->service_entries[0].name,
                 sizeof(svc_entries->service_entries[0].name),
                 "%s%016llx",
                 SRP_SERVICE_NAME_PREFIX,
                 ioc_guid);

        mad->mad_hdr.status = 0;
}

/**
 * srpt_mgmt_method_get() - Process a received management datagram.
 * @sp:      source port through which the MAD has been received.
 * @rq_mad:  received MAD.
 * @rsp_mad: response MAD.
 */
static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
                                 struct ib_dm_mad *rsp_mad)
{
        u16 attr_id;
        u32 slot;
        u8 hi, lo;

        attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id);
        switch (attr_id) {
        case DM_ATTR_CLASS_PORT_INFO:
                srpt_get_class_port_info(rsp_mad);
                break;
        case DM_ATTR_IOU_INFO:
                srpt_get_iou(rsp_mad);
                break;
        case DM_ATTR_IOC_PROFILE:
                slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
                srpt_get_ioc(sp, slot, rsp_mad);
                break;
        case DM_ATTR_SVC_ENTRIES:
                slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
                hi = (u8) ((slot >> 8) & 0xff);
                lo = (u8) (slot & 0xff);
                slot = (u16) ((slot >> 16) & 0xffff);
                srpt_get_svc_entries(srpt_service_guid,
                                     slot, hi, lo, rsp_mad);
                break;
        default:
                rsp_mad->mad_hdr.status =
                    cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
                break;
        }
}

/**
 * srpt_mad_send_handler() - Post MAD-send callback function.
 */
static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
                                  struct ib_mad_send_wc *mad_wc)
{
        rdma_destroy_ah(mad_wc->send_buf->ah);
        ib_free_send_mad(mad_wc->send_buf);
}

/**
 * srpt_mad_recv_handler() - MAD reception callback function.
 */
static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
                                  struct ib_mad_send_buf *send_buf,
                                  struct ib_mad_recv_wc *mad_wc)
{
        struct srpt_port *sport = (struct srpt_port *)mad_agent->context;
        struct ib_ah *ah;
        struct ib_mad_send_buf *rsp;
        struct ib_dm_mad *dm_mad;

        if (!mad_wc || !mad_wc->recv_buf.mad)
                return;

        ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
                                  mad_wc->recv_buf.grh, mad_agent->port_num);
        if (IS_ERR(ah))
                goto err;

        BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR);

        rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
                                 mad_wc->wc->pkey_index, 0,
                                 IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA,
                                 GFP_KERNEL,
                                 IB_MGMT_BASE_VERSION);
        if (IS_ERR(rsp))
                goto err_rsp;

        rsp->ah = ah;

        dm_mad = rsp->mad;
        memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof(*dm_mad));
        dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
        dm_mad->mad_hdr.status = 0;

        switch (mad_wc->recv_buf.mad->mad_hdr.method) {
        case IB_MGMT_METHOD_GET:
                srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad);
                break;
        case IB_MGMT_METHOD_SET:
                dm_mad->mad_hdr.status =
                    cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
                break;
        default:
                dm_mad->mad_hdr.status =
                    cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
                break;
        }

        if (!ib_post_send_mad(rsp, NULL)) {
                ib_free_recv_mad(mad_wc);
                /* will destroy_ah & free_send_mad in send completion */
                return;
        }

        ib_free_send_mad(rsp);

err_rsp:
        rdma_destroy_ah(ah);
err:
        ib_free_recv_mad(mad_wc);
}

/**
 * srpt_refresh_port() - Configure an HCA port.
 *
 * Enable InfiniBand management datagram processing, update the cached sm_lid,
 * lid and gid values, and register a callback function for processing MADs
 * on the specified port.
 *
 * Note: It is safe to call this function more than once for the same port.
 */
static int srpt_refresh_port(struct srpt_port *sport)
{
        struct ib_mad_reg_req reg_req;
        struct ib_port_modify port_modify;
        struct ib_port_attr port_attr;
        __be16 *guid;
        int ret;

        memset(&port_modify, 0, sizeof(port_modify));
        port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
        port_modify.clr_port_cap_mask = 0;

        ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
        if (ret)
                goto err_mod_port;

        ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
        if (ret)
                goto err_query_port;

        sport->sm_lid = port_attr.sm_lid;
        sport->lid = port_attr.lid;

        ret = ib_query_gid(sport->sdev->device, sport->port, 0, &sport->gid,
                           NULL);
        if (ret)
                goto err_query_port;

        sport->port_guid_wwn.priv = sport;
        guid = (__be16 *)&sport->gid.global.interface_id;
        snprintf(sport->port_guid, sizeof(sport->port_guid),
                 "%04x:%04x:%04x:%04x",
                 be16_to_cpu(guid[0]), be16_to_cpu(guid[1]),
                 be16_to_cpu(guid[2]), be16_to_cpu(guid[3]));
        sport->port_gid_wwn.priv = sport;
        snprintf(sport->port_gid, sizeof(sport->port_gid),
                 "0x%016llx%016llx",
                 be64_to_cpu(sport->gid.global.subnet_prefix),
                 be64_to_cpu(sport->gid.global.interface_id));

        if (!sport->mad_agent) {
                memset(&reg_req, 0, sizeof(reg_req));
                reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
                reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
                set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
                set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);

                sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
                                                         sport->port,
                                                         IB_QPT_GSI,
                                                         &reg_req, 0,
                                                         srpt_mad_send_handler,
                                                         srpt_mad_recv_handler,
                                                         sport, 0);
                if (IS_ERR(sport->mad_agent)) {
                        ret = PTR_ERR(sport->mad_agent);
                        sport->mad_agent = NULL;
                        goto err_query_port;
                }
        }

        return 0;

err_query_port:

        port_modify.set_port_cap_mask = 0;
        port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
        ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);

err_mod_port:

        return ret;
}

/**
 * srpt_unregister_mad_agent() - Unregister MAD callback functions.
 *
 * Note: It is safe to call this function more than once for the same device.
 */
static void srpt_unregister_mad_agent(struct srpt_device *sdev)
{
        struct ib_port_modify port_modify = {
                .clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
        };
        struct srpt_port *sport;
        int i;

        for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
                sport = &sdev->port[i - 1];
                WARN_ON(sport->port != i);
                if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0)
                        pr_err("disabling MAD processing failed.\n");
                if (sport->mad_agent) {
                        ib_unregister_mad_agent(sport->mad_agent);
                        sport->mad_agent = NULL;
                }
        }
}

/**
 * srpt_alloc_ioctx() - Allocate an SRPT I/O context structure.
 */
static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
                                           int ioctx_size, int dma_size,
                                           enum dma_data_direction dir)
{
        struct srpt_ioctx *ioctx;

        ioctx = kmalloc(ioctx_size, GFP_KERNEL);
        if (!ioctx)
                goto err;

        ioctx->buf = kmalloc(dma_size, GFP_KERNEL);
        if (!ioctx->buf)
                goto err_free_ioctx;

        ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf, dma_size, dir);
        if (ib_dma_mapping_error(sdev->device, ioctx->dma))
                goto err_free_buf;

        return ioctx;

err_free_buf:
        kfree(ioctx->buf);
err_free_ioctx:
        kfree(ioctx);
err:
        return NULL;
}

/**
 * srpt_free_ioctx() - Free an SRPT I/O context structure.
 */
static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
                            int dma_size, enum dma_data_direction dir)
{
        if (!ioctx)
                return;

        ib_dma_unmap_single(sdev->device, ioctx->dma, dma_size, dir);
        kfree(ioctx->buf);
        kfree(ioctx);
}

/**
 * srpt_alloc_ioctx_ring() - Allocate a ring of SRPT I/O context structures.
 * @sdev:       Device to allocate the I/O context ring for.
 * @ring_size:  Number of elements in the I/O context ring.
 * @ioctx_size: I/O context size.
 * @dma_size:   DMA buffer size.
 * @dir:        DMA data direction.
 */
static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
                                int ring_size, int ioctx_size,
                                int dma_size, enum dma_data_direction dir)
{
        struct srpt_ioctx **ring;
        int i;

        WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx)
                && ioctx_size != sizeof(struct srpt_send_ioctx));

        ring = kmalloc(ring_size * sizeof(ring[0]), GFP_KERNEL);
        if (!ring)
                goto out;
        for (i = 0; i < ring_size; ++i) {
                ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, dma_size, dir);
                if (!ring[i])
                        goto err;
                ring[i]->index = i;
        }
        goto out;

err:
        while (--i >= 0)
                srpt_free_ioctx(sdev, ring[i], dma_size, dir);
        kfree(ring);
        ring = NULL;
out:
        return ring;
}

/**
 * srpt_free_ioctx_ring() - Free the ring of SRPT I/O context structures.
 */
static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
                                 struct srpt_device *sdev, int ring_size,
                                 int dma_size, enum dma_data_direction dir)
{
        int i;

        for (i = 0; i < ring_size; ++i)
                srpt_free_ioctx(sdev, ioctx_ring[i], dma_size, dir);
        kfree(ioctx_ring);
}

/**
 * srpt_get_cmd_state() - Get the state of a SCSI command.
 */
static enum srpt_command_state srpt_get_cmd_state(struct srpt_send_ioctx *ioctx)
{
        enum srpt_command_state state;
        unsigned long flags;

        BUG_ON(!ioctx);

        spin_lock_irqsave(&ioctx->spinlock, flags);
        state = ioctx->state;
        spin_unlock_irqrestore(&ioctx->spinlock, flags);
        return state;
}

/**
 * srpt_set_cmd_state() - Set the state of a SCSI command.
 *
 * Does not modify the state of aborted commands. Returns the previous command
 * state.
 */
static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
                                                  enum srpt_command_state new)
{
        enum srpt_command_state previous;
        unsigned long flags;

        BUG_ON(!ioctx);

        spin_lock_irqsave(&ioctx->spinlock, flags);
        previous = ioctx->state;
        if (previous != SRPT_STATE_DONE)
                ioctx->state = new;
        spin_unlock_irqrestore(&ioctx->spinlock, flags);

        return previous;
}

/**
 * srpt_test_and_set_cmd_state() - Test and set the state of a command.
 *
 * Returns true if and only if the previous command state was equal to 'old'.
 */
static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
                                        enum srpt_command_state old,
                                        enum srpt_command_state new)
{
        enum srpt_command_state previous;
        unsigned long flags;

        WARN_ON(!ioctx);
        WARN_ON(old == SRPT_STATE_DONE);
        WARN_ON(new == SRPT_STATE_NEW);

        spin_lock_irqsave(&ioctx->spinlock, flags);
        previous = ioctx->state;
        if (previous == old)
                ioctx->state = new;
        spin_unlock_irqrestore(&ioctx->spinlock, flags);
        return previous == old;
}
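
/*
 * Editor's illustration (not from the original source): this helper is the
 * driver's compare-and-swap on the command state. srpt_rdma_read_done()
 * later in this file uses it to hand a command to the target core exactly
 * once:
 *
 *      if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
 *                                      SRPT_STATE_DATA_IN))
 *              target_execute_cmd(&ioctx->cmd);
 */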

/**
 * srpt_post_recv() - Post an IB receive request.
 */
static int srpt_post_recv(struct srpt_device *sdev,
                          struct srpt_recv_ioctx *ioctx)
{
        struct ib_sge list;
        struct ib_recv_wr wr, *bad_wr;

        BUG_ON(!sdev);
        list.addr = ioctx->ioctx.dma;
        list.length = srp_max_req_size;
        list.lkey = sdev->pd->local_dma_lkey;

        ioctx->ioctx.cqe.done = srpt_recv_done;
        wr.wr_cqe = &ioctx->ioctx.cqe;
        wr.next = NULL;
        wr.sg_list = &list;
        wr.num_sge = 1;

        return ib_post_srq_recv(sdev->srq, &wr, &bad_wr);
}

/**
 * srpt_zerolength_write() - Perform a zero-length RDMA write.
 *
 * A quote from the InfiniBand specification: C9-88: For an HCA responder
 * using Reliable Connection service, for each zero-length RDMA READ or WRITE
 * request, the R_Key shall not be validated, even if the request includes
 * Immediate data.
 */
static int srpt_zerolength_write(struct srpt_rdma_ch *ch)
{
        struct ib_send_wr wr, *bad_wr;

        memset(&wr, 0, sizeof(wr));
        wr.opcode = IB_WR_RDMA_WRITE;
        wr.wr_cqe = &ch->zw_cqe;
        wr.send_flags = IB_SEND_SIGNALED;
        return ib_post_send(ch->qp, &wr, &bad_wr);
}

static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct srpt_rdma_ch *ch = cq->cq_context;

        if (wc->status == IB_WC_SUCCESS) {
                srpt_process_wait_list(ch);
        } else {
                if (srpt_set_ch_state(ch, CH_DISCONNECTED))
                        schedule_work(&ch->release_work);
                else
                        WARN_ONCE(1, "%s-%d\n", ch->sess_name, ch->qp->qp_num);
        }
}

static int srpt_alloc_rw_ctxs(struct srpt_send_ioctx *ioctx,
                struct srp_direct_buf *db, int nbufs, struct scatterlist **sg,
                unsigned *sg_cnt)
{
        enum dma_data_direction dir = target_reverse_dma_direction(&ioctx->cmd);
        struct srpt_rdma_ch *ch = ioctx->ch;
        struct scatterlist *prev = NULL;
        unsigned prev_nents;
        int ret, i;

        if (nbufs == 1) {
                ioctx->rw_ctxs = &ioctx->s_rw_ctx;
        } else {
                ioctx->rw_ctxs = kmalloc_array(nbufs, sizeof(*ioctx->rw_ctxs),
                        GFP_KERNEL);
                if (!ioctx->rw_ctxs)
                        return -ENOMEM;
        }

        for (i = ioctx->n_rw_ctx; i < nbufs; i++, db++) {
                struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
                u64 remote_addr = be64_to_cpu(db->va);
                u32 size = be32_to_cpu(db->len);
                u32 rkey = be32_to_cpu(db->key);

                ret = target_alloc_sgl(&ctx->sg, &ctx->nents, size, false,
                                i < nbufs - 1);
                if (ret)
                        goto unwind;

                ret = rdma_rw_ctx_init(&ctx->rw, ch->qp, ch->sport->port,
                                ctx->sg, ctx->nents, 0, remote_addr, rkey, dir);
                if (ret < 0) {
                        target_free_sgl(ctx->sg, ctx->nents);
                        goto unwind;
                }

                ioctx->n_rdma += ret;
                ioctx->n_rw_ctx++;

                if (prev) {
                        sg_unmark_end(&prev[prev_nents - 1]);
                        sg_chain(prev, prev_nents + 1, ctx->sg);
                } else {
                        *sg = ctx->sg;
                }

                prev = ctx->sg;
                prev_nents = ctx->nents;

                *sg_cnt += ctx->nents;
        }

        return 0;

unwind:
        while (--i >= 0) {
                struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];

                rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port,
                                ctx->sg, ctx->nents, dir);
                target_free_sgl(ctx->sg, ctx->nents);
        }
        if (ioctx->rw_ctxs != &ioctx->s_rw_ctx)
                kfree(ioctx->rw_ctxs);
        return ret;
}

static void srpt_free_rw_ctxs(struct srpt_rdma_ch *ch,
                                    struct srpt_send_ioctx *ioctx)
{
        enum dma_data_direction dir = target_reverse_dma_direction(&ioctx->cmd);
        int i;

        for (i = 0; i < ioctx->n_rw_ctx; i++) {
                struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];

                rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port,
                                ctx->sg, ctx->nents, dir);
                target_free_sgl(ctx->sg, ctx->nents);
        }

        if (ioctx->rw_ctxs != &ioctx->s_rw_ctx)
                kfree(ioctx->rw_ctxs);
}

static inline void *srpt_get_desc_buf(struct srp_cmd *srp_cmd)
{
        /*
         * The pointer computations below will only be compiled correctly
         * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
         * whether srp_cmd::add_data has been declared as a byte pointer.
         */
        BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0) &&
                     !__same_type(srp_cmd->add_data[0], (u8)0));

        /*
         * According to the SRP spec, the lower two bits of the 'ADDITIONAL
         * CDB LENGTH' field are reserved and the size in bytes of this field
         * is four times the value specified in bits 3..7. Hence the "& ~3".
         */
        return srp_cmd->add_data + (srp_cmd->add_cdb_len & ~3);
}
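
/*
 * Editor's illustration (not from the original source): add_cdb_len stores
 * the additional CDB length as a multiple of four bytes in bits 2..7, with
 * the two low bits reserved. Masking with ~3 therefore yields the length in
 * bytes directly, e.g. add_cdb_len == 0x0b gives 0x0b & ~3 == 8, so the
 * descriptor data starts eight bytes into add_data.
 */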

/**
 * srpt_get_desc_tbl() - Parse the data descriptors of an SRP_CMD request.
 * @ioctx: Pointer to the I/O context associated with the request.
 * @srp_cmd: Pointer to the SRP_CMD request data.
 * @dir: Pointer to the variable to which the transfer direction will be
 *   written.
 * @sg: Pointer to the variable to which the scatterlist with the parsed data
 *   buffers will be written.
 * @sg_cnt: Pointer to the variable to which the number of entries in @sg
 *   will be written.
 * @data_len: Pointer to the variable to which the total data length of all
 *   descriptors in the SRP_CMD request will be written.
 *
 * This function initializes the RDMA read/write contexts of @ioctx, namely
 * ioctx->rw_ctxs, via srpt_alloc_rw_ctxs().
 *
 * Returns -EINVAL when the SRP_CMD request contains inconsistent descriptors,
 * -ENOMEM when memory allocation fails and zero upon success.
 */
static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
                struct srp_cmd *srp_cmd, enum dma_data_direction *dir,
                struct scatterlist **sg, unsigned *sg_cnt, u64 *data_len)
{
        BUG_ON(!dir);
        BUG_ON(!data_len);

        /*
         * The lower four bits of the buffer format field contain the DATA-IN
         * buffer descriptor format, and the highest four bits contain the
         * DATA-OUT buffer descriptor format.
         */
        if (srp_cmd->buf_fmt & 0xf)
                /* DATA-IN: transfer data from target to initiator (read). */
                *dir = DMA_FROM_DEVICE;
        else if (srp_cmd->buf_fmt >> 4)
                /* DATA-OUT: transfer data from initiator to target (write). */
                *dir = DMA_TO_DEVICE;
        else
                *dir = DMA_NONE;

        /* initialize data_direction early as srpt_alloc_rw_ctxs needs it */
        ioctx->cmd.data_direction = *dir;

        if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
            ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
                struct srp_direct_buf *db = srpt_get_desc_buf(srp_cmd);

                *data_len = be32_to_cpu(db->len);
                return srpt_alloc_rw_ctxs(ioctx, db, 1, sg, sg_cnt);
        } else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
                   ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
                struct srp_indirect_buf *idb = srpt_get_desc_buf(srp_cmd);
                int nbufs = be32_to_cpu(idb->table_desc.len) /
                                sizeof(struct srp_direct_buf);

                if (nbufs >
                    (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
                        pr_err("received unsupported SRP_CMD request"
                               " type (%u out + %u in != %u / %zu)\n",
                               srp_cmd->data_out_desc_cnt,
                               srp_cmd->data_in_desc_cnt,
                               be32_to_cpu(idb->table_desc.len),
                               sizeof(struct srp_direct_buf));
                        return -EINVAL;
                }

                *data_len = be32_to_cpu(idb->len);
                return srpt_alloc_rw_ctxs(ioctx, idb->desc_list, nbufs,
                                sg, sg_cnt);
        } else {
                *data_len = 0;
                return 0;
        }
}
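
/*
 * Editor's illustration (not from the original source): the buf_fmt nibble
 * encoding handled above, with descriptor format values from
 * include/scsi/srp.h:
 *
 *      buf_fmt == 0x01: one direct DATA-IN buffer (SRP_DATA_DESC_DIRECT)
 *      buf_fmt == 0x20: indirect DATA-OUT table (SRP_DATA_DESC_INDIRECT)
 *      buf_fmt == 0x00: no data transfer, *dir == DMA_NONE
 */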

/**
 * srpt_init_ch_qp() - Initialize queue pair attributes.
 *
 * Initializes the attributes of queue pair 'qp' by allowing local write,
 * remote read and remote write. Also transitions 'qp' to state IB_QPS_INIT.
 */
static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
        struct ib_qp_attr *attr;
        int ret;

        attr = kzalloc(sizeof(*attr), GFP_KERNEL);
        if (!attr)
                return -ENOMEM;

        attr->qp_state = IB_QPS_INIT;
        attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
            IB_ACCESS_REMOTE_WRITE;
        attr->port_num = ch->sport->port;
        attr->pkey_index = 0;

        ret = ib_modify_qp(qp, attr,
                           IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |
                           IB_QP_PKEY_INDEX);

        kfree(attr);
        return ret;
}

/**
 * srpt_ch_qp_rtr() - Change the state of a channel to 'ready to receive' (RTR).
 * @ch: channel of the queue pair.
 * @qp: queue pair to change the state of.
 *
 * Returns zero upon success and a negative value upon failure.
 *
 * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
 * If this structure ever becomes larger, it might be necessary to allocate
 * it dynamically instead of on the stack.
 */
static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
        struct ib_qp_attr qp_attr;
        int attr_mask;
        int ret;

        qp_attr.qp_state = IB_QPS_RTR;
        ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
        if (ret)
                goto out;

        qp_attr.max_dest_rd_atomic = 4;

        ret = ib_modify_qp(qp, &qp_attr, attr_mask);

out:
        return ret;
}

/**
 * srpt_ch_qp_rts() - Change the state of a channel to 'ready to send' (RTS).
 * @ch: channel of the queue pair.
 * @qp: queue pair to change the state of.
 *
 * Returns zero upon success and a negative value upon failure.
 *
 * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
 * If this structure ever becomes larger, it might be necessary to allocate
 * it dynamically instead of on the stack.
 */
static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
        struct ib_qp_attr qp_attr;
        int attr_mask;
        int ret;

        qp_attr.qp_state = IB_QPS_RTS;
        ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
        if (ret)
                goto out;

        qp_attr.max_rd_atomic = 4;

        ret = ib_modify_qp(qp, &qp_attr, attr_mask);

out:
        return ret;
}

/**
 * srpt_ch_qp_err() - Set the channel queue pair state to 'error'.
 */
static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
{
        struct ib_qp_attr qp_attr;

        qp_attr.qp_state = IB_QPS_ERR;
        return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE);
}

/**
 * srpt_get_send_ioctx() - Obtain an I/O context for sending to the initiator.
 */
static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
{
        struct srpt_send_ioctx *ioctx;
        unsigned long flags;

        BUG_ON(!ch);

        ioctx = NULL;
        spin_lock_irqsave(&ch->spinlock, flags);
        if (!list_empty(&ch->free_list)) {
                ioctx = list_first_entry(&ch->free_list,
                                         struct srpt_send_ioctx, free_list);
                list_del(&ioctx->free_list);
        }
        spin_unlock_irqrestore(&ch->spinlock, flags);

        if (!ioctx)
                return ioctx;

        BUG_ON(ioctx->ch != ch);
        spin_lock_init(&ioctx->spinlock);
        ioctx->state = SRPT_STATE_NEW;
        ioctx->n_rdma = 0;
        ioctx->n_rw_ctx = 0;
        init_completion(&ioctx->tx_done);
        ioctx->queue_status_only = false;
        /*
         * transport_init_se_cmd() does not initialize all fields, so do it
         * here.
         */
        memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
        memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));

        return ioctx;
}

/**
 * srpt_abort_cmd() - Abort a SCSI command.
 * @ioctx: I/O context associated with the SCSI command.
 */
static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
{
        enum srpt_command_state state;
        unsigned long flags;

        BUG_ON(!ioctx);

        /*
         * If the command is in a state where the target core is waiting for
         * the ib_srpt driver, change the state to the next state.
         */

        spin_lock_irqsave(&ioctx->spinlock, flags);
        state = ioctx->state;
        switch (state) {
        case SRPT_STATE_NEED_DATA:
                ioctx->state = SRPT_STATE_DATA_IN;
                break;
        case SRPT_STATE_CMD_RSP_SENT:
        case SRPT_STATE_MGMT_RSP_SENT:
                ioctx->state = SRPT_STATE_DONE;
                break;
        default:
                WARN_ONCE(true, "%s: unexpected I/O context state %d\n",
                          __func__, state);
                break;
        }
        spin_unlock_irqrestore(&ioctx->spinlock, flags);

        pr_debug("Aborting cmd with state %d -> %d and tag %lld\n", state,
                 ioctx->state, ioctx->cmd.tag);

        switch (state) {
        case SRPT_STATE_NEW:
        case SRPT_STATE_DATA_IN:
        case SRPT_STATE_MGMT:
        case SRPT_STATE_DONE:
                /*
                 * Do nothing - defer abort processing until
                 * srpt_queue_response() is invoked.
                 */
                break;
        case SRPT_STATE_NEED_DATA:
                pr_debug("tag %#llx: RDMA read error\n", ioctx->cmd.tag);
                transport_generic_request_failure(&ioctx->cmd,
                                        TCM_CHECK_CONDITION_ABORT_CMD);
                break;
        case SRPT_STATE_CMD_RSP_SENT:
                /*
                 * SRP_RSP sending failed or the SRP_RSP send completion has
                 * not been received in time.
                 */
                transport_generic_free_cmd(&ioctx->cmd, 0);
                break;
        case SRPT_STATE_MGMT_RSP_SENT:
                transport_generic_free_cmd(&ioctx->cmd, 0);
                break;
        default:
                WARN(1, "Unexpected command state (%d)", state);
                break;
        }

        return state;
}

/*
 * XXX: the function that is now called target_execute_cmd() used to be
 * asynchronous, so unmapping the data that had been transferred via IB RDMA
 * had to be postponed until the check_stop_free() callback. None of this is
 * necessary anymore and it needs to be cleaned up.
 */
static void srpt_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct srpt_rdma_ch *ch = cq->cq_context;
        struct srpt_send_ioctx *ioctx =
                container_of(wc->wr_cqe, struct srpt_send_ioctx, rdma_cqe);

        WARN_ON(ioctx->n_rdma <= 0);
        atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
        ioctx->n_rdma = 0;

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                pr_info("RDMA_READ for ioctx 0x%p failed with status %d\n",
                        ioctx, wc->status);
                srpt_abort_cmd(ioctx);
                return;
        }

        if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
                                        SRPT_STATE_DATA_IN))
                target_execute_cmd(&ioctx->cmd);
        else
                pr_err("%s[%d]: wrong state = %d\n", __func__,
                       __LINE__, srpt_get_cmd_state(ioctx));
}

/**
 * srpt_build_cmd_rsp() - Build an SRP_RSP response.
 * @ch: RDMA channel through which the request has been received.
 * @ioctx: I/O context associated with the SRP_CMD request. The response will
 *   be built in the buffer ioctx->buf points at and hence this function will
 *   overwrite the request data.
 * @tag: tag of the request for which this response is being generated.
 * @status: value for the STATUS field of the SRP_RSP information unit.
 *
 * Returns the size in bytes of the SRP_RSP response.
 *
 * An SRP_RSP response contains a SCSI status or service response. See also
 * section 6.9 in the SRP r16a document for the format of an SRP_RSP
 * response. See also SPC-2 for more information about sense data.
 */
static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
                              struct srpt_send_ioctx *ioctx, u64 tag,
                              int status)
{
        struct srp_rsp *srp_rsp;
        const u8 *sense_data;
        int sense_data_len, max_sense_len;

        /*
         * The lowest bit of all SAM-3 status codes is zero (see also
         * paragraph 5.3 in SAM-3).
         */
        WARN_ON(status & 1);

        srp_rsp = ioctx->ioctx.buf;
        BUG_ON(!srp_rsp);

        sense_data = ioctx->sense_data;
        sense_data_len = ioctx->cmd.scsi_sense_length;
        WARN_ON(sense_data_len > sizeof(ioctx->sense_data));

        memset(srp_rsp, 0, sizeof(*srp_rsp));
        srp_rsp->opcode = SRP_RSP;
        srp_rsp->req_lim_delta =
                cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
        srp_rsp->tag = tag;
        srp_rsp->status = status;

        if (sense_data_len) {
                BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
                max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
                if (sense_data_len > max_sense_len) {
                        pr_warn("truncated sense data from %d to %d"
                                " bytes\n", sense_data_len, max_sense_len);
                        sense_data_len = max_sense_len;
                }

                srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
                srp_rsp->sense_data_len = cpu_to_be32(sense_data_len);
                memcpy(srp_rsp + 1, sense_data, sense_data_len);
        }

        return sizeof(*srp_rsp) + sense_data_len;
}
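
/*
 * Editor's illustration (not from the original source): for a CHECK
 * CONDITION status with 18 bytes of fixed-format sense data, the IU built
 * above is sizeof(struct srp_rsp) + 18 bytes long and the sense bytes
 * follow the srp_rsp header directly (memcpy(srp_rsp + 1, ...)).
 */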

/**
 * srpt_build_tskmgmt_rsp() - Build a task management response.
 * @ch:       RDMA channel through which the request has been received.
 * @ioctx:    I/O context in which the SRP_RSP response will be built.
 * @rsp_code: RSP_CODE that will be stored in the response.
 * @tag:      Tag of the request for which this response is being generated.
 *
 * Returns the size in bytes of the SRP_RSP response.
 *
 * An SRP_RSP response contains a SCSI status or service response. See also
 * section 6.9 in the SRP r16a document for the format of an SRP_RSP
 * response.
 */
static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
                                  struct srpt_send_ioctx *ioctx,
                                  u8 rsp_code, u64 tag)
{
        struct srp_rsp *srp_rsp;
        int resp_data_len;
        int resp_len;

        resp_data_len = 4;
        resp_len = sizeof(*srp_rsp) + resp_data_len;

        srp_rsp = ioctx->ioctx.buf;
        BUG_ON(!srp_rsp);
        memset(srp_rsp, 0, sizeof(*srp_rsp));

        srp_rsp->opcode = SRP_RSP;
        srp_rsp->req_lim_delta =
                cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
        srp_rsp->tag = tag;

        srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
        srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
        srp_rsp->data[3] = rsp_code;

        return resp_len;
}
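
/*
 * Editor's illustration (not from the original source): the four bytes of
 * response data built above are all zero except the last one, so the
 * initiator sees { 0, 0, 0, rsp_code } immediately after the srp_rsp
 * header.
 */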

static int srpt_check_stop_free(struct se_cmd *cmd)
{
        struct srpt_send_ioctx *ioctx = container_of(cmd,
                                struct srpt_send_ioctx, cmd);

        return target_put_sess_cmd(&ioctx->cmd);
}

/**
 * srpt_handle_cmd() - Process SRP_CMD.
 */
static void srpt_handle_cmd(struct srpt_rdma_ch *ch,
                            struct srpt_recv_ioctx *recv_ioctx,
                            struct srpt_send_ioctx *send_ioctx)
{
        struct se_cmd *cmd;
        struct srp_cmd *srp_cmd;
        struct scatterlist *sg = NULL;
        unsigned sg_cnt = 0;
        u64 data_len;
        enum dma_data_direction dir;
        int rc;

        BUG_ON(!send_ioctx);

        srp_cmd = recv_ioctx->ioctx.buf;
        cmd = &send_ioctx->cmd;
        cmd->tag = srp_cmd->tag;

        switch (srp_cmd->task_attr) {
        case SRP_CMD_SIMPLE_Q:
                cmd->sam_task_attr = TCM_SIMPLE_TAG;
                break;
        case SRP_CMD_ORDERED_Q:
        default:
                cmd->sam_task_attr = TCM_ORDERED_TAG;
                break;
        case SRP_CMD_HEAD_OF_Q:
                cmd->sam_task_attr = TCM_HEAD_TAG;
                break;
        case SRP_CMD_ACA:
                cmd->sam_task_attr = TCM_ACA_TAG;
                break;
        }

        rc = srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &sg, &sg_cnt,
                        &data_len);
        if (rc) {
                if (rc != -EAGAIN) {
                        pr_err("0x%llx: parsing SRP descriptor table failed.\n",
                               srp_cmd->tag);
                }
                goto release_ioctx;
        }

        rc = target_submit_cmd_map_sgls(cmd, ch->sess, srp_cmd->cdb,
                               &send_ioctx->sense_data[0],
                               scsilun_to_int(&srp_cmd->lun), data_len,
                               TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF,
                               sg, sg_cnt, NULL, 0, NULL, 0);
        if (rc != 0) {
                pr_debug("target_submit_cmd() returned %d for tag %#llx\n", rc,
                         srp_cmd->tag);
                goto release_ioctx;
        }
        return;

release_ioctx:
        send_ioctx->state = SRPT_STATE_DONE;
        srpt_release_cmd(cmd);
}

static int srp_tmr_to_tcm(int fn)
{
        switch (fn) {
        case SRP_TSK_ABORT_TASK:
                return TMR_ABORT_TASK;
        case SRP_TSK_ABORT_TASK_SET:
                return TMR_ABORT_TASK_SET;
        case SRP_TSK_CLEAR_TASK_SET:
                return TMR_CLEAR_TASK_SET;
        case SRP_TSK_LUN_RESET:
                return TMR_LUN_RESET;
        case SRP_TSK_CLEAR_ACA:
                return TMR_CLEAR_ACA;
        default:
                return -1;
        }
}

/**
 * srpt_handle_tsk_mgmt() - Process an SRP_TSK_MGMT information unit.
 *
 * Either submits the request to the target core or sends back a response
 * with TMR_FUNCTION_REJECTED.
 *
 * For more information about SRP_TSK_MGMT information units, see also section
 * 6.7 in the SRP r16a document.
 */
static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
                                 struct srpt_recv_ioctx *recv_ioctx,
                                 struct srpt_send_ioctx *send_ioctx)
{
        struct srp_tsk_mgmt *srp_tsk;
        struct se_cmd *cmd;
        struct se_session *sess = ch->sess;
        int tcm_tmr;
        int rc;

        BUG_ON(!send_ioctx);

        srp_tsk = recv_ioctx->ioctx.buf;
        cmd = &send_ioctx->cmd;

        pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld"
                 " cm_id %p sess %p\n", srp_tsk->tsk_mgmt_func,
                 srp_tsk->task_tag, srp_tsk->tag, ch->cm_id, ch->sess);

        srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
        send_ioctx->cmd.tag = srp_tsk->tag;
        tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
        rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL,
                               scsilun_to_int(&srp_tsk->lun), srp_tsk, tcm_tmr,
                               GFP_KERNEL, srp_tsk->task_tag,
                               TARGET_SCF_ACK_KREF);
        if (rc != 0) {
                send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
                goto fail;
        }
        return;
fail:
        transport_send_check_condition_and_sense(cmd, 0, 0); // XXX:
}

/**
 * srpt_handle_new_iu() - Process a newly received information unit.
 * @ch:         RDMA channel through which the information unit has been
 *              received.
 * @recv_ioctx: SRPT receive I/O context associated with the information unit.
 * @send_ioctx: SRPT send I/O context to use for the reply, or NULL to
 *              allocate one from the channel's free list.
 */
1465static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
1466                               struct srpt_recv_ioctx *recv_ioctx,
1467                               struct srpt_send_ioctx *send_ioctx)
1468{
1469        struct srp_cmd *srp_cmd;
1470
1471        BUG_ON(!ch);
1472        BUG_ON(!recv_ioctx);
1473
1474        ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
1475                                   recv_ioctx->ioctx.dma, srp_max_req_size,
1476                                   DMA_FROM_DEVICE);
1477
1478        if (unlikely(ch->state == CH_CONNECTING))
1479                goto out_wait;
1480
1481        if (unlikely(ch->state != CH_LIVE))
1482                return;
1483
1484        srp_cmd = recv_ioctx->ioctx.buf;
1485        if (srp_cmd->opcode == SRP_CMD || srp_cmd->opcode == SRP_TSK_MGMT) {
1486                if (!send_ioctx) {
1487                        if (!list_empty(&ch->cmd_wait_list))
1488                                goto out_wait;
1489                        send_ioctx = srpt_get_send_ioctx(ch);
1490                }
1491                if (unlikely(!send_ioctx))
1492                        goto out_wait;
1493        }
1494
1495        switch (srp_cmd->opcode) {
1496        case SRP_CMD:
1497                srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
1498                break;
1499        case SRP_TSK_MGMT:
1500                srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx);
1501                break;
1502        case SRP_I_LOGOUT:
1503                pr_err("Not yet implemented: SRP_I_LOGOUT\n");
1504                break;
1505        case SRP_CRED_RSP:
1506                pr_debug("received SRP_CRED_RSP\n");
1507                break;
1508        case SRP_AER_RSP:
1509                pr_debug("received SRP_AER_RSP\n");
1510                break;
1511        case SRP_RSP:
                pr_err("received unexpected SRP_RSP\n");
1513                break;
1514        default:
1515                pr_err("received IU with unknown opcode 0x%x\n",
1516                       srp_cmd->opcode);
1517                break;
1518        }
1519
1520        srpt_post_recv(ch->sport->sdev, recv_ioctx);
1521        return;
1522
1523out_wait:
1524        list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
1525}
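
/*
 * Flow control sketch: each IU received through the SRQ consumes one
 * initiator credit (ch->req_lim is decremented in srpt_recv_done()) and each
 * response returns credits to the initiator through its req_lim_delta field.
 * When no send I/O context is available, or when older requests are still
 * waiting, the received IU is appended to ch->cmd_wait_list and replayed
 * later by srpt_process_wait_list().
 */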
1526
1527static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1528{
1529        struct srpt_rdma_ch *ch = cq->cq_context;
1530        struct srpt_recv_ioctx *ioctx =
1531                container_of(wc->wr_cqe, struct srpt_recv_ioctx, ioctx.cqe);
1532
1533        if (wc->status == IB_WC_SUCCESS) {
1534                int req_lim;
1535
1536                req_lim = atomic_dec_return(&ch->req_lim);
1537                if (unlikely(req_lim < 0))
1538                        pr_err("req_lim = %d < 0\n", req_lim);
1539                srpt_handle_new_iu(ch, ioctx, NULL);
1540        } else {
1541                pr_info("receiving failed for ioctx %p with status %d\n",
1542                        ioctx, wc->status);
1543        }
1544}
1545
1546/*
1547 * This function must be called from the context in which RDMA completions are
1548 * processed because it accesses the wait list without protection against
1549 * access from other threads.
1550 */
1551static void srpt_process_wait_list(struct srpt_rdma_ch *ch)
1552{
1553        struct srpt_send_ioctx *ioctx;
1554
1555        while (!list_empty(&ch->cmd_wait_list) &&
1556               ch->state >= CH_LIVE &&
1557               (ioctx = srpt_get_send_ioctx(ch)) != NULL) {
1558                struct srpt_recv_ioctx *recv_ioctx;
1559
1560                recv_ioctx = list_first_entry(&ch->cmd_wait_list,
1561                                              struct srpt_recv_ioctx,
1562                                              wait_list);
1563                list_del(&recv_ioctx->wait_list);
1564                srpt_handle_new_iu(ch, recv_ioctx, ioctx);
1565        }
1566}
1567
/*
 * Note: Although this has not yet been observed during tests, at least in
 * theory it is possible that the srpt_get_send_ioctx() call invoked by
 * srpt_handle_new_iu() fails. This is possible because the req_lim_delta
 * value in each response is set to one, and it is possible that this response
 * makes the initiator send a new request before the send completion for that
 * response has been processed. This could e.g. happen if the call to
 * srpt_put_send_ioctx() is delayed because of a higher priority interrupt or
 * if IB retransmission causes generation of the send completion to be
 * delayed. Incoming information units for which srpt_get_send_ioctx() fails
 * are queued on cmd_wait_list. srpt_send_done() below processes these
 * delayed requests one at a time by calling srpt_process_wait_list().
 */
1581static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc)
1582{
1583        struct srpt_rdma_ch *ch = cq->cq_context;
1584        struct srpt_send_ioctx *ioctx =
1585                container_of(wc->wr_cqe, struct srpt_send_ioctx, ioctx.cqe);
1586        enum srpt_command_state state;
1587
1588        state = srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
1589
1590        WARN_ON(state != SRPT_STATE_CMD_RSP_SENT &&
1591                state != SRPT_STATE_MGMT_RSP_SENT);
1592
1593        atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);
1594
        if (wc->status != IB_WC_SUCCESS)
                pr_info("sending response for ioctx 0x%p failed with status %d\n",
                        ioctx, wc->status);
1598
        if (state != SRPT_STATE_DONE) {
                transport_generic_free_cmd(&ioctx->cmd, 0);
        } else {
                pr_err("IB completion has been received too late for wr_id = %u.\n",
                       ioctx->ioctx.index);
        }
1605
1606        srpt_process_wait_list(ch);
1607}
1608
/**
 * srpt_create_ch_ib() - Create the completion queue and queue pair of a channel.
 */
1612static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
1613{
1614        struct ib_qp_init_attr *qp_init;
1615        struct srpt_port *sport = ch->sport;
1616        struct srpt_device *sdev = sport->sdev;
1617        const struct ib_device_attr *attrs = &sdev->device->attrs;
1618        u32 srp_sq_size = sport->port_attrib.srp_sq_size;
1619        int ret;
1620
1621        WARN_ON(ch->rq_size < 1);
1622
1623        ret = -ENOMEM;
1624        qp_init = kzalloc(sizeof(*qp_init), GFP_KERNEL);
1625        if (!qp_init)
1626                goto out;
1627
1628retry:
1629        ch->cq = ib_alloc_cq(sdev->device, ch, ch->rq_size + srp_sq_size,
1630                        0 /* XXX: spread CQs */, IB_POLL_WORKQUEUE);
1631        if (IS_ERR(ch->cq)) {
1632                ret = PTR_ERR(ch->cq);
1633                pr_err("failed to create CQ cqe= %d ret= %d\n",
1634                       ch->rq_size + srp_sq_size, ret);
1635                goto out;
1636        }
1637
1638        qp_init->qp_context = (void *)ch;
1639        qp_init->event_handler
1640                = (void(*)(struct ib_event *, void*))srpt_qp_event;
1641        qp_init->send_cq = ch->cq;
1642        qp_init->recv_cq = ch->cq;
1643        qp_init->srq = sdev->srq;
1644        qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
1645        qp_init->qp_type = IB_QPT_RC;
        /*
         * We divide up our send queue size into half SEND WRs to send the
         * completions, and half R/W contexts to actually do the RDMA
         * READ/WRITE transfers.  Note that we need to allocate CQ slots for
         * both, as RDMA contexts will also post completions for the RDMA
         * READ case.
         */
1653        qp_init->cap.max_send_wr = srp_sq_size / 2;
1654        qp_init->cap.max_rdma_ctxs = srp_sq_size / 2;
1655        qp_init->cap.max_send_sge = min(attrs->max_sge, SRPT_MAX_SG_PER_WQE);
1656        qp_init->port_num = ch->sport->port;
1657
1658        ch->qp = ib_create_qp(sdev->pd, qp_init);
1659        if (IS_ERR(ch->qp)) {
1660                ret = PTR_ERR(ch->qp);
1661                if (ret == -ENOMEM) {
1662                        srp_sq_size /= 2;
1663                        if (srp_sq_size >= MIN_SRPT_SQ_SIZE) {
1664                                ib_destroy_cq(ch->cq);
1665                                goto retry;
1666                        }
1667                }
1668                pr_err("failed to create_qp ret= %d\n", ret);
1669                goto err_destroy_cq;
1670        }
1671
1672        atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);
1673
1674        pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
1675                 __func__, ch->cq->cqe, qp_init->cap.max_send_sge,
1676                 qp_init->cap.max_send_wr, ch->cm_id);
1677
1678        ret = srpt_init_ch_qp(ch, ch->qp);
1679        if (ret)
1680                goto err_destroy_qp;
1681
1682out:
1683        kfree(qp_init);
1684        return ret;
1685
1686err_destroy_qp:
1687        ib_destroy_qp(ch->qp);
1688err_destroy_cq:
1689        ib_free_cq(ch->cq);
1690        goto out;
1691}
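
/*
 * Note on the retry loop in srpt_create_ch_ib(): when ib_create_qp() fails
 * with -ENOMEM, srp_sq_size is halved and the CQ and QP are recreated until
 * either the allocation succeeds or srp_sq_size drops below
 * MIN_SRPT_SQ_SIZE. With a (hypothetical) initial srp_sq_size of 4096, the
 * attempted sizes would be 4096, 2048, 1024, ...
 */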
1692
1693static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
1694{
1695        ib_destroy_qp(ch->qp);
1696        ib_free_cq(ch->cq);
1697}
1698
1699/**
1700 * srpt_close_ch() - Close an RDMA channel.
1701 *
1702 * Make sure all resources associated with the channel will be deallocated at
1703 * an appropriate time.
1704 *
1705 * Returns true if and only if the channel state has been modified into
1706 * CH_DRAINING.
1707 */
1708static bool srpt_close_ch(struct srpt_rdma_ch *ch)
1709{
1710        int ret;
1711
1712        if (!srpt_set_ch_state(ch, CH_DRAINING)) {
1713                pr_debug("%s-%d: already closed\n", ch->sess_name,
1714                         ch->qp->qp_num);
1715                return false;
1716        }
1717
1718        kref_get(&ch->kref);
1719
1720        ret = srpt_ch_qp_err(ch);
1721        if (ret < 0)
1722                pr_err("%s-%d: changing queue pair into error state failed: %d\n",
1723                       ch->sess_name, ch->qp->qp_num, ret);
1724
        pr_debug("%s-%d: queueing zero-length write\n", ch->sess_name,
                 ch->qp->qp_num);
1727        ret = srpt_zerolength_write(ch);
1728        if (ret < 0) {
1729                pr_err("%s-%d: queuing zero-length write failed: %d\n",
1730                       ch->sess_name, ch->qp->qp_num, ret);
1731                if (srpt_set_ch_state(ch, CH_DISCONNECTED))
1732                        schedule_work(&ch->release_work);
1733                else
1734                        WARN_ON_ONCE(true);
1735        }
1736
1737        kref_put(&ch->kref, srpt_free_ch);
1738
1739        return true;
1740}
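
/*
 * Channel state sketch, as implied by the transitions driven in this file
 * (srpt_disconnect_ch() moves a channel into CH_DISCONNECTING and
 * srpt_close_ch() into CH_DRAINING):
 *
 *   CH_CONNECTING -> CH_LIVE -> CH_DISCONNECTING -> CH_DRAINING
 *                                                       |
 *                                                       v
 *                                                CH_DISCONNECTED
 *
 * The zero-length write queued in srpt_close_ch() is expected to complete
 * once the QP has been drained; its completion handler
 * (srpt_zerolength_write_done()) then finishes the transition into
 * CH_DISCONNECTED and schedules release_work.
 */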
1741
1742/*
1743 * Change the channel state into CH_DISCONNECTING. If a channel has not yet
1744 * reached the connected state, close it. If a channel is in the connected
1745 * state, send a DREQ. If a DREQ has been received, send a DREP. Note: it is
1746 * the responsibility of the caller to ensure that this function is not
1747 * invoked concurrently with the code that accepts a connection. This means
1748 * that this function must either be invoked from inside a CM callback
1749 * function or that it must be invoked with the srpt_port.mutex held.
1750 */
1751static int srpt_disconnect_ch(struct srpt_rdma_ch *ch)
1752{
1753        int ret;
1754
1755        if (!srpt_set_ch_state(ch, CH_DISCONNECTING))
1756                return -ENOTCONN;
1757
1758        ret = ib_send_cm_dreq(ch->cm_id, NULL, 0);
1759        if (ret < 0)
1760                ret = ib_send_cm_drep(ch->cm_id, NULL, 0);
1761
1762        if (ret < 0 && srpt_close_ch(ch))
1763                ret = 0;
1764
1765        return ret;
1766}
1767
1768static void __srpt_close_all_ch(struct srpt_device *sdev)
1769{
1770        struct srpt_rdma_ch *ch;
1771
1772        lockdep_assert_held(&sdev->mutex);
1773
1774        list_for_each_entry(ch, &sdev->rch_list, list) {
1775                if (srpt_disconnect_ch(ch) >= 0)
1776                        pr_info("Closing channel %s-%d because target %s has been disabled\n",
1777                                ch->sess_name, ch->qp->qp_num,
1778                                sdev->device->name);
1779                srpt_close_ch(ch);
1780        }
1781}
1782
1783static void srpt_free_ch(struct kref *kref)
1784{
1785        struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref);
1786
1787        kfree(ch);
1788}
1789
1790static void srpt_release_channel_work(struct work_struct *w)
1791{
1792        struct srpt_rdma_ch *ch;
1793        struct srpt_device *sdev;
1794        struct se_session *se_sess;
1795
1796        ch = container_of(w, struct srpt_rdma_ch, release_work);
1797        pr_debug("%s: %s-%d; release_done = %p\n", __func__, ch->sess_name,
1798                 ch->qp->qp_num, ch->release_done);
1799
1800        sdev = ch->sport->sdev;
1801        BUG_ON(!sdev);
1802
1803        se_sess = ch->sess;
1804        BUG_ON(!se_sess);
1805
1806        target_sess_cmd_list_set_waiting(se_sess);
1807        target_wait_for_sess_cmds(se_sess);
1808
1809        transport_deregister_session_configfs(se_sess);
1810        transport_deregister_session(se_sess);
1811        ch->sess = NULL;
1812
1813        ib_destroy_cm_id(ch->cm_id);
1814
1815        srpt_destroy_ch_ib(ch);
1816
1817        srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
1818                             ch->sport->sdev, ch->rq_size,
1819                             ch->rsp_size, DMA_TO_DEVICE);
1820
1821        mutex_lock(&sdev->mutex);
1822        list_del_init(&ch->list);
1823        if (ch->release_done)
1824                complete(ch->release_done);
1825        mutex_unlock(&sdev->mutex);
1826
1827        wake_up(&sdev->ch_releaseQ);
1828
1829        kref_put(&ch->kref, srpt_free_ch);
1830}
1831
/**
 * srpt_cm_req_recv() - Process the event IB_CM_REQ_RECEIVED.
 *
 * Ownership of the cm_id is transferred to the target session if this
 * function returns zero. Otherwise the caller remains the owner of the cm_id.
 */
1838static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
1839                            struct ib_cm_req_event_param *param,
1840                            void *private_data)
1841{
1842        struct srpt_device *sdev = cm_id->context;
1843        struct srpt_port *sport = &sdev->port[param->port - 1];
1844        struct srp_login_req *req;
1845        struct srp_login_rsp *rsp;
1846        struct srp_login_rej *rej;
1847        struct ib_cm_rep_param *rep_param;
1848        struct srpt_rdma_ch *ch, *tmp_ch;
1849        __be16 *guid;
1850        u32 it_iu_len;
1851        int i, ret = 0;
1852
1853        WARN_ON_ONCE(irqs_disabled());
1854
1855        if (WARN_ON(!sdev || !private_data))
1856                return -EINVAL;
1857
1858        req = (struct srp_login_req *)private_data;
1859
1860        it_iu_len = be32_to_cpu(req->req_it_iu_len);
1861
        pr_info("Received SRP_LOGIN_REQ with i_port_id 0x%llx:0x%llx, t_port_id 0x%llx:0x%llx and it_iu_len %d on port %d (guid=0x%llx:0x%llx)\n",
                be64_to_cpu(*(__be64 *)&req->initiator_port_id[0]),
                be64_to_cpu(*(__be64 *)&req->initiator_port_id[8]),
                be64_to_cpu(*(__be64 *)&req->target_port_id[0]),
                be64_to_cpu(*(__be64 *)&req->target_port_id[8]),
                it_iu_len, param->port,
                be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]),
                be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8]));
1873
1874        rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
1875        rej = kzalloc(sizeof(*rej), GFP_KERNEL);
1876        rep_param = kzalloc(sizeof(*rep_param), GFP_KERNEL);
1877
1878        if (!rsp || !rej || !rep_param) {
1879                ret = -ENOMEM;
1880                goto out;
1881        }
1882
1883        if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
                rej->reason = cpu_to_be32(SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
                ret = -EINVAL;
                pr_err("rejected SRP_LOGIN_REQ because its length (%d bytes) is out of range (%d .. %d)\n",
                       it_iu_len, 64, srp_max_req_size);
1890                goto reject;
1891        }
1892
1893        if (!sport->enabled) {
                rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
                ret = -EINVAL;
                pr_err("rejected SRP_LOGIN_REQ because the target port has not yet been enabled\n");
1899                goto reject;
1900        }
1901
1902        if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
1903                rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;
1904
1905                mutex_lock(&sdev->mutex);
1906
1907                list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) {
1908                        if (!memcmp(ch->i_port_id, req->initiator_port_id, 16)
1909                            && !memcmp(ch->t_port_id, req->target_port_id, 16)
1910                            && param->port == ch->sport->port
1911                            && param->listen_id == ch->sport->sdev->cm_id
1912                            && ch->cm_id) {
1913                                if (srpt_disconnect_ch(ch) < 0)
1914                                        continue;
1915                                pr_info("Relogin - closed existing channel %s\n",
1916                                        ch->sess_name);
1917                                rsp->rsp_flags =
1918                                        SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
1919                        }
1920                }
1921
1922                mutex_unlock(&sdev->mutex);
1923
        } else {
                rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
        }
1926
1927        if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
1928            || *(__be64 *)(req->target_port_id + 8) !=
1929               cpu_to_be64(srpt_service_guid)) {
                rej->reason = cpu_to_be32(SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
                ret = -EINVAL;
                pr_err("rejected SRP_LOGIN_REQ because it has an invalid target port identifier.\n");
1935                goto reject;
1936        }
1937
1938        ch = kzalloc(sizeof(*ch), GFP_KERNEL);
1939        if (!ch) {
                rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
                pr_err("rejected SRP_LOGIN_REQ because memory allocation failed.\n");
1943                ret = -ENOMEM;
1944                goto reject;
1945        }
1946
1947        kref_init(&ch->kref);
1948        ch->zw_cqe.done = srpt_zerolength_write_done;
1949        INIT_WORK(&ch->release_work, srpt_release_channel_work);
1950        memcpy(ch->i_port_id, req->initiator_port_id, 16);
1951        memcpy(ch->t_port_id, req->target_port_id, 16);
1952        ch->sport = &sdev->port[param->port - 1];
1953        ch->cm_id = cm_id;
1954        cm_id->context = ch;
1955        /*
1956         * Avoid QUEUE_FULL conditions by limiting the number of buffers used
1957         * for the SRP protocol to the command queue size.
1958         */
1959        ch->rq_size = SRPT_RQ_SIZE;
1960        spin_lock_init(&ch->spinlock);
1961        ch->state = CH_CONNECTING;
1962        INIT_LIST_HEAD(&ch->cmd_wait_list);
1963        ch->rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
1964
1965        ch->ioctx_ring = (struct srpt_send_ioctx **)
1966                srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
1967                                      sizeof(*ch->ioctx_ring[0]),
1968                                      ch->rsp_size, DMA_TO_DEVICE);
1969        if (!ch->ioctx_ring)
1970                goto free_ch;
1971
1972        INIT_LIST_HEAD(&ch->free_list);
1973        for (i = 0; i < ch->rq_size; i++) {
1974                ch->ioctx_ring[i]->ch = ch;
1975                list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
1976        }
1977
1978        ret = srpt_create_ch_ib(ch);
1979        if (ret) {
                rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
                pr_err("rejected SRP_LOGIN_REQ because creating a new RDMA channel failed.\n");
1984                goto free_ring;
1985        }
1986
1987        ret = srpt_ch_qp_rtr(ch, ch->qp);
1988        if (ret) {
1989                rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
                pr_err("rejected SRP_LOGIN_REQ because enabling RTR failed (error code = %d)\n",
                       ret);
1992                goto destroy_ib;
1993        }
1994
1995        guid = (__be16 *)&param->primary_path->sgid.global.interface_id;
1996        snprintf(ch->ini_guid, sizeof(ch->ini_guid), "%04x:%04x:%04x:%04x",
1997                 be16_to_cpu(guid[0]), be16_to_cpu(guid[1]),
1998                 be16_to_cpu(guid[2]), be16_to_cpu(guid[3]));
1999        snprintf(ch->sess_name, sizeof(ch->sess_name), "0x%016llx%016llx",
2000                        be64_to_cpu(*(__be64 *)ch->i_port_id),
2001                        be64_to_cpu(*(__be64 *)(ch->i_port_id + 8)));
2002
2003        pr_debug("registering session %s\n", ch->sess_name);
2004
2005        if (sport->port_guid_tpg.se_tpg_wwn)
2006                ch->sess = target_alloc_session(&sport->port_guid_tpg, 0, 0,
2007                                                TARGET_PROT_NORMAL,
2008                                                ch->ini_guid, ch, NULL);
2009        if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
2010                ch->sess = target_alloc_session(&sport->port_gid_tpg, 0, 0,
2011                                        TARGET_PROT_NORMAL, ch->sess_name, ch,
2012                                        NULL);
2013        /* Retry without leading "0x" */
2014        if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
2015                ch->sess = target_alloc_session(&sport->port_gid_tpg, 0, 0,
2016                                                TARGET_PROT_NORMAL,
2017                                                ch->sess_name + 2, ch, NULL);
2018        if (IS_ERR_OR_NULL(ch->sess)) {
2019                pr_info("Rejected login because no ACL has been configured yet for initiator %s.\n",
2020                        ch->sess_name);
2021                rej->reason = cpu_to_be32((PTR_ERR(ch->sess) == -ENOMEM) ?
2022                                SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES :
2023                                SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
2024                goto destroy_ib;
2025        }
2026
2027        pr_debug("Establish connection sess=%p name=%s cm_id=%p\n", ch->sess,
2028                 ch->sess_name, ch->cm_id);
2029
2030        /* create srp_login_response */
2031        rsp->opcode = SRP_LOGIN_RSP;
2032        rsp->tag = req->tag;
2033        rsp->max_it_iu_len = req->req_it_iu_len;
2034        rsp->max_ti_iu_len = req->req_it_iu_len;
2035        ch->max_ti_iu_len = it_iu_len;
2036        rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT
2037                                   | SRP_BUF_FORMAT_INDIRECT);
2038        rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
2039        atomic_set(&ch->req_lim, ch->rq_size);
2040        atomic_set(&ch->req_lim_delta, 0);
2041
2042        /* create cm reply */
2043        rep_param->qp_num = ch->qp->qp_num;
2044        rep_param->private_data = (void *)rsp;
2045        rep_param->private_data_len = sizeof(*rsp);
2046        rep_param->rnr_retry_count = 7;
2047        rep_param->flow_control = 1;
2048        rep_param->failover_accepted = 0;
2049        rep_param->srq = 1;
2050        rep_param->responder_resources = 4;
2051        rep_param->initiator_depth = 4;
2052
2053        ret = ib_send_cm_rep(cm_id, rep_param);
2054        if (ret) {
                pr_err("sending SRP_LOGIN_REQ response failed (error code = %d)\n",
                       ret);
2057                goto release_channel;
2058        }
2059
2060        mutex_lock(&sdev->mutex);
2061        list_add_tail(&ch->list, &sdev->rch_list);
2062        mutex_unlock(&sdev->mutex);
2063
2064        goto out;
2065
2066release_channel:
2067        srpt_disconnect_ch(ch);
2068        transport_deregister_session_configfs(ch->sess);
2069        transport_deregister_session(ch->sess);
2070        ch->sess = NULL;
2071
2072destroy_ib:
2073        srpt_destroy_ch_ib(ch);
2074
2075free_ring:
2076        srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
2077                             ch->sport->sdev, ch->rq_size,
2078                             ch->rsp_size, DMA_TO_DEVICE);
2079free_ch:
2080        kfree(ch);
2081
2082reject:
2083        rej->opcode = SRP_LOGIN_REJ;
2084        rej->tag = req->tag;
2085        rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT
2086                                   | SRP_BUF_FORMAT_INDIRECT);
2087
2088        ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
2089                             (void *)rej, sizeof(*rej));
2090
2091out:
2092        kfree(rep_param);
2093        kfree(rsp);
2094        kfree(rej);
2095
2096        return ret;
2097}
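
/*
 * Multichannel behavior implemented above: a login with SRP_MULTICHAN_SINGLE
 * first disconnects any existing channel that matches the initiator and
 * target port IDs and reports SRP_LOGIN_RSP_MULTICHAN_TERMINATED in the
 * login response; any other multichannel action leaves existing channels
 * untouched and reports SRP_LOGIN_RSP_MULTICHAN_MAINTAINED.
 */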
2098
2099static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
2100                             enum ib_cm_rej_reason reason,
2101                             const u8 *private_data,
2102                             u8 private_data_len)
2103{
2104        char *priv = NULL;
2105        int i;
2106
2107        if (private_data_len && (priv = kmalloc(private_data_len * 3 + 1,
2108                                                GFP_KERNEL))) {
2109                for (i = 0; i < private_data_len; i++)
2110                        sprintf(priv + 3 * i, " %02x", private_data[i]);
2111        }
2112        pr_info("Received CM REJ for ch %s-%d; reason %d%s%s.\n",
2113                ch->sess_name, ch->qp->qp_num, reason, private_data_len ?
2114                "; private data" : "", priv ? priv : " (?)");
2115        kfree(priv);
2116}
2117
2118/**
2119 * srpt_cm_rtu_recv() - Process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event.
2120 *
2121 * An IB_CM_RTU_RECEIVED message indicates that the connection is established
2122 * and that the recipient may begin transmitting (RTU = ready to use).
2123 */
2124static void srpt_cm_rtu_recv(struct srpt_rdma_ch *ch)
2125{
2126        int ret;
2127
2128        if (srpt_set_ch_state(ch, CH_LIVE)) {
2129                ret = srpt_ch_qp_rts(ch, ch->qp);
2130
2131                if (ret == 0) {
2132                        /* Trigger wait list processing. */
2133                        ret = srpt_zerolength_write(ch);
2134                        WARN_ONCE(ret < 0, "%d\n", ret);
2135                } else {
2136                        srpt_close_ch(ch);
2137                }
2138        }
2139}
2140
/**
 * srpt_cm_handler() - IB connection manager callback function.
 *
 * A non-zero return value will cause the caller to destroy the CM ID.
 *
 * Note: srpt_cm_handler() must only return a non-zero value when the transfer
 * of cm_id ownership to a channel by srpt_cm_req_recv() has failed. Returning
 * a non-zero value in any other case will trigger a race with the
 * ib_destroy_cm_id() call in srpt_release_channel().
 */
2151static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2152{
2153        struct srpt_rdma_ch *ch = cm_id->context;
2154        int ret;
2155
2156        ret = 0;
2157        switch (event->event) {
2158        case IB_CM_REQ_RECEIVED:
2159                ret = srpt_cm_req_recv(cm_id, &event->param.req_rcvd,
2160                                       event->private_data);
2161                break;
2162        case IB_CM_REJ_RECEIVED:
2163                srpt_cm_rej_recv(ch, event->param.rej_rcvd.reason,
2164                                 event->private_data,
2165                                 IB_CM_REJ_PRIVATE_DATA_SIZE);
2166                break;
2167        case IB_CM_RTU_RECEIVED:
2168        case IB_CM_USER_ESTABLISHED:
2169                srpt_cm_rtu_recv(ch);
2170                break;
2171        case IB_CM_DREQ_RECEIVED:
2172                srpt_disconnect_ch(ch);
2173                break;
2174        case IB_CM_DREP_RECEIVED:
2175                pr_info("Received CM DREP message for ch %s-%d.\n",
2176                        ch->sess_name, ch->qp->qp_num);
2177                srpt_close_ch(ch);
2178                break;
2179        case IB_CM_TIMEWAIT_EXIT:
2180                pr_info("Received CM TimeWait exit for ch %s-%d.\n",
2181                        ch->sess_name, ch->qp->qp_num);
2182                srpt_close_ch(ch);
2183                break;
2184        case IB_CM_REP_ERROR:
2185                pr_info("Received CM REP error for ch %s-%d.\n", ch->sess_name,
2186                        ch->qp->qp_num);
2187                break;
2188        case IB_CM_DREQ_ERROR:
2189                pr_info("Received CM DREQ ERROR event.\n");
2190                break;
2191        case IB_CM_MRA_RECEIVED:
2192                pr_info("Received CM MRA event\n");
2193                break;
2194        default:
2195                pr_err("received unrecognized CM event %d\n", event->event);
2196                break;
2197        }
2198
2199        return ret;
2200}
2201
2202static int srpt_write_pending_status(struct se_cmd *se_cmd)
2203{
2204        struct srpt_send_ioctx *ioctx;
2205
2206        ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
2207        return srpt_get_cmd_state(ioctx) == SRPT_STATE_NEED_DATA;
2208}
2209
2210/*
2211 * srpt_write_pending() - Start data transfer from initiator to target (write).
2212 */
2213static int srpt_write_pending(struct se_cmd *se_cmd)
2214{
2215        struct srpt_send_ioctx *ioctx =
2216                container_of(se_cmd, struct srpt_send_ioctx, cmd);
2217        struct srpt_rdma_ch *ch = ioctx->ch;
2218        struct ib_send_wr *first_wr = NULL, *bad_wr;
2219        struct ib_cqe *cqe = &ioctx->rdma_cqe;
2220        enum srpt_command_state new_state;
2221        int ret, i;
2222
2223        new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
2224        WARN_ON(new_state == SRPT_STATE_DONE);
2225
2226        if (atomic_sub_return(ioctx->n_rdma, &ch->sq_wr_avail) < 0) {
2227                pr_warn("%s: IB send queue full (needed %d)\n",
2228                                __func__, ioctx->n_rdma);
2229                ret = -ENOMEM;
2230                goto out_undo;
2231        }
2232
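        /*
         * The loop below strings the work requests of all R/W contexts
         * together into a single chain and attaches the completion entry to
         * only one of them, so that srpt_rdma_read_done() runs exactly once
         * after the whole chain has completed.
         */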
2233        cqe->done = srpt_rdma_read_done;
2234        for (i = ioctx->n_rw_ctx - 1; i >= 0; i--) {
2235                struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
2236
2237                first_wr = rdma_rw_ctx_wrs(&ctx->rw, ch->qp, ch->sport->port,
2238                                cqe, first_wr);
2239                cqe = NULL;
2240        }
2241
2242        ret = ib_post_send(ch->qp, first_wr, &bad_wr);
2243        if (ret) {
2244                pr_err("%s: ib_post_send() returned %d for %d (avail: %d)\n",
2245                         __func__, ret, ioctx->n_rdma,
2246                         atomic_read(&ch->sq_wr_avail));
2247                goto out_undo;
2248        }
2249
2250        return 0;
2251out_undo:
2252        atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
2253        return ret;
2254}
2255
2256static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
2257{
2258        switch (tcm_mgmt_status) {
2259        case TMR_FUNCTION_COMPLETE:
2260                return SRP_TSK_MGMT_SUCCESS;
2261        case TMR_FUNCTION_REJECTED:
2262                return SRP_TSK_MGMT_FUNC_NOT_SUPP;
2263        }
2264        return SRP_TSK_MGMT_FAILED;
2265}
2266
/**
 * srpt_queue_response() - Transmit the response to a SCSI command.
 *
 * Callback function called by the TCM core. Must not block since it can be
 * invoked in the context of the IB completion handler.
 */
2273static void srpt_queue_response(struct se_cmd *cmd)
2274{
2275        struct srpt_send_ioctx *ioctx =
2276                container_of(cmd, struct srpt_send_ioctx, cmd);
2277        struct srpt_rdma_ch *ch = ioctx->ch;
2278        struct srpt_device *sdev = ch->sport->sdev;
2279        struct ib_send_wr send_wr, *first_wr = &send_wr, *bad_wr;
2280        struct ib_sge sge;
2281        enum srpt_command_state state;
2282        unsigned long flags;
2283        int resp_len, ret, i;
2284        u8 srp_tm_status;
2285
2286        BUG_ON(!ch);
2287
2288        spin_lock_irqsave(&ioctx->spinlock, flags);
2289        state = ioctx->state;
2290        switch (state) {
2291        case SRPT_STATE_NEW:
2292        case SRPT_STATE_DATA_IN:
2293                ioctx->state = SRPT_STATE_CMD_RSP_SENT;
2294                break;
2295        case SRPT_STATE_MGMT:
2296                ioctx->state = SRPT_STATE_MGMT_RSP_SENT;
2297                break;
2298        default:
2299                WARN(true, "ch %p; cmd %d: unexpected command state %d\n",
2300                        ch, ioctx->ioctx.index, ioctx->state);
2301                break;
2302        }
2303        spin_unlock_irqrestore(&ioctx->spinlock, flags);
2304
2305        if (unlikely(WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT)))
2306                return;
2307
2308        /* For read commands, transfer the data to the initiator. */
2309        if (ioctx->cmd.data_direction == DMA_FROM_DEVICE &&
2310            ioctx->cmd.data_length &&
2311            !ioctx->queue_status_only) {
2312                for (i = ioctx->n_rw_ctx - 1; i >= 0; i--) {
2313                        struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
2314
2315                        first_wr = rdma_rw_ctx_wrs(&ctx->rw, ch->qp,
2316                                        ch->sport->port, NULL, first_wr);
2317                }
2318        }
2319
        if (state != SRPT_STATE_MGMT) {
                resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->cmd.tag,
                                              cmd->scsi_status);
        } else {
                srp_tm_status =
                        tcm_to_srp_tsk_mgmt_status(cmd->se_tmr_req->response);
                resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status,
                                                  ioctx->cmd.tag);
        }
2329
2330        atomic_inc(&ch->req_lim);
2331
2332        if (unlikely(atomic_sub_return(1 + ioctx->n_rdma,
2333                        &ch->sq_wr_avail) < 0)) {
2334                pr_warn("%s: IB send queue full (needed %d)\n",
2335                                __func__, ioctx->n_rdma);
2336                ret = -ENOMEM;
2337                goto out;
2338        }
2339
2340        ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, resp_len,
2341                                      DMA_TO_DEVICE);
2342
2343        sge.addr = ioctx->ioctx.dma;
2344        sge.length = resp_len;
2345        sge.lkey = sdev->pd->local_dma_lkey;
2346
2347        ioctx->ioctx.cqe.done = srpt_send_done;
2348        send_wr.next = NULL;
2349        send_wr.wr_cqe = &ioctx->ioctx.cqe;
2350        send_wr.sg_list = &sge;
2351        send_wr.num_sge = 1;
2352        send_wr.opcode = IB_WR_SEND;
2353        send_wr.send_flags = IB_SEND_SIGNALED;
2354
2355        ret = ib_post_send(ch->qp, first_wr, &bad_wr);
2356        if (ret < 0) {
2357                pr_err("%s: sending cmd response failed for tag %llu (%d)\n",
2358                        __func__, ioctx->cmd.tag, ret);
2359                goto out;
2360        }
2361
2362        return;
2363
2364out:
2365        atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);
2366        atomic_dec(&ch->req_lim);
2367        srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
2368        target_put_sess_cmd(&ioctx->cmd);
2369}
2370
2371static int srpt_queue_data_in(struct se_cmd *cmd)
2372{
2373        srpt_queue_response(cmd);
2374        return 0;
2375}
2376
2377static void srpt_queue_tm_rsp(struct se_cmd *cmd)
2378{
2379        srpt_queue_response(cmd);
2380}
2381
2382static void srpt_aborted_task(struct se_cmd *cmd)
2383{
2384}
2385
2386static int srpt_queue_status(struct se_cmd *cmd)
2387{
2388        struct srpt_send_ioctx *ioctx;
2389
2390        ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
2391        BUG_ON(ioctx->sense_data != cmd->sense_buffer);
2392        if (cmd->se_cmd_flags &
2393            (SCF_TRANSPORT_TASK_SENSE | SCF_EMULATED_TASK_SENSE))
2394                WARN_ON(cmd->scsi_status != SAM_STAT_CHECK_CONDITION);
2395        ioctx->queue_status_only = true;
2396        srpt_queue_response(cmd);
2397        return 0;
2398}
2399
2400static void srpt_refresh_port_work(struct work_struct *work)
2401{
2402        struct srpt_port *sport = container_of(work, struct srpt_port, work);
2403
2404        srpt_refresh_port(sport);
2405}
2406
2407/**
2408 * srpt_release_sdev() - Free the channel resources associated with a target.
2409 */
2410static int srpt_release_sdev(struct srpt_device *sdev)
2411{
2412        int i, res;
2413
2414        WARN_ON_ONCE(irqs_disabled());
2415
2416        BUG_ON(!sdev);
2417
2418        mutex_lock(&sdev->mutex);
2419        for (i = 0; i < ARRAY_SIZE(sdev->port); i++)
2420                sdev->port[i].enabled = false;
2421        __srpt_close_all_ch(sdev);
2422        mutex_unlock(&sdev->mutex);
2423
2424        res = wait_event_interruptible(sdev->ch_releaseQ,
2425                                       list_empty_careful(&sdev->rch_list));
2426        if (res)
2427                pr_err("%s: interrupted.\n", __func__);
2428
2429        return 0;
2430}
2431
2432static struct se_wwn *__srpt_lookup_wwn(const char *name)
2433{
2434        struct ib_device *dev;
2435        struct srpt_device *sdev;
2436        struct srpt_port *sport;
2437        int i;
2438
2439        list_for_each_entry(sdev, &srpt_dev_list, list) {
2440                dev = sdev->device;
2441                if (!dev)
2442                        continue;
2443
2444                for (i = 0; i < dev->phys_port_cnt; i++) {
2445                        sport = &sdev->port[i];
2446
2447                        if (strcmp(sport->port_guid, name) == 0)
2448                                return &sport->port_guid_wwn;
2449                        if (strcmp(sport->port_gid, name) == 0)
2450                                return &sport->port_gid_wwn;
2451                }
2452        }
2453
2454        return NULL;
2455}
2456
2457static struct se_wwn *srpt_lookup_wwn(const char *name)
2458{
2459        struct se_wwn *wwn;
2460
2461        spin_lock(&srpt_dev_lock);
2462        wwn = __srpt_lookup_wwn(name);
2463        spin_unlock(&srpt_dev_lock);
2464
2465        return wwn;
2466}
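
/*
 * The name compared by __srpt_lookup_wwn() is the ASCII form of a port GUID
 * or port GID as generated during port registration (earlier in this file).
 * srpt_make_tport() relies on this lookup to resolve the $port directory
 * name that configfs passes in.
 */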
2467
/**
 * srpt_add_one() - InfiniBand device addition callback function.
 */
2471static void srpt_add_one(struct ib_device *device)
2472{
2473        struct srpt_device *sdev;
2474        struct srpt_port *sport;
2475        struct ib_srq_init_attr srq_attr;
2476        int i;
2477
2478        pr_debug("device = %p\n", device);
2479
2480        sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
2481        if (!sdev)
2482                goto err;
2483
2484        sdev->device = device;
2485        INIT_LIST_HEAD(&sdev->rch_list);
2486        init_waitqueue_head(&sdev->ch_releaseQ);
2487        mutex_init(&sdev->mutex);
2488
2489        sdev->pd = ib_alloc_pd(device, 0);
2490        if (IS_ERR(sdev->pd))
2491                goto free_dev;
2492
2493        sdev->srq_size = min(srpt_srq_size, sdev->device->attrs.max_srq_wr);
2494
2495        srq_attr.event_handler = srpt_srq_event;
2496        srq_attr.srq_context = (void *)sdev;
2497        srq_attr.attr.max_wr = sdev->srq_size;
2498        srq_attr.attr.max_sge = 1;
2499        srq_attr.attr.srq_limit = 0;
2500        srq_attr.srq_type = IB_SRQT_BASIC;
2501
2502        sdev->srq = ib_create_srq(sdev->pd, &srq_attr);
2503        if (IS_ERR(sdev->srq))
2504                goto err_pd;
2505
2506        pr_debug("%s: create SRQ #wr= %d max_allow=%d dev= %s\n",
2507                 __func__, sdev->srq_size, sdev->device->attrs.max_srq_wr,
2508                 device->name);
2509
2510        if (!srpt_service_guid)
2511                srpt_service_guid = be64_to_cpu(device->node_guid);
2512
2513        sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
2514        if (IS_ERR(sdev->cm_id))
2515                goto err_srq;
2516
2517        /* print out target login information */
        pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx,pkey=ffff,service_id=%016llx\n",
                 srpt_service_guid, srpt_service_guid, srpt_service_guid);
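
        /*
         * Illustration (assuming the upstream ib_srp initiator): the values
         * printed above would be combined with a destination GID and written
         * to /sys/class/infiniband_srp/<device>/add_target in order to log
         * in to this target.
         */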
2521
        /*
         * We do not have a consistent service_id (i.e. also the id_ext of the
         * target_id) to identify this target. We currently use the GUID of
         * the first HCA in the system as service_id; therefore, the target_id
         * will change if this HCA goes bad and is replaced by a different
         * HCA.
         */
2528        if (ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0))
2529                goto err_cm;
2530
2531        INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
2532                              srpt_event_handler);
2533        ib_register_event_handler(&sdev->event_handler);
2534
2535        sdev->ioctx_ring = (struct srpt_recv_ioctx **)
2536                srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
2537                                      sizeof(*sdev->ioctx_ring[0]),
2538                                      srp_max_req_size, DMA_FROM_DEVICE);
2539        if (!sdev->ioctx_ring)
2540                goto err_event;
2541
2542        for (i = 0; i < sdev->srq_size; ++i)
2543                srpt_post_recv(sdev, sdev->ioctx_ring[i]);
2544
2545        WARN_ON(sdev->device->phys_port_cnt > ARRAY_SIZE(sdev->port));
2546
2547        for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
2548                sport = &sdev->port[i - 1];
2549                sport->sdev = sdev;
2550                sport->port = i;
2551                sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
2552                sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
2553                sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
2554                INIT_WORK(&sport->work, srpt_refresh_port_work);
2555
2556                if (srpt_refresh_port(sport)) {
2557                        pr_err("MAD registration failed for %s-%d.\n",
2558                               sdev->device->name, i);
2559                        goto err_ring;
2560                }
2561        }
2562
2563        spin_lock(&srpt_dev_lock);
2564        list_add_tail(&sdev->list, &srpt_dev_list);
2565        spin_unlock(&srpt_dev_lock);
2566
2567out:
2568        ib_set_client_data(device, &srpt_client, sdev);
2569        pr_debug("added %s.\n", device->name);
2570        return;
2571
2572err_ring:
2573        srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
2574                             sdev->srq_size, srp_max_req_size,
2575                             DMA_FROM_DEVICE);
2576err_event:
2577        ib_unregister_event_handler(&sdev->event_handler);
2578err_cm:
2579        ib_destroy_cm_id(sdev->cm_id);
2580err_srq:
2581        ib_destroy_srq(sdev->srq);
2582err_pd:
2583        ib_dealloc_pd(sdev->pd);
2584free_dev:
2585        kfree(sdev);
2586err:
2587        sdev = NULL;
2588        pr_info("%s(%s) failed.\n", __func__, device->name);
2589        goto out;
2590}
2591
2592/**
2593 * srpt_remove_one() - InfiniBand device removal callback function.
2594 */
2595static void srpt_remove_one(struct ib_device *device, void *client_data)
2596{
2597        struct srpt_device *sdev = client_data;
2598        int i;
2599
2600        if (!sdev) {
2601                pr_info("%s(%s): nothing to do.\n", __func__, device->name);
2602                return;
2603        }
2604
2605        srpt_unregister_mad_agent(sdev);
2606
2607        ib_unregister_event_handler(&sdev->event_handler);
2608
2609        /* Cancel any work queued by the just unregistered IB event handler. */
2610        for (i = 0; i < sdev->device->phys_port_cnt; i++)
2611                cancel_work_sync(&sdev->port[i].work);
2612
2613        ib_destroy_cm_id(sdev->cm_id);
2614
2615        /*
2616         * Unregistering a target must happen after destroying sdev->cm_id
2617         * such that no new SRP_LOGIN_REQ information units can arrive while
2618         * destroying the target.
2619         */
2620        spin_lock(&srpt_dev_lock);
2621        list_del(&sdev->list);
2622        spin_unlock(&srpt_dev_lock);
2623        srpt_release_sdev(sdev);
2624
2625        ib_destroy_srq(sdev->srq);
2626        ib_dealloc_pd(sdev->pd);
2627
2628        srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
2629                             sdev->srq_size, srp_max_req_size, DMA_FROM_DEVICE);
2630        sdev->ioctx_ring = NULL;
2631        kfree(sdev);
2632}
2633
2634static struct ib_client srpt_client = {
2635        .name = DRV_NAME,
2636        .add = srpt_add_one,
2637        .remove = srpt_remove_one
2638};
2639
2640static int srpt_check_true(struct se_portal_group *se_tpg)
2641{
2642        return 1;
2643}
2644
2645static int srpt_check_false(struct se_portal_group *se_tpg)
2646{
2647        return 0;
2648}
2649
2650static char *srpt_get_fabric_name(void)
2651{
2652        return "srpt";
2653}
2654
2655static struct srpt_port *srpt_tpg_to_sport(struct se_portal_group *tpg)
2656{
2657        return tpg->se_tpg_wwn->priv;
2658}
2659
2660static char *srpt_get_fabric_wwn(struct se_portal_group *tpg)
2661{
2662        struct srpt_port *sport = srpt_tpg_to_sport(tpg);
2663
2664        WARN_ON_ONCE(tpg != &sport->port_guid_tpg &&
2665                     tpg != &sport->port_gid_tpg);
2666        return tpg == &sport->port_guid_tpg ? sport->port_guid :
2667                sport->port_gid;
2668}
2669
2670static u16 srpt_get_tag(struct se_portal_group *tpg)
2671{
2672        return 1;
2673}
2674
2675static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg)
2676{
2677        return 1;
2678}
2679
2680static void srpt_release_cmd(struct se_cmd *se_cmd)
2681{
2682        struct srpt_send_ioctx *ioctx = container_of(se_cmd,
2683                                struct srpt_send_ioctx, cmd);
2684        struct srpt_rdma_ch *ch = ioctx->ch;
2685        unsigned long flags;
2686
2687        WARN_ON_ONCE(ioctx->state != SRPT_STATE_DONE &&
2688                     !(ioctx->cmd.transport_state & CMD_T_ABORTED));
2689
2690        if (ioctx->n_rw_ctx) {
2691                srpt_free_rw_ctxs(ch, ioctx);
2692                ioctx->n_rw_ctx = 0;
2693        }
2694
2695        spin_lock_irqsave(&ch->spinlock, flags);
2696        list_add(&ioctx->free_list, &ch->free_list);
2697        spin_unlock_irqrestore(&ch->spinlock, flags);
2698}
2699
2700/**
2701 * srpt_close_session() - Forcibly close a session.
2702 *
2703 * Callback function invoked by the TCM core to clean up sessions associated
2704 * with a node ACL when the user invokes
2705 * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
2706 */
2707static void srpt_close_session(struct se_session *se_sess)
2708{
2709        DECLARE_COMPLETION_ONSTACK(release_done);
2710        struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr;
2711        struct srpt_device *sdev = ch->sport->sdev;
2712        bool wait;
2713
2714        pr_debug("ch %s-%d state %d\n", ch->sess_name, ch->qp->qp_num,
2715                 ch->state);
2716
2717        mutex_lock(&sdev->mutex);
2718        BUG_ON(ch->release_done);
2719        ch->release_done = &release_done;
2720        wait = !list_empty(&ch->list);
2721        srpt_disconnect_ch(ch);
2722        mutex_unlock(&sdev->mutex);
2723
2724        if (!wait)
2725                return;
2726
2727        while (wait_for_completion_timeout(&release_done, 180 * HZ) == 0)
2728                pr_info("%s(%s-%d state %d): still waiting ...\n", __func__,
2729                        ch->sess_name, ch->qp->qp_num, ch->state);
2730}
2731
2732/**
2733 * srpt_sess_get_index() - Return the value of scsiAttIntrPortIndex (SCSI-MIB).
2734 *
2735 * A quote from RFC 4455 (SCSI-MIB) about this MIB object:
2736 * This object represents an arbitrary integer used to uniquely identify a
2737 * particular attached remote initiator port to a particular SCSI target port
2738 * within a particular SCSI target device within a particular SCSI instance.
2739 */
2740static u32 srpt_sess_get_index(struct se_session *se_sess)
2741{
2742        return 0;
2743}
2744
2745static void srpt_set_default_node_attrs(struct se_node_acl *nacl)
2746{
2747}
2748
2749/* Note: only used from inside debug printk's by the TCM core. */
2750static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd)
2751{
2752        struct srpt_send_ioctx *ioctx;
2753
2754        ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
2755        return srpt_get_cmd_state(ioctx);
2756}
2757
2758static int srpt_parse_guid(u64 *guid, const char *name)
2759{
2760        u16 w[4];
2761        int ret = -EINVAL;
2762
2763        if (sscanf(name, "%hx:%hx:%hx:%hx", &w[0], &w[1], &w[2], &w[3]) != 4)
2764                goto out;
2765        *guid = get_unaligned_be64(w);
2766        ret = 0;
2767out:
2768        return ret;
2769}
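
/*
 * Example (value made up): srpt_parse_guid() accepts the colon-separated
 * form also used for ch->ini_guid, e.g. "5054:00ff:fe98:7654"; any other
 * format makes it return -EINVAL.
 */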
2770
/**
 * srpt_parse_i_port_id() - Parse an initiator port ID.
 * @i_port_id: Binary 128-bit port ID (output buffer, 16 bytes).
 * @name:      ASCII representation of a 128-bit initiator port ID.
 */
2776static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
2777{
2778        const char *p;
2779        unsigned len, count, leading_zero_bytes;
2780        int ret, rc;
2781
2782        p = name;
2783        if (strncasecmp(p, "0x", 2) == 0)
2784                p += 2;
2785        ret = -EINVAL;
2786        len = strlen(p);
2787        if (len % 2)
2788                goto out;
2789        count = min(len / 2, 16U);
2790        leading_zero_bytes = 16 - count;
2791        memset(i_port_id, 0, leading_zero_bytes);
        rc = hex2bin(i_port_id + leading_zero_bytes, p, count);
        if (rc < 0) {
                pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", rc);
                goto out;
        }
        ret = 0;
2796out:
2797        return ret;
2798}
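
/*
 * Example (value made up): "0x00000000000000000002c903000f1366" and the same
 * string without the "0x" prefix both parse to the same 16-byte port ID;
 * strings shorter than 32 hex digits are padded with leading zero bytes.
 */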
2799
2800/*
2801 * configfs callback function invoked for
2802 * mkdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
2803 */
2804static int srpt_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
2805{
2806        u64 guid;
2807        u8 i_port_id[16];
2808        int ret;
2809
2810        ret = srpt_parse_guid(&guid, name);
2811        if (ret < 0)
2812                ret = srpt_parse_i_port_id(i_port_id, name);
2813        if (ret < 0)
2814                pr_err("invalid initiator port ID %s\n", name);
2815        return ret;
2816}
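
/*
 * Example (names and values made up): both identifier forms are accepted,
 * e.g.
 *   mkdir /sys/kernel/config/target/srpt/$port/$tpg/acls/5054:00ff:fe98:7654
 *   mkdir /sys/kernel/config/target/srpt/$port/$tpg/acls/0x00000000000000000002c903000f1366
 */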
2817
2818static ssize_t srpt_tpg_attrib_srp_max_rdma_size_show(struct config_item *item,
2819                char *page)
2820{
2821        struct se_portal_group *se_tpg = attrib_to_tpg(item);
2822        struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
2823
2824        return sprintf(page, "%u\n", sport->port_attrib.srp_max_rdma_size);
2825}
2826
2827static ssize_t srpt_tpg_attrib_srp_max_rdma_size_store(struct config_item *item,
2828                const char *page, size_t count)
2829{
2830        struct se_portal_group *se_tpg = attrib_to_tpg(item);
2831        struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
2832        unsigned long val;
2833        int ret;
2834
2835        ret = kstrtoul(page, 0, &val);
2836        if (ret < 0) {
2837                pr_err("kstrtoul() failed with ret: %d\n", ret);
2838                return -EINVAL;
2839        }
2840        if (val > MAX_SRPT_RDMA_SIZE) {
2841                pr_err("val: %lu exceeds MAX_SRPT_RDMA_SIZE: %d\n", val,
2842                        MAX_SRPT_RDMA_SIZE);
2843                return -EINVAL;
2844        }
2845        if (val < DEFAULT_MAX_RDMA_SIZE) {
2846                pr_err("val: %lu smaller than DEFAULT_MAX_RDMA_SIZE: %d\n",
2847                        val, DEFAULT_MAX_RDMA_SIZE);
2848                return -EINVAL;
2849        }
2850        sport->port_attrib.srp_max_rdma_size = val;
2851
2852        return count;
2853}
2854
2855static ssize_t srpt_tpg_attrib_srp_max_rsp_size_show(struct config_item *item,
2856                char *page)
2857{
2858        struct se_portal_group *se_tpg = attrib_to_tpg(item);
2859        struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
2860
2861        return sprintf(page, "%u\n", sport->port_attrib.srp_max_rsp_size);
2862}
2863
2864static ssize_t srpt_tpg_attrib_srp_max_rsp_size_store(struct config_item *item,
2865                const char *page, size_t count)
2866{
2867        struct se_portal_group *se_tpg = attrib_to_tpg(item);
2868        struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
2869        unsigned long val;
2870        int ret;
2871
2872        ret = kstrtoul(page, 0, &val);
2873        if (ret < 0) {
2874                pr_err("kstrtoul() failed with ret: %d\n", ret);
2875                return -EINVAL;
2876        }
2877        if (val > MAX_SRPT_RSP_SIZE) {
2878                pr_err("val: %lu exceeds MAX_SRPT_RSP_SIZE: %d\n", val,
2879                        MAX_SRPT_RSP_SIZE);
2880                return -EINVAL;
2881        }
2882        if (val < MIN_MAX_RSP_SIZE) {
2883                pr_err("val: %lu smaller than MIN_MAX_RSP_SIZE: %d\n", val,
2884                        MIN_MAX_RSP_SIZE);
2885                return -EINVAL;
2886        }
2887        sport->port_attrib.srp_max_rsp_size = val;
2888
2889        return count;
2890}
2891
2892static ssize_t srpt_tpg_attrib_srp_sq_size_show(struct config_item *item,
2893                char *page)
2894{
2895        struct se_portal_group *se_tpg = attrib_to_tpg(item);
2896        struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
2897
2898        return sprintf(page, "%u\n", sport->port_attrib.srp_sq_size);
2899}
2900
2901static ssize_t srpt_tpg_attrib_srp_sq_size_store(struct config_item *item,
2902                const char *page, size_t count)
2903{
2904        struct se_portal_group *se_tpg = attrib_to_tpg(item);
2905        struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
2906        unsigned long val;
2907        int ret;
2908
2909        ret = kstrtoul(page, 0, &val);
2910        if (ret < 0) {
2911                pr_err("kstrtoul() failed with ret: %d\n", ret);
2912                return -EINVAL;
2913        }
2914        if (val > MAX_SRPT_SRQ_SIZE) {
2915                pr_err("val: %lu exceeds MAX_SRPT_SRQ_SIZE: %d\n", val,
2916                        MAX_SRPT_SRQ_SIZE);
2917                return -EINVAL;
2918        }
2919        if (val < MIN_SRPT_SRQ_SIZE) {
2920                pr_err("val: %lu smaller than MIN_SRPT_SRQ_SIZE: %d\n", val,
2921                        MIN_SRPT_SRQ_SIZE);
2922                return -EINVAL;
2923        }
2924        sport->port_attrib.srp_sq_size = val;
2925
2926        return count;
2927}
2928
2929CONFIGFS_ATTR(srpt_tpg_attrib_,  srp_max_rdma_size);
2930CONFIGFS_ATTR(srpt_tpg_attrib_,  srp_max_rsp_size);
2931CONFIGFS_ATTR(srpt_tpg_attrib_,  srp_sq_size);
2932
2933static struct configfs_attribute *srpt_tpg_attrib_attrs[] = {
2934        &srpt_tpg_attrib_attr_srp_max_rdma_size,
2935        &srpt_tpg_attrib_attr_srp_max_rsp_size,
2936        &srpt_tpg_attrib_attr_srp_sq_size,
2937        NULL,
2938};
2939
2940static ssize_t srpt_tpg_enable_show(struct config_item *item, char *page)
2941{
2942        struct se_portal_group *se_tpg = to_tpg(item);
2943        struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
2944
        return snprintf(page, PAGE_SIZE, "%d\n", sport->enabled ? 1 : 0);
2946}
2947
2948static ssize_t srpt_tpg_enable_store(struct config_item *item,
2949                const char *page, size_t count)
2950{
2951        struct se_portal_group *se_tpg = to_tpg(item);
2952        struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
2953        struct srpt_device *sdev = sport->sdev;
2954        struct srpt_rdma_ch *ch;
2955        unsigned long tmp;
2956        int ret;
2957
        ret = kstrtoul(page, 0, &tmp);
        if (ret < 0) {
                pr_err("Unable to parse the value written to the enable attribute\n");
                return -EINVAL;
        }

        if (tmp != 0 && tmp != 1) {
                pr_err("Illegal value for the enable attribute: %lu\n", tmp);
                return -EINVAL;
        }
2968        if (sport->enabled == tmp)
2969                goto out;
2970        sport->enabled = tmp;
2971        if (sport->enabled)
2972                goto out;
2973
2974        mutex_lock(&sdev->mutex);
2975        list_for_each_entry(ch, &sdev->rch_list, list) {
2976                if (ch->sport == sport) {
2977                        pr_debug("%s: ch %p %s-%d\n", __func__, ch,
2978                                 ch->sess_name, ch->qp->qp_num);
2979                        srpt_disconnect_ch(ch);
2980                        srpt_close_ch(ch);
2981                }
2982        }
2983        mutex_unlock(&sdev->mutex);
2984
2985out:
2986        return count;
2987}
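
/*
 * Usage example: writing 1 to
 *   /sys/kernel/config/target/srpt/$port/$tpg/enable
 * enables the port; writing 0 disables it and, as implemented above, also
 * disconnects and closes every channel established through this port.
 */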
2988
2989CONFIGFS_ATTR(srpt_tpg_, enable);
2990
2991static struct configfs_attribute *srpt_tpg_attrs[] = {
2992        &srpt_tpg_attr_enable,
2993        NULL,
2994};
2995
2996/**
2997 * configfs callback invoked for
2998 * mkdir /sys/kernel/config/target/$driver/$port/$tpg
2999 */
3000static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
3001                                             struct config_group *group,
3002                                             const char *name)
3003{
3004        struct srpt_port *sport = wwn->priv;
3005        struct se_portal_group *tpg;
3006        int res;
3007
3008        WARN_ON_ONCE(wwn != &sport->port_guid_wwn &&
3009                     wwn != &sport->port_gid_wwn);
3010        tpg = wwn == &sport->port_guid_wwn ? &sport->port_guid_tpg :
3011                &sport->port_gid_tpg;
3012        res = core_tpg_register(wwn, tpg, SCSI_PROTOCOL_SRP);
3013        if (res)
3014                return ERR_PTR(res);
3015
3016        return tpg;
3017}
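/*
 * Example (directory names illustrative): each port is exported under both
 * a GUID-based and a GID-based WWN, so a TPG may be created under either
 * name; both variants resolve to the same struct srpt_port:
 *
 *	mkdir /sys/kernel/config/target/srpt/$port_guid/tpgt_1
 *	mkdir /sys/kernel/config/target/srpt/$port_gid/tpgt_1
 */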
3018
3019/**
3020 * srpt_drop_tpg() - configfs callback invoked for
3021 * rmdir /sys/kernel/config/target/$driver/$port/$tpg
3022 */
3023static void srpt_drop_tpg(struct se_portal_group *tpg)
3024{
3025        struct srpt_port *sport = srpt_tpg_to_sport(tpg);
3026
3027        sport->enabled = false;
3028        core_tpg_deregister(tpg);
3029}
3030
3031/**
3032 * srpt_make_tport() - configfs callback invoked for
3033 * mkdir /sys/kernel/config/target/$driver/$port
3034 */
3035static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
3036                                      struct config_group *group,
3037                                      const char *name)
3038{
3039        return srpt_lookup_wwn(name) ? : ERR_PTR(-EINVAL);
3040}
3041
3042/**
3043 * srpt_drop_tport() - configfs callback invoked for
3044 * rmdir /sys/kernel/config/target/$driver/$port
3045 */
3046static void srpt_drop_tport(struct se_wwn *wwn)
3047{
3048}
3049
3050static ssize_t srpt_wwn_version_show(struct config_item *item, char *buf)
3051{
3052        return scnprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION);
3053}
3054
3055CONFIGFS_ATTR_RO(srpt_wwn_, version);
3056
3057static struct configfs_attribute *srpt_wwn_attrs[] = {
3058        &srpt_wwn_attr_version,
3059        NULL,
3060};
3061
3062static const struct target_core_fabric_ops srpt_template = {
3063        .module                         = THIS_MODULE,
3064        .name                           = "srpt",
3065        .get_fabric_name                = srpt_get_fabric_name,
3066        .tpg_get_wwn                    = srpt_get_fabric_wwn,
3067        .tpg_get_tag                    = srpt_get_tag,
3068        .tpg_check_demo_mode            = srpt_check_false,
3069        .tpg_check_demo_mode_cache      = srpt_check_true,
3070        .tpg_check_demo_mode_write_protect = srpt_check_true,
3071        .tpg_check_prod_mode_write_protect = srpt_check_false,
3072        .tpg_get_inst_index             = srpt_tpg_get_inst_index,
3073        .release_cmd                    = srpt_release_cmd,
3074        .check_stop_free                = srpt_check_stop_free,
3075        .close_session                  = srpt_close_session,
3076        .sess_get_index                 = srpt_sess_get_index,
3077        .sess_get_initiator_sid         = NULL,
3078        .write_pending                  = srpt_write_pending,
3079        .write_pending_status           = srpt_write_pending_status,
3080        .set_default_node_attributes    = srpt_set_default_node_attrs,
3081        .get_cmd_state                  = srpt_get_tcm_cmd_state,
3082        .queue_data_in                  = srpt_queue_data_in,
3083        .queue_status                   = srpt_queue_status,
3084        .queue_tm_rsp                   = srpt_queue_tm_rsp,
3085        .aborted_task                   = srpt_aborted_task,
3086        /*
3087         * Set up function pointers for the generic configfs logic in
3088         * target_core_fabric_configfs.c.
3089         */
3090        .fabric_make_wwn                = srpt_make_tport,
3091        .fabric_drop_wwn                = srpt_drop_tport,
3092        .fabric_make_tpg                = srpt_make_tpg,
3093        .fabric_drop_tpg                = srpt_drop_tpg,
3094        .fabric_init_nodeacl            = srpt_init_nodeacl,
3095
3096        .tfc_wwn_attrs                  = srpt_wwn_attrs,
3097        .tfc_tpg_base_attrs             = srpt_tpg_attrs,
3098        .tfc_tpg_attrib_attrs           = srpt_tpg_attrib_attrs,
3099};
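/*
 * Sketch of how the configfs directory operations reach the callbacks wired
 * up above (directory names illustrative):
 *
 *	mkdir $port  ->  .fabric_make_wwn  (srpt_make_tport)
 *	rmdir $port  ->  .fabric_drop_wwn  (srpt_drop_tport)
 *	mkdir $tpg   ->  .fabric_make_tpg  (srpt_make_tpg)
 *	rmdir $tpg   ->  .fabric_drop_tpg  (srpt_drop_tpg)
 */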
3100
3101/**
3102 * srpt_init_module() - Kernel module initialization.
3103 *
3104 * Note: Since ib_register_client() registers callback functions, and since at
3105 * least one of these callback functions (srpt_add_one()) calls target core
3106 * functions, this driver must be registered with the target core before
3107 * ib_register_client() is called.
3108 */
3109static int __init srpt_init_module(void)
3110{
3111        int ret;
3112
3113        ret = -EINVAL;
3114        if (srp_max_req_size < MIN_MAX_REQ_SIZE) {
3115                pr_err("invalid value %d for kernel module parameter"
3116                       " srp_max_req_size -- must be at least %d.\n",
3117                       srp_max_req_size, MIN_MAX_REQ_SIZE);
3118                goto out;
3119        }
3120
3121        if (srpt_srq_size < MIN_SRPT_SRQ_SIZE
3122            || srpt_srq_size > MAX_SRPT_SRQ_SIZE) {
3123                pr_err("invalid value %d for kernel module parameter"
3124                       " srpt_srq_size -- must be in the range [%d..%d].\n",
3125                       srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE);
3126                goto out;
3127        }
3128
3129        ret = target_register_template(&srpt_template);
3130        if (ret)
3131                goto out;
3132
3133        ret = ib_register_client(&srpt_client);
3134        if (ret) {
3135                pr_err("couldn't register IB client\n");
3136                goto out_unregister_target;
3137        }
3138
3139        return 0;
3140
3141out_unregister_target:
3142        target_unregister_template(&srpt_template);
3143out:
3144        return ret;
3145}
3146
3147static void __exit srpt_cleanup_module(void)
3148{
3149        ib_unregister_client(&srpt_client);
3150        target_unregister_template(&srpt_template);
3151}
3152
3153module_init(srpt_init_module);
3154module_exit(srpt_cleanup_module);
3155
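/*
 * Example invocation (values illustrative; both parameters are validated in
 * srpt_init_module() against the bounds checked above):
 *
 *	modprobe ib_srpt srp_max_req_size=4148 srpt_srq_size=4095
 */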