linux/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*******************************************************************************
   3 * IBM Virtual SCSI Target Driver
   4 * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp.
   5 *                         Santiago Leon (santil@us.ibm.com) IBM Corp.
   6 *                         Linda Xie (lxie@us.ibm.com) IBM Corp.
   7 *
   8 * Copyright (C) 2005-2011 FUJITA Tomonori <tomof@acm.org>
   9 * Copyright (C) 2010 Nicholas A. Bellinger <nab@kernel.org>
  10 *
  11 * Authors: Bryant G. Ly <bryantly@linux.vnet.ibm.com>
  12 * Authors: Michael Cyr <mikecyr@linux.vnet.ibm.com>
  13 *
  14 ****************************************************************************/
  15
  16#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt
  17
  18#include <linux/module.h>
  19#include <linux/kernel.h>
  20#include <linux/slab.h>
  21#include <linux/types.h>
  22#include <linux/list.h>
  23#include <linux/string.h>
  24#include <linux/delay.h>
  25#include <linux/of.h>
  26
  27#include <target/target_core_base.h>
  28#include <target/target_core_fabric.h>
  29
  30#include <asm/hvcall.h>
  31#include <asm/vio.h>
  32
  33#include <scsi/viosrp.h>
  34
  35#include "ibmvscsi_tgt.h"
  36
  37#define IBMVSCSIS_VERSION       "v0.2"
  38
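/*
 * INITIAL_SRP_LIMIT is the initial SRP request limit (credit) granted to
 * the client, DEFAULT_MAX_SECTORS is the default maximum sectors per
 * request, and MAX_TXU (1 MB) bounds a single transfer unit.
 */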
  39#define INITIAL_SRP_LIMIT       800
  40#define DEFAULT_MAX_SECTORS     256
   41#define MAX_TXU                 (1024 * 1024)
  42
  43static uint max_vdma_size = MAX_H_COPY_RDMA;
  44
  45static char system_id[SYS_ID_NAME_LEN] = "";
  46static char partition_name[PARTITION_NAMELEN] = "UNKNOWN";
  47static uint partition_number = -1;
  48
  49/* Adapter list and lock to control it */
  50static DEFINE_SPINLOCK(ibmvscsis_dev_lock);
  51static LIST_HEAD(ibmvscsis_dev_list);
  52
  53static long ibmvscsis_parse_command(struct scsi_info *vscsi,
  54                                    struct viosrp_crq *crq);
  55
  56static void ibmvscsis_adapter_idle(struct scsi_info *vscsi);
  57
  58static void ibmvscsis_determine_resid(struct se_cmd *se_cmd,
  59                                      struct srp_rsp *rsp)
  60{
  61        u32 residual_count = se_cmd->residual_count;
  62
  63        if (!residual_count)
  64                return;
  65
  66        if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
  67                if (se_cmd->data_direction == DMA_TO_DEVICE) {
  68                        /* residual data from an underflow write */
  69                        rsp->flags = SRP_RSP_FLAG_DOUNDER;
  70                        rsp->data_out_res_cnt = cpu_to_be32(residual_count);
  71                } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
  72                        /* residual data from an underflow read */
  73                        rsp->flags = SRP_RSP_FLAG_DIUNDER;
  74                        rsp->data_in_res_cnt = cpu_to_be32(residual_count);
  75                }
  76        } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
  77                if (se_cmd->data_direction == DMA_TO_DEVICE) {
  78                        /* residual data from an overflow write */
  79                        rsp->flags = SRP_RSP_FLAG_DOOVER;
  80                        rsp->data_out_res_cnt = cpu_to_be32(residual_count);
  81                } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
  82                        /* residual data from an overflow read */
  83                        rsp->flags = SRP_RSP_FLAG_DIOVER;
  84                        rsp->data_in_res_cnt = cpu_to_be32(residual_count);
  85                }
  86        }
  87}
  88
  89/**
  90 * connection_broken() - Determine if the connection to the client is good
  91 * @vscsi:      Pointer to our adapter structure
  92 *
  93 * This function attempts to send a ping MAD to the client. If the call to
  94 * queue the request returns H_CLOSED then the connection has been broken
  95 * and the function returns TRUE.
  96 *
  97 * EXECUTION ENVIRONMENT:
  98 *      Interrupt or Process environment
  99 */
 100static bool connection_broken(struct scsi_info *vscsi)
 101{
 102        struct viosrp_crq *crq;
 103        u64 buffer[2] = { 0, 0 };
 104        long h_return_code;
 105        bool rc = false;
 106
 107        /* create a PING crq */
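        /*
         * the two u64 words overlay the 16-byte CRQ element; h_send_crq
         * takes them as the high and low halves of the message
         */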
 108        crq = (struct viosrp_crq *)&buffer;
 109        crq->valid = VALID_CMD_RESP_EL;
 110        crq->format = MESSAGE_IN_CRQ;
 111        crq->status = PING;
 112
 113        h_return_code = h_send_crq(vscsi->dds.unit_id,
 114                                   cpu_to_be64(buffer[MSG_HI]),
 115                                   cpu_to_be64(buffer[MSG_LOW]));
 116
 117        dev_dbg(&vscsi->dev, "Connection_broken: rc %ld\n", h_return_code);
 118
 119        if (h_return_code == H_CLOSED)
 120                rc = true;
 121
 122        return rc;
 123}
 124
 125/**
 126 * ibmvscsis_unregister_command_q() - Helper Function-Unregister Command Queue
 127 * @vscsi:      Pointer to our adapter structure
 128 *
  129 * This function calls h_free_crq and then frees the interrupt bit, etc.
  130 * It must release the lock before doing so because of the time it can take
  131 * for h_free_crq in PHYP.
  132 * NOTE: * the caller must make sure that state and/or flags will prevent
  133 *         the interrupt handler from scheduling work.
  134 *       * anyone calling this function may need to set the CRQ_CLOSED flag;
  135 *         we can't do it here, because we don't have the lock.
 136 *
 137 * EXECUTION ENVIRONMENT:
 138 *      Process level
 139 */
 140static long ibmvscsis_unregister_command_q(struct scsi_info *vscsi)
 141{
 142        long qrc;
 143        long rc = ADAPT_SUCCESS;
 144        int ticks = 0;
 145
 146        do {
 147                qrc = h_free_crq(vscsi->dds.unit_id);
 148                switch (qrc) {
 149                case H_SUCCESS:
 150                        spin_lock_bh(&vscsi->intr_lock);
 151                        vscsi->flags &= ~PREP_FOR_SUSPEND_FLAGS;
 152                        spin_unlock_bh(&vscsi->intr_lock);
 153                        break;
 154
 155                case H_HARDWARE:
 156                case H_PARAMETER:
 157                        dev_err(&vscsi->dev, "unregister_command_q: error from h_free_crq %ld\n",
 158                                qrc);
 159                        rc = ERROR;
 160                        break;
 161
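                /*
                 * the H_BUSY / H_LONG_BUSY_* return codes mean PHYP wants
                 * us to retry after roughly the indicated delay
                 */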
 162                case H_BUSY:
 163                case H_LONG_BUSY_ORDER_1_MSEC:
 164                        /* msleep not good for small values */
 165                        usleep_range(1000, 2000);
 166                        ticks += 1;
 167                        break;
 168                case H_LONG_BUSY_ORDER_10_MSEC:
 169                        usleep_range(10000, 20000);
 170                        ticks += 10;
 171                        break;
 172                case H_LONG_BUSY_ORDER_100_MSEC:
 173                        msleep(100);
 174                        ticks += 100;
 175                        break;
 176                case H_LONG_BUSY_ORDER_1_SEC:
 177                        ssleep(1);
 178                        ticks += 1000;
 179                        break;
 180                case H_LONG_BUSY_ORDER_10_SEC:
 181                        ssleep(10);
 182                        ticks += 10000;
 183                        break;
 184                case H_LONG_BUSY_ORDER_100_SEC:
 185                        ssleep(100);
 186                        ticks += 100000;
 187                        break;
 188                default:
 189                        dev_err(&vscsi->dev, "unregister_command_q: unknown error %ld from h_free_crq\n",
 190                                qrc);
 191                        rc = ERROR;
 192                        break;
 193                }
 194
 195                /*
  196                 * don't wait more than 300 seconds;
  197                 * ticks are in milliseconds, more or less
 198                 */
 199                if (ticks > 300000 && qrc != H_SUCCESS) {
 200                        rc = ERROR;
 201                        dev_err(&vscsi->dev, "Excessive wait for h_free_crq\n");
 202                }
 203        } while (qrc != H_SUCCESS && rc == ADAPT_SUCCESS);
 204
 205        dev_dbg(&vscsi->dev, "Freeing CRQ: phyp rc %ld, rc %ld\n", qrc, rc);
 206
 207        return rc;
 208}
 209
 210/**
 211 * ibmvscsis_delete_client_info() - Helper function to Delete Client Info
 212 * @vscsi:      Pointer to our adapter structure
 213 * @client_closed:      True if client closed its queue
 214 *
 215 * Deletes information specific to the client when the client goes away
 216 *
 217 * EXECUTION ENVIRONMENT:
 218 *      Interrupt or Process
 219 */
 220static void ibmvscsis_delete_client_info(struct scsi_info *vscsi,
 221                                         bool client_closed)
 222{
 223        vscsi->client_cap = 0;
 224
 225        /*
 226         * Some things we don't want to clear if we're closing the queue,
 227         * because some clients don't resend the host handshake when they
 228         * get a transport event.
 229         */
 230        if (client_closed)
 231                vscsi->client_data.os_type = 0;
 232}
 233
 234/**
 235 * ibmvscsis_free_command_q() - Free Command Queue
 236 * @vscsi:      Pointer to our adapter structure
 237 *
 238 * This function calls unregister_command_q, then clears interrupts and
 239 * any pending interrupt acknowledgments associated with the command q.
 240 * It also clears memory if there is no error.
 241 *
  242 * PHYP did not meet the PAPR architecture here, so we must give up the
  243 * lock. This causes a timing hole regarding state change.  To close the
  244 * hole this routine does accounting on any change that occurred while
  245 * the lock was not held.
  246 * NOTE: this routine must give up and then reacquire the interrupt lock;
  247 *       the caller must make sure that state and/or flags will prevent the
  248 *       interrupt handler from scheduling work.
 249 *
 250 * EXECUTION ENVIRONMENT:
 251 *      Process level, interrupt lock is held
 252 */
 253static long ibmvscsis_free_command_q(struct scsi_info *vscsi)
 254{
 255        int bytes;
 256        u32 flags_under_lock;
 257        u16 state_under_lock;
 258        long rc = ADAPT_SUCCESS;
 259
 260        if (!(vscsi->flags & CRQ_CLOSED)) {
 261                vio_disable_interrupts(vscsi->dma_dev);
 262
 263                state_under_lock = vscsi->new_state;
 264                flags_under_lock = vscsi->flags;
 265                vscsi->phyp_acr_state = 0;
 266                vscsi->phyp_acr_flags = 0;
 267
 268                spin_unlock_bh(&vscsi->intr_lock);
 269                rc = ibmvscsis_unregister_command_q(vscsi);
 270                spin_lock_bh(&vscsi->intr_lock);
 271
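                /*
                 * record any state or flag changes made while the lock was
                 * dropped so they can be acted on later (see adapter_idle)
                 */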
 272                if (state_under_lock != vscsi->new_state)
 273                        vscsi->phyp_acr_state = vscsi->new_state;
 274
 275                vscsi->phyp_acr_flags = ((~flags_under_lock) & vscsi->flags);
 276
 277                if (rc == ADAPT_SUCCESS) {
 278                        bytes = vscsi->cmd_q.size * PAGE_SIZE;
 279                        memset(vscsi->cmd_q.base_addr, 0, bytes);
 280                        vscsi->cmd_q.index = 0;
 281                        vscsi->flags |= CRQ_CLOSED;
 282
 283                        ibmvscsis_delete_client_info(vscsi, false);
 284                }
 285
 286                dev_dbg(&vscsi->dev, "free_command_q: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
 287                        vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
 288                        vscsi->phyp_acr_state);
 289        }
 290        return rc;
 291}
 292
 293/**
 294 * ibmvscsis_cmd_q_dequeue() - Get valid Command element
 295 * @mask:       Mask to use in case index wraps
 296 * @current_index:      Current index into command queue
 297 * @base_addr:  Pointer to start of command queue
 298 *
 299 * Returns a pointer to a valid command element or NULL, if the command
 300 * queue is empty
 301 *
 302 * EXECUTION ENVIRONMENT:
 303 *      Interrupt environment, interrupt lock held
 304 */
 305static struct viosrp_crq *ibmvscsis_cmd_q_dequeue(uint mask,
 306                                                  uint *current_index,
 307                                                  struct viosrp_crq *base_addr)
 308{
 309        struct viosrp_crq *ptr;
 310
 311        ptr = base_addr + *current_index;
 312
 313        if (ptr->valid) {
 314                *current_index = (*current_index + 1) & mask;
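                /* read the rest of the element only after seeing valid set */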
 315                dma_rmb();
 316        } else {
 317                ptr = NULL;
 318        }
 319
 320        return ptr;
 321}
 322
 323/**
 324 * ibmvscsis_send_init_message() - send initialize message to the client
 325 * @vscsi:      Pointer to our adapter structure
 326 * @format:     Which Init Message format to send
 327 *
 328 * EXECUTION ENVIRONMENT:
 329 *      Interrupt environment interrupt lock held
 330 */
 331static long ibmvscsis_send_init_message(struct scsi_info *vscsi, u8 format)
 332{
 333        struct viosrp_crq *crq;
 334        u64 buffer[2] = { 0, 0 };
 335        long rc;
 336
 337        crq = (struct viosrp_crq *)&buffer;
 338        crq->valid = VALID_INIT_MSG;
 339        crq->format = format;
 340        rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]),
 341                        cpu_to_be64(buffer[MSG_LOW]));
 342
 343        return rc;
 344}
 345
 346/**
 347 * ibmvscsis_check_init_msg() - Check init message valid
 348 * @vscsi:      Pointer to our adapter structure
 349 * @format:     Pointer to return format of Init Message, if any.
 350 *              Set to UNUSED_FORMAT if no Init Message in queue.
 351 *
  352 * Checks if an initialize message was queued by the initiator
 353 * after the queue was created and before the interrupt was enabled.
 354 *
 355 * EXECUTION ENVIRONMENT:
 356 *      Process level only, interrupt lock held
 357 */
 358static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format)
 359{
 360        struct viosrp_crq *crq;
 361        long rc = ADAPT_SUCCESS;
 362
 363        crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask, &vscsi->cmd_q.index,
 364                                      vscsi->cmd_q.base_addr);
 365        if (!crq) {
 366                *format = (uint)UNUSED_FORMAT;
 367        } else if (crq->valid == VALID_INIT_MSG && crq->format == INIT_MSG) {
 368                *format = (uint)INIT_MSG;
 369                crq->valid = INVALIDATE_CMD_RESP_EL;
 370                dma_rmb();
 371
  372                /*
  373                 * the caller has ensured no initialize message was sent
  374                 * after the queue was created, so there should be no other
  375                 * message on the queue.
  376                 */
 377                crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask,
 378                                              &vscsi->cmd_q.index,
 379                                              vscsi->cmd_q.base_addr);
 380                if (crq) {
 381                        *format = (uint)(crq->format);
 382                        rc = ERROR;
 383                        crq->valid = INVALIDATE_CMD_RESP_EL;
 384                        dma_rmb();
 385                }
 386        } else {
 387                *format = (uint)(crq->format);
 388                rc = ERROR;
 389                crq->valid = INVALIDATE_CMD_RESP_EL;
 390                dma_rmb();
 391        }
 392
 393        return rc;
 394}
 395
 396/**
 397 * ibmvscsis_disconnect() - Helper function to disconnect
 398 * @work:       Pointer to work_struct, gives access to our adapter structure
 399 *
 400 * An error has occurred or the driver received a Transport event,
 401 * and the driver is requesting that the command queue be de-registered
 402 * in a safe manner. If there is no outstanding I/O then we can stop the
  403 * queue. If we are restarting the queue it will be reflected in
  404 * the state of the adapter.
 405 *
 406 * EXECUTION ENVIRONMENT:
 407 *      Process environment
 408 */
 409static void ibmvscsis_disconnect(struct work_struct *work)
 410{
 411        struct scsi_info *vscsi = container_of(work, struct scsi_info,
 412                                               proc_work);
 413        u16 new_state;
 414        bool wait_idle = false;
 415
 416        spin_lock_bh(&vscsi->intr_lock);
 417        new_state = vscsi->new_state;
 418        vscsi->new_state = 0;
 419
 420        vscsi->flags |= DISCONNECT_SCHEDULED;
 421        vscsi->flags &= ~SCHEDULE_DISCONNECT;
 422
 423        dev_dbg(&vscsi->dev, "disconnect: flags 0x%x, state 0x%hx\n",
 424                vscsi->flags, vscsi->state);
 425
 426        /*
 427         * check which state we are in and see if we
  428         * should transition to the new state
 429         */
 430        switch (vscsi->state) {
 431        /* Should never be called while in this state. */
 432        case NO_QUEUE:
 433        /*
 434         * Can never transition from this state;
  435         * ignore errors and logout.
 436         */
 437        case UNCONFIGURING:
 438                break;
 439
 440        /* can transition from this state to UNCONFIGURING */
 441        case ERR_DISCONNECT:
 442                if (new_state == UNCONFIGURING)
 443                        vscsi->state = new_state;
 444                break;
 445
 446        /*
  447         * Can transition from this state to unconfiguring
 448         * or err disconnect.
 449         */
 450        case ERR_DISCONNECT_RECONNECT:
 451                switch (new_state) {
 452                case UNCONFIGURING:
 453                case ERR_DISCONNECT:
 454                        vscsi->state = new_state;
 455                        break;
 456
 457                case WAIT_IDLE:
 458                        break;
 459                default:
 460                        break;
 461                }
 462                break;
 463
 464        /* can transition from this state to UNCONFIGURING */
 465        case ERR_DISCONNECTED:
 466                if (new_state == UNCONFIGURING)
 467                        vscsi->state = new_state;
 468                break;
 469
 470        case WAIT_ENABLED:
 471                switch (new_state) {
 472                case UNCONFIGURING:
 473                        vscsi->state = new_state;
 474                        vscsi->flags |= RESPONSE_Q_DOWN;
 475                        vscsi->flags &= ~(SCHEDULE_DISCONNECT |
 476                                          DISCONNECT_SCHEDULED);
 477                        dma_rmb();
 478                        if (vscsi->flags & CFG_SLEEPING) {
 479                                vscsi->flags &= ~CFG_SLEEPING;
 480                                complete(&vscsi->unconfig);
 481                        }
 482                        break;
 483
 484                /* should never happen */
 485                case ERR_DISCONNECT:
 486                case ERR_DISCONNECT_RECONNECT:
 487                case WAIT_IDLE:
 488                        dev_err(&vscsi->dev, "disconnect: invalid state %d for WAIT_IDLE\n",
 489                                vscsi->state);
 490                        break;
 491                }
 492                break;
 493
 494        case WAIT_IDLE:
 495                switch (new_state) {
 496                case UNCONFIGURING:
 497                        vscsi->flags |= RESPONSE_Q_DOWN;
 498                        vscsi->state = new_state;
 499                        vscsi->flags &= ~(SCHEDULE_DISCONNECT |
 500                                          DISCONNECT_SCHEDULED);
 501                        ibmvscsis_free_command_q(vscsi);
 502                        break;
 503                case ERR_DISCONNECT:
 504                case ERR_DISCONNECT_RECONNECT:
 505                        vscsi->state = new_state;
 506                        break;
 507                }
 508                break;
 509
 510        /*
 511         * Initiator has not done a successful srp login
  512         * or has done a successful srp logout (adapter was not
  513         * busy). In the first case there can be responses queued
  514         * waiting for space on the initiator's response queue (MAD).
  515         * In the second case the adapter is idle. Assume the worst case,
 516         * i.e. the second case.
 517         */
 518        case WAIT_CONNECTION:
 519        case CONNECTED:
 520        case SRP_PROCESSING:
 521                wait_idle = true;
 522                vscsi->state = new_state;
 523                break;
 524
 525        /* can transition from this state to UNCONFIGURING */
 526        case UNDEFINED:
 527                if (new_state == UNCONFIGURING)
 528                        vscsi->state = new_state;
 529                break;
 530        default:
 531                break;
 532        }
 533
 534        if (wait_idle) {
 535                dev_dbg(&vscsi->dev, "disconnect start wait, active %d, sched %d\n",
 536                        (int)list_empty(&vscsi->active_q),
 537                        (int)list_empty(&vscsi->schedule_q));
 538                if (!list_empty(&vscsi->active_q) ||
 539                    !list_empty(&vscsi->schedule_q)) {
 540                        vscsi->flags |= WAIT_FOR_IDLE;
 541                        dev_dbg(&vscsi->dev, "disconnect flags 0x%x\n",
 542                                vscsi->flags);
 543                        /*
  544                         * wait_for_completion() cannot be called with the
  545                         * interrupt lock held.
 546                         */
 547                        spin_unlock_bh(&vscsi->intr_lock);
 548                        wait_for_completion(&vscsi->wait_idle);
 549                        spin_lock_bh(&vscsi->intr_lock);
 550                }
 551                dev_dbg(&vscsi->dev, "disconnect stop wait\n");
 552
 553                ibmvscsis_adapter_idle(vscsi);
 554        }
 555
 556        spin_unlock_bh(&vscsi->intr_lock);
 557}
 558
 559/**
 560 * ibmvscsis_post_disconnect() - Schedule the disconnect
 561 * @vscsi:      Pointer to our adapter structure
 562 * @new_state:  State to move to after disconnecting
 563 * @flag_bits:  Flags to turn on in adapter structure
 564 *
 565 * If it's already been scheduled, then see if we need to "upgrade"
 566 * the new state (if the one passed in is more "severe" than the
 567 * previous one).
 568 *
 569 * PRECONDITION:
 570 *      interrupt lock is held
 571 */
 572static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
 573                                      uint flag_bits)
 574{
 575        uint state;
 576
 577        /* check the validity of the new state */
 578        switch (new_state) {
 579        case UNCONFIGURING:
 580        case ERR_DISCONNECT:
 581        case ERR_DISCONNECT_RECONNECT:
 582        case WAIT_IDLE:
 583                break;
 584
 585        default:
 586                dev_err(&vscsi->dev, "post_disconnect: Invalid new state %d\n",
 587                        new_state);
 588                return;
 589        }
 590
 591        vscsi->flags |= flag_bits;
 592
 593        dev_dbg(&vscsi->dev, "post_disconnect: new_state 0x%x, flag_bits 0x%x, vscsi->flags 0x%x, state %hx\n",
 594                new_state, flag_bits, vscsi->flags, vscsi->state);
 595
 596        if (!(vscsi->flags & (DISCONNECT_SCHEDULED | SCHEDULE_DISCONNECT))) {
 597                vscsi->flags |= SCHEDULE_DISCONNECT;
 598                vscsi->new_state = new_state;
 599
 600                INIT_WORK(&vscsi->proc_work, ibmvscsis_disconnect);
 601                (void)queue_work(vscsi->work_q, &vscsi->proc_work);
 602        } else {
 603                if (vscsi->new_state)
 604                        state = vscsi->new_state;
 605                else
 606                        state = vscsi->state;
 607
 608                switch (state) {
 609                case NO_QUEUE:
 610                case UNCONFIGURING:
 611                        break;
 612
 613                case ERR_DISCONNECTED:
 614                case ERR_DISCONNECT:
 615                case UNDEFINED:
 616                        if (new_state == UNCONFIGURING)
 617                                vscsi->new_state = new_state;
 618                        break;
 619
 620                case ERR_DISCONNECT_RECONNECT:
 621                        switch (new_state) {
 622                        case UNCONFIGURING:
 623                        case ERR_DISCONNECT:
 624                                vscsi->new_state = new_state;
 625                                break;
 626                        default:
 627                                break;
 628                        }
 629                        break;
 630
 631                case WAIT_ENABLED:
 632                case WAIT_IDLE:
 633                case WAIT_CONNECTION:
 634                case CONNECTED:
 635                case SRP_PROCESSING:
 636                        vscsi->new_state = new_state;
 637                        break;
 638
 639                default:
 640                        break;
 641                }
 642        }
 643
 644        dev_dbg(&vscsi->dev, "Leaving post_disconnect: flags 0x%x, new_state 0x%x\n",
 645                vscsi->flags, vscsi->new_state);
 646}
 647
 648/**
 649 * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message
 650 * @vscsi:      Pointer to our adapter structure
 651 *
 652 * Must be called with interrupt lock held.
 653 */
 654static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi)
 655{
 656        long rc = ADAPT_SUCCESS;
 657
 658        switch (vscsi->state) {
 659        case NO_QUEUE:
 660        case ERR_DISCONNECT:
 661        case ERR_DISCONNECT_RECONNECT:
 662        case ERR_DISCONNECTED:
 663        case UNCONFIGURING:
 664        case UNDEFINED:
 665                rc = ERROR;
 666                break;
 667
 668        case WAIT_CONNECTION:
 669                vscsi->state = CONNECTED;
 670                break;
 671
 672        case WAIT_IDLE:
 673        case SRP_PROCESSING:
 674        case CONNECTED:
 675        case WAIT_ENABLED:
 676        default:
 677                rc = ERROR;
 678                dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n",
 679                        vscsi->state);
 680                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
 681                break;
 682        }
 683
 684        return rc;
 685}
 686
 687/**
 688 * ibmvscsis_handle_init_msg() - Respond to an Init Message
 689 * @vscsi:      Pointer to our adapter structure
 690 *
 691 * Must be called with interrupt lock held.
 692 */
 693static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
 694{
 695        long rc = ADAPT_SUCCESS;
 696
 697        switch (vscsi->state) {
 698        case WAIT_CONNECTION:
 699                rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
 700                switch (rc) {
 701                case H_SUCCESS:
 702                        vscsi->state = CONNECTED;
 703                        break;
 704
 705                case H_PARAMETER:
 706                        dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
 707                                rc);
 708                        ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
 709                        break;
 710
 711                case H_DROPPED:
 712                        dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
 713                                rc);
 714                        rc = ERROR;
 715                        ibmvscsis_post_disconnect(vscsi,
 716                                                  ERR_DISCONNECT_RECONNECT, 0);
 717                        break;
 718
 719                case H_CLOSED:
 720                        dev_warn(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
 721                                 rc);
 722                        rc = 0;
 723                        break;
 724                }
 725                break;
 726
 727        case UNDEFINED:
 728                rc = ERROR;
 729                break;
 730
 731        case UNCONFIGURING:
 732                break;
 733
 734        case WAIT_ENABLED:
 735        case CONNECTED:
 736        case SRP_PROCESSING:
 737        case WAIT_IDLE:
 738        case NO_QUEUE:
 739        case ERR_DISCONNECT:
 740        case ERR_DISCONNECT_RECONNECT:
 741        case ERR_DISCONNECTED:
 742        default:
 743                rc = ERROR;
 744                dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n",
 745                        vscsi->state);
 746                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
 747                break;
 748        }
 749
 750        return rc;
 751}
 752
 753/**
 754 * ibmvscsis_init_msg() - Respond to an init message
 755 * @vscsi:      Pointer to our adapter structure
 756 * @crq:        Pointer to CRQ element containing the Init Message
 757 *
 758 * EXECUTION ENVIRONMENT:
 759 *      Interrupt, interrupt lock held
 760 */
 761static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
 762{
 763        long rc = ADAPT_SUCCESS;
 764
 765        dev_dbg(&vscsi->dev, "init_msg: state 0x%hx\n", vscsi->state);
 766
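        /*
         * ask the hypervisor for the partner partition's info; the result
         * is written into the page mapped at map_ioba and read from map_buf
         */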
 767        rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
 768                      (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
 769                      0);
 770        if (rc == H_SUCCESS) {
 771                vscsi->client_data.partition_number =
 772                        be64_to_cpu(*(u64 *)vscsi->map_buf);
 773                dev_dbg(&vscsi->dev, "init_msg, part num %d\n",
 774                        vscsi->client_data.partition_number);
 775        } else {
 776                dev_dbg(&vscsi->dev, "init_msg h_vioctl rc %ld\n", rc);
 777                rc = ADAPT_SUCCESS;
 778        }
 779
 780        if (crq->format == INIT_MSG) {
 781                rc = ibmvscsis_handle_init_msg(vscsi);
 782        } else if (crq->format == INIT_COMPLETE_MSG) {
 783                rc = ibmvscsis_handle_init_compl_msg(vscsi);
 784        } else {
 785                rc = ERROR;
 786                dev_err(&vscsi->dev, "init_msg: invalid format %d\n",
 787                        (uint)crq->format);
 788                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
 789        }
 790
 791        return rc;
 792}
 793
 794/**
 795 * ibmvscsis_establish_new_q() - Establish new CRQ queue
 796 * @vscsi:      Pointer to our adapter structure
 797 *
 798 * Must be called with interrupt lock held.
 799 */
 800static long ibmvscsis_establish_new_q(struct scsi_info *vscsi)
 801{
 802        long rc = ADAPT_SUCCESS;
 803        uint format;
 804
 805        rc = h_vioctl(vscsi->dds.unit_id, H_ENABLE_PREPARE_FOR_SUSPEND, 30000,
 806                      0, 0, 0, 0);
 807        if (rc == H_SUCCESS)
 808                vscsi->flags |= PREP_FOR_SUSPEND_ENABLED;
 809        else if (rc != H_NOT_FOUND)
 810                dev_err(&vscsi->dev, "Error from Enable Prepare for Suspend: %ld\n",
 811                        rc);
 812
 813        vscsi->flags &= PRESERVE_FLAG_FIELDS;
 814        vscsi->rsp_q_timer.timer_pops = 0;
 815        vscsi->debit = 0;
 816        vscsi->credit = 0;
 817
 818        rc = vio_enable_interrupts(vscsi->dma_dev);
 819        if (rc) {
 820                dev_warn(&vscsi->dev, "establish_new_q: failed to enable interrupts, rc %ld\n",
 821                         rc);
 822                return rc;
 823        }
 824
 825        rc = ibmvscsis_check_init_msg(vscsi, &format);
 826        if (rc) {
 827                dev_err(&vscsi->dev, "establish_new_q: check_init_msg failed, rc %ld\n",
 828                        rc);
 829                return rc;
 830        }
 831
 832        if (format == UNUSED_FORMAT) {
 833                rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
 834                switch (rc) {
 835                case H_SUCCESS:
 836                case H_DROPPED:
 837                case H_CLOSED:
 838                        rc = ADAPT_SUCCESS;
 839                        break;
 840
 841                case H_PARAMETER:
 842                case H_HARDWARE:
 843                        break;
 844
 845                default:
 846                        vscsi->state = UNDEFINED;
 847                        rc = H_HARDWARE;
 848                        break;
 849                }
 850        } else if (format == INIT_MSG) {
 851                rc = ibmvscsis_handle_init_msg(vscsi);
 852        }
 853
 854        return rc;
 855}
 856
 857/**
 858 * ibmvscsis_reset_queue() - Reset CRQ Queue
 859 * @vscsi:      Pointer to our adapter structure
 860 *
  861 * This function calls h_free_crq and then h_reg_crq and does all
 862 * of the bookkeeping to get us back to where we can communicate.
 863 *
  864 * Actually, we don't always call h_free_crq.  A problem was discovered
  865 * where one partition would close and reopen its queue, which would
  866 * cause its partner to get a transport event, which would cause it to
  867 * close and reopen its queue, which would cause the original partition
  868 * to get a transport event, etc., etc.  To prevent this, we don't
  869 * actually close our queue if the client initiated the reset (i.e.
  870 * either we got a transport event or we have detected that the client's
  871 * queue is gone).
 872 *
 873 * EXECUTION ENVIRONMENT:
 874 *      Process environment, called with interrupt lock held
 875 */
 876static void ibmvscsis_reset_queue(struct scsi_info *vscsi)
 877{
 878        int bytes;
 879        long rc = ADAPT_SUCCESS;
 880
 881        dev_dbg(&vscsi->dev, "reset_queue: flags 0x%x\n", vscsi->flags);
 882
 883        /* don't reset, the client did it for us */
 884        if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
 885                vscsi->flags &= PRESERVE_FLAG_FIELDS;
 886                vscsi->rsp_q_timer.timer_pops = 0;
 887                vscsi->debit = 0;
 888                vscsi->credit = 0;
 889                vscsi->state = WAIT_CONNECTION;
 890                vio_enable_interrupts(vscsi->dma_dev);
 891        } else {
 892                rc = ibmvscsis_free_command_q(vscsi);
 893                if (rc == ADAPT_SUCCESS) {
 894                        vscsi->state = WAIT_CONNECTION;
 895
 896                        bytes = vscsi->cmd_q.size * PAGE_SIZE;
 897                        rc = h_reg_crq(vscsi->dds.unit_id,
 898                                       vscsi->cmd_q.crq_token, bytes);
 899                        if (rc == H_CLOSED || rc == H_SUCCESS) {
 900                                rc = ibmvscsis_establish_new_q(vscsi);
 901                        }
 902
 903                        if (rc != ADAPT_SUCCESS) {
 904                                dev_dbg(&vscsi->dev, "reset_queue: reg_crq rc %ld\n",
 905                                        rc);
 906
 907                                vscsi->state = ERR_DISCONNECTED;
 908                                vscsi->flags |= RESPONSE_Q_DOWN;
 909                                ibmvscsis_free_command_q(vscsi);
 910                        }
 911                } else {
 912                        vscsi->state = ERR_DISCONNECTED;
 913                        vscsi->flags |= RESPONSE_Q_DOWN;
 914                }
 915        }
 916}
 917
 918/**
 919 * ibmvscsis_free_cmd_resources() - Free command resources
 920 * @vscsi:      Pointer to our adapter structure
  921 * @cmd:        Command which is no longer in use
 922 *
 923 * Must be called with interrupt lock held.
 924 */
 925static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi,
 926                                         struct ibmvscsis_cmd *cmd)
 927{
 928        struct iu_entry *iue = cmd->iue;
 929
 930        switch (cmd->type) {
 931        case TASK_MANAGEMENT:
 932        case SCSI_CDB:
 933                /*
 934                 * When the queue goes down this value is cleared, so it
 935                 * cannot be cleared in this general purpose function.
 936                 */
 937                if (vscsi->debit)
 938                        vscsi->debit -= 1;
 939                break;
 940        case ADAPTER_MAD:
 941                vscsi->flags &= ~PROCESSING_MAD;
 942                break;
 943        case UNSET_TYPE:
 944                break;
 945        default:
 946                dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n",
 947                        cmd->type);
 948                break;
 949        }
 950
 951        cmd->iue = NULL;
 952        list_add_tail(&cmd->list, &vscsi->free_cmd);
 953        srp_iu_put(iue);
 954
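        /*
         * if this was the last outstanding command, wake anyone waiting
         * for the adapter to go idle
         */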
 955        if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) &&
 956            list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) {
 957                vscsi->flags &= ~WAIT_FOR_IDLE;
 958                complete(&vscsi->wait_idle);
 959        }
 960}
 961
 962/**
 963 * ibmvscsis_ready_for_suspend() - Helper function to call VIOCTL
 964 * @vscsi:      Pointer to our adapter structure
 965 * @idle:       Indicates whether we were called from adapter_idle.  This
 966 *              is important to know if we need to do a disconnect, since if
 967 *              we're called from adapter_idle, we're still processing the
 968 *              current disconnect, so we can't just call post_disconnect.
 969 *
 970 * This function is called when the adapter is idle when phyp has sent
 971 * us a Prepare for Suspend Transport Event.
 972 *
 973 * EXECUTION ENVIRONMENT:
 974 *      Process or interrupt environment called with interrupt lock held
 975 */
 976static long ibmvscsis_ready_for_suspend(struct scsi_info *vscsi, bool idle)
 977{
 978        long rc = 0;
 979        struct viosrp_crq *crq;
 980
 981        /* See if there is a Resume event in the queue */
 982        crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
 983
 984        dev_dbg(&vscsi->dev, "ready_suspend: flags 0x%x, state 0x%hx crq_valid:%x\n",
 985                vscsi->flags, vscsi->state, (int)crq->valid);
 986
 987        if (!(vscsi->flags & PREP_FOR_SUSPEND_ABORTED) && !(crq->valid)) {
 988                rc = h_vioctl(vscsi->dds.unit_id, H_READY_FOR_SUSPEND, 0, 0, 0,
 989                              0, 0);
 990                if (rc) {
 991                        dev_err(&vscsi->dev, "Ready for Suspend Vioctl failed: %ld\n",
 992                                rc);
 993                        rc = 0;
 994                }
 995        } else if (((vscsi->flags & PREP_FOR_SUSPEND_OVERWRITE) &&
 996                    (vscsi->flags & PREP_FOR_SUSPEND_ABORTED)) ||
 997                   ((crq->valid) && ((crq->valid != VALID_TRANS_EVENT) ||
 998                                     (crq->format != RESUME_FROM_SUSP)))) {
 999                if (idle) {
1000                        vscsi->state = ERR_DISCONNECT_RECONNECT;
1001                        ibmvscsis_reset_queue(vscsi);
1002                        rc = -1;
1003                } else if (vscsi->state == CONNECTED) {
1004                        ibmvscsis_post_disconnect(vscsi,
1005                                                  ERR_DISCONNECT_RECONNECT, 0);
1006                }
1007
1008                vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE;
1009
1010                if ((crq->valid) && ((crq->valid != VALID_TRANS_EVENT) ||
1011                                     (crq->format != RESUME_FROM_SUSP)))
1012                        dev_err(&vscsi->dev, "Invalid element in CRQ after Prepare for Suspend");
1013        }
1014
1015        vscsi->flags &= ~(PREP_FOR_SUSPEND_PENDING | PREP_FOR_SUSPEND_ABORTED);
1016
1017        return rc;
1018}
1019
1020/**
1021 * ibmvscsis_trans_event() - Handle a Transport Event
1022 * @vscsi:      Pointer to our adapter structure
1023 * @crq:        Pointer to CRQ entry containing the Transport Event
1024 *
1025 * Do the logic to close the I_T nexus.  This function may not
1026 * behave to specification.
1027 *
1028 * EXECUTION ENVIRONMENT:
1029 *      Interrupt, interrupt lock held
1030 */
1031static long ibmvscsis_trans_event(struct scsi_info *vscsi,
1032                                  struct viosrp_crq *crq)
1033{
1034        long rc = ADAPT_SUCCESS;
1035
1036        dev_dbg(&vscsi->dev, "trans_event: format %d, flags 0x%x, state 0x%hx\n",
1037                (int)crq->format, vscsi->flags, vscsi->state);
1038
1039        switch (crq->format) {
1040        case MIGRATED:
1041        case PARTNER_FAILED:
1042        case PARTNER_DEREGISTER:
1043                ibmvscsis_delete_client_info(vscsi, true);
1044                if (crq->format == MIGRATED)
1045                        vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE;
1046                switch (vscsi->state) {
1047                case NO_QUEUE:
1048                case ERR_DISCONNECTED:
1049                case UNDEFINED:
1050                        break;
1051
1052                case UNCONFIGURING:
1053                        vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT);
1054                        break;
1055
1056                case WAIT_ENABLED:
1057                        break;
1058
1059                case WAIT_CONNECTION:
1060                        break;
1061
1062                case CONNECTED:
1063                        ibmvscsis_post_disconnect(vscsi, WAIT_IDLE,
1064                                                  (RESPONSE_Q_DOWN |
1065                                                   TRANS_EVENT));
1066                        break;
1067
1068                case SRP_PROCESSING:
1069                        if ((vscsi->debit > 0) ||
1070                            !list_empty(&vscsi->schedule_q) ||
1071                            !list_empty(&vscsi->waiting_rsp) ||
1072                            !list_empty(&vscsi->active_q)) {
1073                                dev_dbg(&vscsi->dev, "debit %d, sched %d, wait %d, active %d\n",
1074                                        vscsi->debit,
1075                                        (int)list_empty(&vscsi->schedule_q),
1076                                        (int)list_empty(&vscsi->waiting_rsp),
1077                                        (int)list_empty(&vscsi->active_q));
1078                                dev_warn(&vscsi->dev, "connection lost with outstanding work\n");
1079                        } else {
1080                                dev_dbg(&vscsi->dev, "trans_event: SRP Processing, but no outstanding work\n");
1081                        }
1082
1083                        ibmvscsis_post_disconnect(vscsi, WAIT_IDLE,
1084                                                  (RESPONSE_Q_DOWN |
1085                                                   TRANS_EVENT));
1086                        break;
1087
1088                case ERR_DISCONNECT:
1089                case ERR_DISCONNECT_RECONNECT:
1090                case WAIT_IDLE:
1091                        vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT);
1092                        break;
1093                }
1094                break;
1095
1096        case PREPARE_FOR_SUSPEND:
1097                dev_dbg(&vscsi->dev, "Prep for Suspend, crq status = 0x%x\n",
1098                        (int)crq->status);
1099                switch (vscsi->state) {
1100                case ERR_DISCONNECTED:
1101                case WAIT_CONNECTION:
1102                case CONNECTED:
1103                        ibmvscsis_ready_for_suspend(vscsi, false);
1104                        break;
1105                case SRP_PROCESSING:
1106                        vscsi->resume_state = vscsi->state;
1107                        vscsi->flags |= PREP_FOR_SUSPEND_PENDING;
1108                        if (crq->status == CRQ_ENTRY_OVERWRITTEN)
1109                                vscsi->flags |= PREP_FOR_SUSPEND_OVERWRITE;
1110                        ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, 0);
1111                        break;
1112                case NO_QUEUE:
1113                case UNDEFINED:
1114                case UNCONFIGURING:
1115                case WAIT_ENABLED:
1116                case ERR_DISCONNECT:
1117                case ERR_DISCONNECT_RECONNECT:
1118                case WAIT_IDLE:
1119                        dev_err(&vscsi->dev, "Invalid state for Prepare for Suspend Trans Event: 0x%x\n",
1120                                vscsi->state);
1121                        break;
1122                }
1123                break;
1124
1125        case RESUME_FROM_SUSP:
1126                dev_dbg(&vscsi->dev, "Resume from Suspend, crq status = 0x%x\n",
1127                        (int)crq->status);
1128                if (vscsi->flags & PREP_FOR_SUSPEND_PENDING) {
1129                        vscsi->flags |= PREP_FOR_SUSPEND_ABORTED;
1130                } else {
1131                        if ((crq->status == CRQ_ENTRY_OVERWRITTEN) ||
1132                            (vscsi->flags & PREP_FOR_SUSPEND_OVERWRITE)) {
1133                                ibmvscsis_post_disconnect(vscsi,
1134                                                          ERR_DISCONNECT_RECONNECT,
1135                                                          0);
1136                                vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE;
1137                        }
1138                }
1139                break;
1140
1141        default:
1142                rc = ERROR;
1143                dev_err(&vscsi->dev, "trans_event: invalid format %d\n",
1144                        (uint)crq->format);
1145                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT,
1146                                          RESPONSE_Q_DOWN);
1147                break;
1148        }
1149
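        /*
         * a non-zero return tells the caller that a disconnect has been
         * scheduled, so it should stop parsing new commands and only
         * service further transport events
         */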
1150        rc = vscsi->flags & SCHEDULE_DISCONNECT;
1151
1152        dev_dbg(&vscsi->dev, "Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n",
1153                vscsi->flags, vscsi->state, rc);
1154
1155        return rc;
1156}
1157
1158/**
1159 * ibmvscsis_poll_cmd_q() - Poll Command Queue
1160 * @vscsi:      Pointer to our adapter structure
1161 *
1162 * Called to handle command elements that may have arrived while
1163 * interrupts were disabled.
1164 *
1165 * EXECUTION ENVIRONMENT:
1166 *      intr_lock must be held
1167 */
1168static void ibmvscsis_poll_cmd_q(struct scsi_info *vscsi)
1169{
1170        struct viosrp_crq *crq;
1171        long rc;
1172        bool ack = true;
1173        volatile u8 valid;
1174
 1175        dev_dbg(&vscsi->dev, "poll_cmd_q: flags 0x%x, state 0x%hx, q index %u\n",
1176                vscsi->flags, vscsi->state, vscsi->cmd_q.index);
1177
1178        rc = vscsi->flags & SCHEDULE_DISCONNECT;
1179        crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
1180        valid = crq->valid;
1181        dma_rmb();
1182
1183        while (valid) {
1184poll_work:
1185                vscsi->cmd_q.index =
1186                        (vscsi->cmd_q.index + 1) & vscsi->cmd_q.mask;
1187
1188                if (!rc) {
1189                        rc = ibmvscsis_parse_command(vscsi, crq);
1190                } else {
1191                        if ((uint)crq->valid == VALID_TRANS_EVENT) {
1192                                /*
1193                                 * must service the transport layer events even
 1194                                 * in an error state, don't break out until all
1195                                 * the consecutive transport events have been
1196                                 * processed
1197                                 */
1198                                rc = ibmvscsis_trans_event(vscsi, crq);
1199                        } else if (vscsi->flags & TRANS_EVENT) {
1200                                /*
 1201                                 * if a transport event has occurred, leave
1202                                 * everything but transport events on the queue
1203                                 */
1204                                dev_dbg(&vscsi->dev, "poll_cmd_q, ignoring\n");
1205
1206                                /*
1207                                 * need to decrement the queue index so we can
 1208                                 * look at the element again
1209                                 */
1210                                if (vscsi->cmd_q.index)
1211                                        vscsi->cmd_q.index -= 1;
1212                                else
1213                                        /*
 1214                                         * index is at 0, it just wrapped;
 1215                                         * have it index the last element in the q
1216                                         */
1217                                        vscsi->cmd_q.index = vscsi->cmd_q.mask;
1218                                break;
1219                        }
1220                }
1221
1222                crq->valid = INVALIDATE_CMD_RESP_EL;
1223
1224                crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
1225                valid = crq->valid;
1226                dma_rmb();
1227        }
1228
1229        if (!rc) {
1230                if (ack) {
1231                        vio_enable_interrupts(vscsi->dma_dev);
1232                        ack = false;
1233                        dev_dbg(&vscsi->dev, "poll_cmd_q, reenabling interrupts\n");
1234                }
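                /*
                 * recheck the queue for elements that arrived in the window
                 * before interrupts were re-enabled
                 */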
1235                valid = crq->valid;
1236                dma_rmb();
1237                if (valid)
1238                        goto poll_work;
1239        }
1240
1241        dev_dbg(&vscsi->dev, "Leaving poll_cmd_q: rc %ld\n", rc);
1242}
1243
1244/**
1245 * ibmvscsis_free_cmd_qs() - Free elements in queue
1246 * @vscsi:      Pointer to our adapter structure
1247 *
1248 * Free all of the elements on all queues that are waiting for
1249 * whatever reason.
1250 *
1251 * PRECONDITION:
1252 *      Called with interrupt lock held
1253 */
1254static void ibmvscsis_free_cmd_qs(struct scsi_info *vscsi)
1255{
1256        struct ibmvscsis_cmd *cmd, *nxt;
1257
 1258        dev_dbg(&vscsi->dev, "free_cmd_qs: waiting_rsp empty %d, timer started %d\n",
1259                (int)list_empty(&vscsi->waiting_rsp),
1260                vscsi->rsp_q_timer.started);
1261
1262        list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
1263                list_del(&cmd->list);
1264                ibmvscsis_free_cmd_resources(vscsi, cmd);
1265        }
1266}
1267
1268/**
1269 * ibmvscsis_get_free_cmd() - Get free command from list
1270 * @vscsi:      Pointer to our adapter structure
1271 *
1272 * Must be called with interrupt lock held.
1273 */
1274static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi)
1275{
1276        struct ibmvscsis_cmd *cmd = NULL;
1277        struct iu_entry *iue;
1278
1279        iue = srp_iu_get(&vscsi->target);
1280        if (iue) {
1281                cmd = list_first_entry_or_null(&vscsi->free_cmd,
1282                                               struct ibmvscsis_cmd, list);
1283                if (cmd) {
1284                        if (cmd->abort_cmd)
1285                                cmd->abort_cmd = NULL;
1286                        cmd->flags &= ~(DELAY_SEND);
1287                        list_del(&cmd->list);
1288                        cmd->iue = iue;
1289                        cmd->type = UNSET_TYPE;
1290                        memset(&cmd->se_cmd, 0, sizeof(cmd->se_cmd));
1291                } else {
1292                        srp_iu_put(iue);
1293                }
1294        }
1295
1296        return cmd;
1297}
1298
1299/**
1300 * ibmvscsis_adapter_idle() - Helper function to handle idle adapter
1301 * @vscsi:      Pointer to our adapter structure
1302 *
 1303 * This function is called when the adapter is idle while the driver
 1304 * is attempting to clear an error condition.
 1305 * The adapter is considered busy if any of its cmd queues
 1306 * are non-empty. This function can be invoked
 1307 * from the off-level disconnect function.
1308 *
1309 * EXECUTION ENVIRONMENT:
1310 *      Process environment called with interrupt lock held
1311 */
1312static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
1313{
1314        int free_qs = false;
1315        long rc = 0;
1316
1317        dev_dbg(&vscsi->dev, "adapter_idle: flags 0x%x, state 0x%hx\n",
1318                vscsi->flags, vscsi->state);
1319
1320        /* Only need to free qs if we're disconnecting from client */
1321        if (vscsi->state != WAIT_CONNECTION || vscsi->flags & TRANS_EVENT)
1322                free_qs = true;
1323
1324        switch (vscsi->state) {
1325        case UNCONFIGURING:
1326                ibmvscsis_free_command_q(vscsi);
1327                dma_rmb();
1328                isync();
1329                if (vscsi->flags & CFG_SLEEPING) {
1330                        vscsi->flags &= ~CFG_SLEEPING;
1331                        complete(&vscsi->unconfig);
1332                }
1333                break;
1334        case ERR_DISCONNECT_RECONNECT:
1335                ibmvscsis_reset_queue(vscsi);
1336                dev_dbg(&vscsi->dev, "adapter_idle, disc_rec: flags 0x%x\n",
1337                        vscsi->flags);
1338                break;
1339
1340        case ERR_DISCONNECT:
1341                ibmvscsis_free_command_q(vscsi);
1342                vscsi->flags &= ~(SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED);
1343                vscsi->flags |= RESPONSE_Q_DOWN;
1344                if (vscsi->tport.enabled)
1345                        vscsi->state = ERR_DISCONNECTED;
1346                else
1347                        vscsi->state = WAIT_ENABLED;
1348                dev_dbg(&vscsi->dev, "adapter_idle, disc: flags 0x%x, state 0x%hx\n",
1349                        vscsi->flags, vscsi->state);
1350                break;
1351
1352        case WAIT_IDLE:
1353                vscsi->rsp_q_timer.timer_pops = 0;
1354                vscsi->debit = 0;
1355                vscsi->credit = 0;
1356                if (vscsi->flags & PREP_FOR_SUSPEND_PENDING) {
1357                        vscsi->state = vscsi->resume_state;
1358                        vscsi->resume_state = 0;
1359                        rc = ibmvscsis_ready_for_suspend(vscsi, true);
1360                        vscsi->flags &= ~DISCONNECT_SCHEDULED;
1361                        if (rc)
1362                                break;
1363                } else if (vscsi->flags & TRANS_EVENT) {
1364                        vscsi->state = WAIT_CONNECTION;
1365                        vscsi->flags &= PRESERVE_FLAG_FIELDS;
1366                } else {
1367                        vscsi->state = CONNECTED;
1368                        vscsi->flags &= ~DISCONNECT_SCHEDULED;
1369                }
1370
1371                dev_dbg(&vscsi->dev, "adapter_idle, wait: flags 0x%x, state 0x%hx\n",
1372                        vscsi->flags, vscsi->state);
1373                ibmvscsis_poll_cmd_q(vscsi);
1374                break;
1375
1376        case ERR_DISCONNECTED:
1377                vscsi->flags &= ~DISCONNECT_SCHEDULED;
1378                dev_dbg(&vscsi->dev, "adapter_idle, disconnected: flags 0x%x, state 0x%hx\n",
1379                        vscsi->flags, vscsi->state);
1380                break;
1381
1382        default:
1383                dev_err(&vscsi->dev, "adapter_idle: in invalid state %d\n",
1384                        vscsi->state);
1385                break;
1386        }
1387
1388        if (free_qs)
1389                ibmvscsis_free_cmd_qs(vscsi);
1390
1391        /*
1392         * There is a timing window where we could lose a disconnect request.
1393         * The known path to this window occurs during the DISCONNECT_RECONNECT
1394         * case above: reset_queue calls free_command_q, which will release the
1395         * interrupt lock.  During that time, a new post_disconnect call can be
1396         * made with a "more severe" state (DISCONNECT or UNCONFIGURING).
1397         * Because the DISCONNECT_SCHEDULED flag is already set, post_disconnect
1398         * will only set the new_state.  Now free_command_q reacquires the intr
1399         * lock and clears the DISCONNECT_SCHEDULED flag (using PRESERVE_FLAG_
1400         * FIELDS), and the disconnect is lost.  This is particularly bad when
1401         * the new disconnect was for UNCONFIGURING, since the unconfigure hangs
1402         * forever.
 1403         * The fix is that free_command_q sets the acr state and acr flags
 1404         * if there was a change while the lock was dropped.
 1405         * Note: free_command_q writes to this state and clears it before
 1406         * releasing the lock; different callers invoke free_command_q at
 1407         * different times, so don't initialize the acr fields above.
1408         */
1409        if (vscsi->phyp_acr_state != 0) {
1410                /*
1411                 * set any bits in flags that may have been cleared by
1412                 * a call to free command queue in switch statement
1413                 * or reset queue
1414                 */
1415                vscsi->flags |= vscsi->phyp_acr_flags;
1416                ibmvscsis_post_disconnect(vscsi, vscsi->phyp_acr_state, 0);
1417                vscsi->phyp_acr_state = 0;
1418                vscsi->phyp_acr_flags = 0;
1419
1420                dev_dbg(&vscsi->dev, "adapter_idle: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
1421                        vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
1422                        vscsi->phyp_acr_state);
1423        }
1424
1425        dev_dbg(&vscsi->dev, "Leaving adapter_idle: flags 0x%x, state 0x%hx, new_state 0x%x\n",
1426                vscsi->flags, vscsi->state, vscsi->new_state);
1427}
1428
1429/**
1430 * ibmvscsis_copy_crq_packet() - Copy CRQ Packet
1431 * @vscsi:      Pointer to our adapter structure
1432 * @cmd:        Pointer to command element to use to process the request
1433 * @crq:        Pointer to CRQ entry containing the request
1434 *
1435 * Copy the SRP information unit from the hosted
1436 * partition using remote DMA.
1437 *
1438 * EXECUTION ENVIRONMENT:
1439 *      Interrupt, interrupt lock held
1440 */
1441static long ibmvscsis_copy_crq_packet(struct scsi_info *vscsi,
1442                                      struct ibmvscsis_cmd *cmd,
1443                                      struct viosrp_crq *crq)
1444{
1445        struct iu_entry *iue = cmd->iue;
1446        long rc = 0;
1447        u16 len;
1448
1449        len = be16_to_cpu(crq->IU_length);
1450        if ((len > SRP_MAX_IU_LEN) || (len == 0)) {
1451                dev_err(&vscsi->dev, "copy_crq: Invalid len %d passed\n", len);
1452                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
1453                return SRP_VIOLATION;
1454        }
1455
1456        rc = h_copy_rdma(len, vscsi->dds.window[REMOTE].liobn,
1457                         be64_to_cpu(crq->IU_data_ptr),
1458                         vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma);
1459
1460        switch (rc) {
1461        case H_SUCCESS:
1462                cmd->init_time = mftb();
1463                iue->remote_token = crq->IU_data_ptr;
1464                iue->iu_len = len;
1465                dev_dbg(&vscsi->dev, "copy_crq: ioba 0x%llx, init_time 0x%llx\n",
1466                        be64_to_cpu(crq->IU_data_ptr), cmd->init_time);
1467                break;
1468        case H_PERMISSION:
1469                if (connection_broken(vscsi))
1470                        ibmvscsis_post_disconnect(vscsi,
1471                                                  ERR_DISCONNECT_RECONNECT,
1472                                                  (RESPONSE_Q_DOWN |
1473                                                   CLIENT_FAILED));
1474                else
1475                        ibmvscsis_post_disconnect(vscsi,
1476                                                  ERR_DISCONNECT_RECONNECT, 0);
1477
1478                dev_err(&vscsi->dev, "copy_crq: h_copy_rdma failed, rc %ld\n",
1479                        rc);
1480                break;
1481        case H_DEST_PARM:
1482        case H_SOURCE_PARM:
1483        default:
1484                dev_err(&vscsi->dev, "copy_crq: h_copy_rdma failed, rc %ld\n",
1485                        rc);
1486                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
1487                break;
1488        }
1489
1490        return rc;
1491}
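
    /*
     * As an illustration, the typical call pattern for this helper (see
     * ibmvscsis_mad() and ibmvscsis_srp_cmd() later in this file) is
     * roughly:
     *
     *        cmd = ibmvscsis_get_free_cmd(vscsi);
     *        if (cmd) {
     *                rc = ibmvscsis_copy_crq_packet(vscsi, cmd, crq);
     *                if (!rc)
     *                        ... dispatch the copied IU (MAD or SRP) ...
     *                else
     *                        ibmvscsis_free_cmd_resources(vscsi, cmd);
     *        }
     */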
1492
1493/**
1494 * ibmvscsis_adapter_info() - Service an Adapter Info MAnagement Datagram
1495 * @vscsi:      Pointer to our adapter structure
1496 * @iue:        Information Unit containing the Adapter Info MAD request
1497 *
1498 * EXECUTION ENVIRONMENT:
1499 *      Interrupt, adapter lock held
1500 */
1501static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
1502                                   struct iu_entry *iue)
1503{
1504        struct viosrp_adapter_info *mad = &vio_iu(iue)->mad.adapter_info;
1505        struct mad_adapter_info_data *info;
1506        uint flag_bits = 0;
1507        dma_addr_t token;
1508        long rc;
1509
1510        mad->common.status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
1511
1512        if (be16_to_cpu(mad->common.length) > sizeof(*info)) {
1513                mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
1514                return 0;
1515        }
1516
1517        info = dma_alloc_coherent(&vscsi->dma_dev->dev, sizeof(*info), &token,
1518                                  GFP_ATOMIC);
1519        if (!info) {
1520                dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
1521                        iue->target);
1522                mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
1523                return 0;
1524        }
1525
1526        /* Get remote info */
1527        rc = h_copy_rdma(be16_to_cpu(mad->common.length),
1528                         vscsi->dds.window[REMOTE].liobn,
1529                         be64_to_cpu(mad->buffer),
1530                         vscsi->dds.window[LOCAL].liobn, token);
1531
1532        if (rc != H_SUCCESS) {
1533                if (rc == H_PERMISSION) {
1534                        if (connection_broken(vscsi))
1535                                flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
1536                }
1537                dev_warn(&vscsi->dev, "adapter_info: h_copy_rdma from client failed, rc %ld\n",
1538                         rc);
1539                dev_dbg(&vscsi->dev, "adapter_info: ioba 0x%llx, flags 0x%x, flag_bits 0x%x\n",
1540                        be64_to_cpu(mad->buffer), vscsi->flags, flag_bits);
1541                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
1542                                          flag_bits);
1543                goto free_dma;
1544        }
1545
1546        /*
1547         * Copy client info, but ignore partition number, which we
1548         * already got from phyp - unless we failed to get it from
1549         * phyp (e.g. if we're running on a p5 system).
1550         */
1551        if (vscsi->client_data.partition_number == 0)
1552                vscsi->client_data.partition_number =
1553                        be32_to_cpu(info->partition_number);
1554        strncpy(vscsi->client_data.srp_version, info->srp_version,
1555                sizeof(vscsi->client_data.srp_version));
1556        strncpy(vscsi->client_data.partition_name, info->partition_name,
1557                sizeof(vscsi->client_data.partition_name));
1558        vscsi->client_data.mad_version = be32_to_cpu(info->mad_version);
1559        vscsi->client_data.os_type = be32_to_cpu(info->os_type);
1560
1561        /* Copy our info */
1562        strncpy(info->srp_version, SRP_VERSION,
1563                sizeof(info->srp_version));
1564        strncpy(info->partition_name, vscsi->dds.partition_name,
1565                sizeof(info->partition_name));
1566        info->partition_number = cpu_to_be32(vscsi->dds.partition_num);
1567        info->mad_version = cpu_to_be32(MAD_VERSION_1);
1568        info->os_type = cpu_to_be32(LINUX);
1569        memset(&info->port_max_txu[0], 0, sizeof(info->port_max_txu));
1570        info->port_max_txu[0] = cpu_to_be32(MAX_TXU);
1571
1572        dma_wmb();
1573        rc = h_copy_rdma(sizeof(*info), vscsi->dds.window[LOCAL].liobn,
1574                         token, vscsi->dds.window[REMOTE].liobn,
1575                         be64_to_cpu(mad->buffer));
1576        switch (rc) {
1577        case H_SUCCESS:
1578                break;
1579
1580        case H_SOURCE_PARM:
1581        case H_DEST_PARM:
1582        case H_PERMISSION:
1583                if (connection_broken(vscsi))
1584                        flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
1585                fallthrough;
1586        default:
1587                dev_err(&vscsi->dev, "adapter_info: h_copy_rdma to client failed, rc %ld\n",
1588                        rc);
1589                ibmvscsis_post_disconnect(vscsi,
1590                                          ERR_DISCONNECT_RECONNECT,
1591                                          flag_bits);
1592                break;
1593        }
1594
1595free_dma:
1596        dma_free_coherent(&vscsi->dma_dev->dev, sizeof(*info), info, token);
1597        dev_dbg(&vscsi->dev, "Leaving adapter_info, rc %ld\n", rc);
1598
1599        return rc;
1600}
1601
1602/**
1603 * ibmvscsis_cap_mad() - Service a Capabilities MAnagement Datagram
1604 * @vscsi:      Pointer to our adapter structure
1605 * @iue:        Information Unit containing the Capabilities MAD request
1606 *
1607 * NOTE: if you return an error from this routine, you must be
1608 * disconnecting or you will cause a hang.
1609 *
1610 * EXECUTION ENVIRONMENT:
1611 *      Interrupt, called with adapter lock held
1612 */
1613static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
1614{
1615        struct viosrp_capabilities *mad = &vio_iu(iue)->mad.capabilities;
1616        struct capabilities *cap;
1617        struct mad_capability_common *common;
1618        dma_addr_t token;
1619        u16 olen, len, status, min_len, cap_len;
1620        u32 flag;
1621        uint flag_bits = 0;
1622        long rc = 0;
1623
1624        olen = be16_to_cpu(mad->common.length);
1625        /*
1626         * struct capabilities hardcodes a couple of capabilities after the
1627         * header, but the capabilities can actually be in any order.
1628         */
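            /*
             * The parsing loop below treats the buffer as a fixed header
             * (everything up to the offset of the migration member) followed
             * by a sequence of entries, each starting with a struct
             * mad_capability_common whose length field gives that entry's
             * total size, so the entries are self-describing and may appear
             * in any order.
             */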
1629        min_len = offsetof(struct capabilities, migration);
1630        if ((olen < min_len) || (olen > PAGE_SIZE)) {
1631                dev_warn(&vscsi->dev, "cap_mad: invalid len %d\n", olen);
1632                mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
1633                return 0;
1634        }
1635
1636        cap = dma_alloc_coherent(&vscsi->dma_dev->dev, olen, &token,
1637                                 GFP_ATOMIC);
1638        if (!cap) {
1639                dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
1640                        iue->target);
1641                mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
1642                return 0;
1643        }
1644        rc = h_copy_rdma(olen, vscsi->dds.window[REMOTE].liobn,
1645                         be64_to_cpu(mad->buffer),
1646                         vscsi->dds.window[LOCAL].liobn, token);
1647        if (rc == H_SUCCESS) {
1648                strncpy(cap->name, dev_name(&vscsi->dma_dev->dev),
1649                        SRP_MAX_LOC_LEN);
1650
1651                len = olen - min_len;
1652                status = VIOSRP_MAD_SUCCESS;
1653                common = (struct mad_capability_common *)&cap->migration;
1654
1655                while ((len > 0) && (status == VIOSRP_MAD_SUCCESS) && !rc) {
1656                        dev_dbg(&vscsi->dev, "cap_mad: len left %hd, cap type %d, cap len %hd\n",
1657                                len, be32_to_cpu(common->cap_type),
1658                                be16_to_cpu(common->length));
1659
1660                        cap_len = be16_to_cpu(common->length);
1661                        if (cap_len > len) {
1662                                dev_err(&vscsi->dev, "cap_mad: cap len mismatch with total len\n");
1663                                status = VIOSRP_MAD_FAILED;
1664                                break;
1665                        }
1666
1667                        if (cap_len == 0) {
1668                                dev_err(&vscsi->dev, "cap_mad: cap len is 0\n");
1669                                status = VIOSRP_MAD_FAILED;
1670                                break;
1671                        }
1672
1673                        switch (common->cap_type) {
1674                        default:
1675                                dev_dbg(&vscsi->dev, "cap_mad: unsupported capability\n");
1676                                common->server_support = 0;
1677                                flag = cpu_to_be32((u32)CAP_LIST_SUPPORTED);
1678                                cap->flags &= ~flag;
1679                                break;
1680                        }
1681
1682                        len = len - cap_len;
1683                        common = (struct mad_capability_common *)
1684                                ((char *)common + cap_len);
1685                }
1686
1687                mad->common.status = cpu_to_be16(status);
1688
1689                dma_wmb();
1690                rc = h_copy_rdma(olen, vscsi->dds.window[LOCAL].liobn, token,
1691                                 vscsi->dds.window[REMOTE].liobn,
1692                                 be64_to_cpu(mad->buffer));
1693
1694                if (rc != H_SUCCESS) {
1695                        dev_dbg(&vscsi->dev, "cap_mad: failed to copy to client, rc %ld\n",
1696                                rc);
1697
1698                        if (rc == H_PERMISSION) {
1699                                if (connection_broken(vscsi))
1700                                        flag_bits = (RESPONSE_Q_DOWN |
1701                                                     CLIENT_FAILED);
1702                        }
1703
1704                        dev_warn(&vscsi->dev, "cap_mad: error copying data to client, rc %ld\n",
1705                                 rc);
1706                        ibmvscsis_post_disconnect(vscsi,
1707                                                  ERR_DISCONNECT_RECONNECT,
1708                                                  flag_bits);
1709                }
1710        }
1711
1712        dma_free_coherent(&vscsi->dma_dev->dev, olen, cap, token);
1713
1714        dev_dbg(&vscsi->dev, "Leaving cap_mad, rc %ld, client_cap 0x%x\n",
1715                rc, vscsi->client_cap);
1716
1717        return rc;
1718}
1719
1720/**
1721 * ibmvscsis_process_mad() - Service a MAnagement Datagram
1722 * @vscsi:      Pointer to our adapter structure
1723 * @iue:        Information Unit containing the MAD request
1724 *
1725 * Must be called with interrupt lock held.
1726 */
1727static long ibmvscsis_process_mad(struct scsi_info *vscsi, struct iu_entry *iue)
1728{
1729        struct mad_common *mad = (struct mad_common *)&vio_iu(iue)->mad;
1730        struct viosrp_empty_iu *empty;
1731        long rc = ADAPT_SUCCESS;
1732
1733        switch (be32_to_cpu(mad->type)) {
1734        case VIOSRP_EMPTY_IU_TYPE:
1735                empty = &vio_iu(iue)->mad.empty_iu;
1736                vscsi->empty_iu_id = be64_to_cpu(empty->buffer);
1737                vscsi->empty_iu_tag = be64_to_cpu(empty->common.tag);
1738                mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
1739                break;
1740        case VIOSRP_ADAPTER_INFO_TYPE:
1741                rc = ibmvscsis_adapter_info(vscsi, iue);
1742                break;
1743        case VIOSRP_CAPABILITIES_TYPE:
1744                rc = ibmvscsis_cap_mad(vscsi, iue);
1745                break;
1746        case VIOSRP_ENABLE_FAST_FAIL:
1747                if (vscsi->state == CONNECTED) {
1748                        vscsi->fast_fail = true;
1749                        mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
1750                } else {
1751                        dev_warn(&vscsi->dev, "fast fail mad sent after login\n");
1752                        mad->status = cpu_to_be16(VIOSRP_MAD_FAILED);
1753                }
1754                break;
1755        default:
1756                mad->status = cpu_to_be16(VIOSRP_MAD_NOT_SUPPORTED);
1757                break;
1758        }
1759
1760        return rc;
1761}
1762
1763/**
1764 * srp_snd_msg_failed() - Handle an error when sending a response
1765 * @vscsi:      Pointer to our adapter structure
1766 * @rc:         The return code from the h_send_crq command
1767 *
1768 * Must be called with interrupt lock held.
1769 */
1770static void srp_snd_msg_failed(struct scsi_info *vscsi, long rc)
1771{
1772        ktime_t kt;
1773
1774        if (rc != H_DROPPED) {
1775                ibmvscsis_free_cmd_qs(vscsi);
1776
1777                if (rc == H_CLOSED)
1778                        vscsi->flags |= CLIENT_FAILED;
1779
1780                /* don't flag the same problem multiple times */
1781                if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
1782                        vscsi->flags |= RESPONSE_Q_DOWN;
1783                        if (!(vscsi->state & (ERR_DISCONNECT |
1784                                              ERR_DISCONNECT_RECONNECT |
1785                                              ERR_DISCONNECTED | UNDEFINED))) {
1786                                dev_err(&vscsi->dev, "snd_msg_failed: setting RESPONSE_Q_DOWN, state 0x%hx, flags 0x%x, rc %ld\n",
1787                                        vscsi->state, vscsi->flags, rc);
1788                        }
1789                        ibmvscsis_post_disconnect(vscsi,
1790                                                  ERR_DISCONNECT_RECONNECT, 0);
1791                }
1792                return;
1793        }
1794
1795        /*
1796         * The response queue is full.  If the server is processing SRP
1797         * requests, i.e. the client has successfully done an SRP_LOGIN,
1798         * then it will wait forever for room in the queue.  However, if
1799         * the system admin is attempting to unconfigure the server, then
1800         * one or more children will be in a state where they are being
1801         * removed.  So if there is even one child being removed, the
1802         * driver assumes the system admin is attempting to break the
1803         * connection with the client and MAX_TIMER_POPS is honored.
1808         */
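            /*
             * In short: in the branch below, the response-queue timer is
             * (re)started with a WAIT_NANO_SECONDS period until
             * MAX_TIMER_POPS expirations have occurred; after that, while
             * the state is still SRP_PROCESSING, the timeslice is widened
             * to WAIT_SECONDS.
             */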
1809        if ((vscsi->rsp_q_timer.timer_pops < MAX_TIMER_POPS) ||
1810            (vscsi->state == SRP_PROCESSING)) {
1811                dev_dbg(&vscsi->dev, "snd_msg_failed: response queue full, flags 0x%x, timer started %d, pops %d\n",
1812                        vscsi->flags, (int)vscsi->rsp_q_timer.started,
1813                        vscsi->rsp_q_timer.timer_pops);
1814
1815                /*
1816                 * Check if the timer is running; if it
1817                 * is not then start it up.
1818                 */
1819                if (!vscsi->rsp_q_timer.started) {
1820                        if (vscsi->rsp_q_timer.timer_pops <
1821                            MAX_TIMER_POPS) {
1822                                kt = WAIT_NANO_SECONDS;
1823                        } else {
1824                                /*
1825                                 * slide the timeslice if the maximum
1826                                 * timer pops have already happened
1827                                 */
1828                                kt = ktime_set(WAIT_SECONDS, 0);
1829                        }
1830
1831                        vscsi->rsp_q_timer.started = true;
1832                        hrtimer_start(&vscsi->rsp_q_timer.timer, kt,
1833                                      HRTIMER_MODE_REL);
1834                }
1835        } else {
1836                /*
1837                 * TBD: Do we need to worry about this? Need to get
1838                 *      remove working.
1839                 */
1840                /*
1841                 * We have waited a long time and it appears the system
1842                 * admin is bringing this driver down.
1843                 */
1844                vscsi->flags |= RESPONSE_Q_DOWN;
1845                ibmvscsis_free_cmd_qs(vscsi);
1846                /*
1847                 * If the driver is already attempting to disconnect
1848                 * from the client and has already logged an error,
1849                 * trace this event but don't put it in the error log.
1850                 */
1851                if (!(vscsi->state & (ERR_DISCONNECT |
1852                                      ERR_DISCONNECT_RECONNECT |
1853                                      ERR_DISCONNECTED | UNDEFINED))) {
1854                        dev_err(&vscsi->dev, "client crq full too long\n");
1855                        ibmvscsis_post_disconnect(vscsi,
1856                                                  ERR_DISCONNECT_RECONNECT,
1857                                                  0);
1858                }
1859        }
1860}
1861
1862/**
1863 * ibmvscsis_send_messages() - Send a Response
1864 * @vscsi:      Pointer to our adapter structure
1865 *
1866 * Send a response, first checking the waiting queue. Responses are
1867 * sent in order they are received. If the response cannot be sent,
1868 * because the client queue is full, it stays on the waiting queue.
1869 *
1870 * PRECONDITION:
1871 *      Called with interrupt lock held
1872 */
1873static void ibmvscsis_send_messages(struct scsi_info *vscsi)
1874{
1875        u64 msg_hi = 0;
1876        /* Note: do not attempt to access the IU_data_ptr through this
1877         * pointer; it is not valid.
1878         */
1879        struct viosrp_crq *crq = (struct viosrp_crq *)&msg_hi;
1880        struct ibmvscsis_cmd *cmd, *nxt;
1881        long rc = ADAPT_SUCCESS;
1882        bool retry = false;
1883
1884        if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
1885                do {
1886                        retry = false;
1887                        list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp,
1888                                                 list) {
1889                                /*
1890                                 * Check to make sure abort cmd gets processed
1891                                 * prior to the abort tmr cmd
1892                                 */
1893                                if (cmd->flags & DELAY_SEND)
1894                                        continue;
1895
1896                                if (cmd->abort_cmd) {
1897                                        retry = true;
1898                                        cmd->abort_cmd->flags &= ~(DELAY_SEND);
1899                                        cmd->abort_cmd = NULL;
1900                                }
1901
1902                                /*
1903                                 * In the CMD_T_ABORTED w/o CMD_T_TAS case,
1904                                 * and in the case where LIO issued an
1905                                 * "ABORT_TASK: Sending TMR_TASK_DOES_NOT_EXIST",
1906                                 * we don't send a response, since it was
1907                                 * already done.
1908                                 */
1909                                if (cmd->se_cmd.transport_state & CMD_T_ABORTED &&
1910                                    !(cmd->se_cmd.transport_state & CMD_T_TAS)) {
1911                                        list_del(&cmd->list);
1912                                        ibmvscsis_free_cmd_resources(vscsi,
1913                                                                     cmd);
1914                                        /*
1915                                         * With an op successfully aborted
1916                                         * through LIO, we want to increment
1917                                         * the vscsi credit so that when we
1918                                         * don't send a rsp to the original
1919                                         * scsi abort op (h_send_crq), but
1920                                         * the tm rsp to the abort is sent,
1921                                         * the credit is correctly sent with
1922                                         * the abort tm rsp.  We need 1
1923                                         * credit for the abort tm rsp and 1
1924                                         * for the aborted scsi op, so we
1925                                         * increment here.  We also increment
1926                                         * the credit here to make sure cmd
1927                                         * is actually released first;
1928                                         * otherwise the client will think it
1929                                         * can send a new cmd, and we could
1930                                         * find ourselves short of cmd elements.
1931                                         */
1932                                        vscsi->credit += 1;
1933                                } else {
1934                                        crq->valid = VALID_CMD_RESP_EL;
1935                                        crq->format = cmd->rsp.format;
1936
1937                                        if (cmd->flags & CMD_FAST_FAIL)
1938                                                crq->status = VIOSRP_ADAPTER_FAIL;
1939
1940                                        crq->IU_length = cpu_to_be16(cmd->rsp.len);
1941
1942                                        rc = h_send_crq(vscsi->dma_dev->unit_address,
1943                                                        be64_to_cpu(msg_hi),
1944                                                        be64_to_cpu(cmd->rsp.tag));
1945
1946                                        dev_dbg(&vscsi->dev, "send_messages: cmd %p, tag 0x%llx, rc %ld\n",
1947                                                cmd, be64_to_cpu(cmd->rsp.tag),
1948                                                rc);
1949
1950                                        /* if all ok free up the command
1951                                         * element resources
1952                                         */
1953                                        if (rc == H_SUCCESS) {
1954                                                /* some movement has occurred */
1955                                                vscsi->rsp_q_timer.timer_pops = 0;
1956                                                list_del(&cmd->list);
1957
1958                                                ibmvscsis_free_cmd_resources(vscsi,
1959                                                                             cmd);
1960                                        } else {
1961                                                srp_snd_msg_failed(vscsi, rc);
1962                                                break;
1963                                        }
1964                                }
1965                        }
1966                } while (retry);
1967
1968                if (!rc) {
1969                        /*
1970                         * The timer could pop with the queue empty.  If
1971                         * this happens, rc will always indicate a
1972                         * success; clear the pop count.
1973                         */
1974                        vscsi->rsp_q_timer.timer_pops = 0;
1975                }
1976        } else {
1977                ibmvscsis_free_cmd_qs(vscsi);
1978        }
1979}
1980
1981/* Called with intr lock held */
1982static void ibmvscsis_send_mad_resp(struct scsi_info *vscsi,
1983                                    struct ibmvscsis_cmd *cmd,
1984                                    struct viosrp_crq *crq)
1985{
1986        struct iu_entry *iue = cmd->iue;
1987        struct mad_common *mad = (struct mad_common *)&vio_iu(iue)->mad;
1988        uint flag_bits = 0;
1989        long rc;
1990
1991        dma_wmb();
1992        rc = h_copy_rdma(sizeof(struct mad_common),
1993                         vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma,
1994                         vscsi->dds.window[REMOTE].liobn,
1995                         be64_to_cpu(crq->IU_data_ptr));
1996        if (!rc) {
1997                cmd->rsp.format = VIOSRP_MAD_FORMAT;
1998                cmd->rsp.len = sizeof(struct mad_common);
1999                cmd->rsp.tag = mad->tag;
2000                list_add_tail(&cmd->list, &vscsi->waiting_rsp);
2001                ibmvscsis_send_messages(vscsi);
2002        } else {
2003                dev_dbg(&vscsi->dev, "Error sending mad response, rc %ld\n",
2004                        rc);
2005                if (rc == H_PERMISSION) {
2006                        if (connection_broken(vscsi))
2007                                flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
2008                }
2009                dev_err(&vscsi->dev, "mad: failed to copy to client, rc %ld\n",
2010                        rc);
2011
2012                ibmvscsis_free_cmd_resources(vscsi, cmd);
2013                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
2014                                          flag_bits);
2015        }
2016}
2017
2018/**
2019 * ibmvscsis_mad() - Service a MAnagement Datagram.
2020 * @vscsi:      Pointer to our adapter structure
2021 * @crq:        Pointer to the CRQ entry containing the MAD request
2022 *
2023 * EXECUTION ENVIRONMENT:
2024 *      Interrupt, called with adapter lock held
2025 */
2026static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
2027{
2028        struct iu_entry *iue;
2029        struct ibmvscsis_cmd *cmd;
2030        struct mad_common *mad;
2031        long rc = ADAPT_SUCCESS;
2032
2033        switch (vscsi->state) {
2034                /*
2035                 * We have not exchanged Init Msgs yet, so this MAD was sent
2036                 * before the last Transport Event; client will not be
2037                 * expecting a response.
2038                 */
2039        case WAIT_CONNECTION:
2040                dev_dbg(&vscsi->dev, "mad: in Wait Connection state, ignoring MAD, flags %d\n",
2041                        vscsi->flags);
2042                return ADAPT_SUCCESS;
2043
2044        case SRP_PROCESSING:
2045        case CONNECTED:
2046                break;
2047
2048                /*
2049                 * We should never get here while we're in these states.
2050                 * Just log an error and get out.
2051                 */
2052        case UNCONFIGURING:
2053        case WAIT_IDLE:
2054        case ERR_DISCONNECT:
2055        case ERR_DISCONNECT_RECONNECT:
2056        default:
2057                dev_err(&vscsi->dev, "mad: invalid adapter state %d for mad\n",
2058                        vscsi->state);
2059                return ADAPT_SUCCESS;
2060        }
2061
2062        cmd = ibmvscsis_get_free_cmd(vscsi);
2063        if (!cmd) {
2064                dev_err(&vscsi->dev, "mad: failed to get cmd, debit %d\n",
2065                        vscsi->debit);
2066                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2067                return ERROR;
2068        }
2069        iue = cmd->iue;
2070        cmd->type = ADAPTER_MAD;
2071
2072        rc = ibmvscsis_copy_crq_packet(vscsi, cmd, crq);
2073        if (!rc) {
2074                mad = (struct mad_common *)&vio_iu(iue)->mad;
2075
2076                dev_dbg(&vscsi->dev, "mad: type %d\n", be32_to_cpu(mad->type));
2077
2078                rc = ibmvscsis_process_mad(vscsi, iue);
2079
2080                dev_dbg(&vscsi->dev, "mad: status %hd, rc %ld\n",
2081                        be16_to_cpu(mad->status), rc);
2082
2083                if (!rc)
2084                        ibmvscsis_send_mad_resp(vscsi, cmd, crq);
2085        } else {
2086                ibmvscsis_free_cmd_resources(vscsi, cmd);
2087        }
2088
2089        dev_dbg(&vscsi->dev, "Leaving mad, rc %ld\n", rc);
2090        return rc;
2091}
2092
2093/**
2094 * ibmvscsis_login_rsp() - Create/copy a login response notice to the client
2095 * @vscsi:      Pointer to our adapter structure
2096 * @cmd:        Pointer to the command for the SRP Login request
2097 *
2098 * EXECUTION ENVIRONMENT:
2099 *      Interrupt, interrupt lock held
2100 */
2101static long ibmvscsis_login_rsp(struct scsi_info *vscsi,
2102                                struct ibmvscsis_cmd *cmd)
2103{
2104        struct iu_entry *iue = cmd->iue;
2105        struct srp_login_rsp *rsp = &vio_iu(iue)->srp.login_rsp;
2106        struct format_code *fmt;
2107        uint flag_bits = 0;
2108        long rc = ADAPT_SUCCESS;
2109
2110        memset(rsp, 0, sizeof(struct srp_login_rsp));
2111
2112        rsp->opcode = SRP_LOGIN_RSP;
2113        rsp->req_lim_delta = cpu_to_be32(vscsi->request_limit);
2114        rsp->tag = cmd->rsp.tag;
2115        rsp->max_it_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
2116        rsp->max_ti_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
2117        fmt = (struct format_code *)&rsp->buf_fmt;
2118        fmt->buffers = SUPPORTED_FORMATS;
2119        vscsi->credit = 0;
2120
2121        cmd->rsp.len = sizeof(struct srp_login_rsp);
2122
2123        dma_wmb();
2124        rc = h_copy_rdma(cmd->rsp.len, vscsi->dds.window[LOCAL].liobn,
2125                         iue->sbuf->dma, vscsi->dds.window[REMOTE].liobn,
2126                         be64_to_cpu(iue->remote_token));
2127
2128        switch (rc) {
2129        case H_SUCCESS:
2130                break;
2131
2132        case H_PERMISSION:
2133                if (connection_broken(vscsi))
2134                        flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
2135                dev_err(&vscsi->dev, "login_rsp: error copying to client, rc %ld\n",
2136                        rc);
2137                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
2138                                          flag_bits);
2139                break;
2140        case H_SOURCE_PARM:
2141        case H_DEST_PARM:
2142        default:
2143                dev_err(&vscsi->dev, "login_rsp: error copying to client, rc %ld\n",
2144                        rc);
2145                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2146                break;
2147        }
2148
2149        return rc;
2150}
2151
2152/**
2153 * ibmvscsis_srp_login_rej() - Create/copy a login rejection notice to client
2154 * @vscsi:      Pointer to our adapter structure
2155 * @cmd:        Pointer to the command for the SRP Login request
2156 * @reason:     The reason the SRP Login is being rejected, per SRP protocol
2157 *
2158 * EXECUTION ENVIRONMENT:
2159 *      Interrupt, interrupt lock held
2160 */
2161static long ibmvscsis_srp_login_rej(struct scsi_info *vscsi,
2162                                    struct ibmvscsis_cmd *cmd, u32 reason)
2163{
2164        struct iu_entry *iue = cmd->iue;
2165        struct srp_login_rej *rej = &vio_iu(iue)->srp.login_rej;
2166        struct format_code *fmt;
2167        uint flag_bits = 0;
2168        long rc = ADAPT_SUCCESS;
2169
2170        memset(rej, 0, sizeof(*rej));
2171
2172        rej->opcode = SRP_LOGIN_REJ;
2173        rej->reason = cpu_to_be32(reason);
2174        rej->tag = cmd->rsp.tag;
2175        fmt = (struct format_code *)&rej->buf_fmt;
2176        fmt->buffers = SUPPORTED_FORMATS;
2177
2178        cmd->rsp.len = sizeof(*rej);
2179
2180        dma_wmb();
2181        rc = h_copy_rdma(cmd->rsp.len, vscsi->dds.window[LOCAL].liobn,
2182                         iue->sbuf->dma, vscsi->dds.window[REMOTE].liobn,
2183                         be64_to_cpu(iue->remote_token));
2184
2185        switch (rc) {
2186        case H_SUCCESS:
2187                break;
2188        case H_PERMISSION:
2189                if (connection_broken(vscsi))
2190                        flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
2191                dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n",
2192                        rc);
2193                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
2194                                          flag_bits);
2195                break;
2196        case H_SOURCE_PARM:
2197        case H_DEST_PARM:
2198        default:
2199                dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n",
2200                        rc);
2201                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2202                break;
2203        }
2204
2205        return rc;
2206}
2207
2208static int ibmvscsis_make_nexus(struct ibmvscsis_tport *tport)
2209{
2210        char *name = tport->tport_name;
2211        struct ibmvscsis_nexus *nexus;
2212        struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
2213        int rc;
2214
2215        if (tport->ibmv_nexus) {
2216                dev_dbg(&vscsi->dev, "tport->ibmv_nexus already exists\n");
2217                return 0;
2218        }
2219
2220        nexus = kzalloc(sizeof(*nexus), GFP_KERNEL);
2221        if (!nexus) {
2222                dev_err(&vscsi->dev, "Unable to allocate struct ibmvscsis_nexus\n");
2223                return -ENOMEM;
2224        }
2225
2226        nexus->se_sess = target_setup_session(&tport->se_tpg, 0, 0,
2227                                              TARGET_PROT_NORMAL, name, nexus,
2228                                              NULL);
2229        if (IS_ERR(nexus->se_sess)) {
2230                rc = PTR_ERR(nexus->se_sess);
2231                goto transport_init_fail;
2232        }
2233
2234        tport->ibmv_nexus = nexus;
2235
2236        return 0;
2237
2238transport_init_fail:
2239        kfree(nexus);
2240        return rc;
2241}
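
    /*
     * Note: ibmvscsis_make_nexus() is called from ibmvscsis_srp_login()
     * below; if it fails, the login is rejected with
     * SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL.
     */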
2242
2243static int ibmvscsis_drop_nexus(struct ibmvscsis_tport *tport)
2244{
2245        struct se_session *se_sess;
2246        struct ibmvscsis_nexus *nexus;
2247
2248        nexus = tport->ibmv_nexus;
2249        if (!nexus)
2250                return -ENODEV;
2251
2252        se_sess = nexus->se_sess;
2253        if (!se_sess)
2254                return -ENODEV;
2255
2256        /*
2257         * Release the SCSI I_T Nexus to the emulated ibmvscsis Target Port
2258         */
2259        target_remove_session(se_sess);
2260        tport->ibmv_nexus = NULL;
2261        kfree(nexus);
2262
2263        return 0;
2264}
2265
2266/**
2267 * ibmvscsis_srp_login() - Process an SRP Login Request
2268 * @vscsi:      Pointer to our adapter structure
2269 * @cmd:        Command element to use to process the SRP Login request
2270 * @crq:        Pointer to CRQ entry containing the SRP Login request
2271 *
2272 * EXECUTION ENVIRONMENT:
2273 *      Interrupt, called with interrupt lock held
2274 */
2275static long ibmvscsis_srp_login(struct scsi_info *vscsi,
2276                                struct ibmvscsis_cmd *cmd,
2277                                struct viosrp_crq *crq)
2278{
2279        struct iu_entry *iue = cmd->iue;
2280        struct srp_login_req *req = &vio_iu(iue)->srp.login_req;
2281        struct port_id {
2282                __be64 id_extension;
2283                __be64 io_guid;
2284        } *iport, *tport;
2285        struct format_code *fmt;
2286        u32 reason = 0x0;
2287        long rc = ADAPT_SUCCESS;
2288
2289        iport = (struct port_id *)req->initiator_port_id;
2290        tport = (struct port_id *)req->target_port_id;
2291        fmt = (struct format_code *)&req->req_buf_fmt;
2292        if (be32_to_cpu(req->req_it_iu_len) > SRP_MAX_IU_LEN)
2293                reason = SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE;
2294        else if (be32_to_cpu(req->req_it_iu_len) < 64)
2295                reason = SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL;
2296        else if ((be64_to_cpu(iport->id_extension) > (MAX_NUM_PORTS - 1)) ||
2297                 (be64_to_cpu(tport->id_extension) > (MAX_NUM_PORTS - 1)))
2298                reason = SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL;
2299        else if (req->req_flags & SRP_MULTICHAN_MULTI)
2300                reason = SRP_LOGIN_REJ_MULTI_CHANNEL_UNSUPPORTED;
2301        else if (fmt->buffers & (~SUPPORTED_FORMATS))
2302                reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT;
2303        else if ((fmt->buffers & SUPPORTED_FORMATS) == 0)
2304                reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT;
2305
2306        if (vscsi->state == SRP_PROCESSING)
2307                reason = SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED;
2308
2309        rc = ibmvscsis_make_nexus(&vscsi->tport);
2310        if (rc)
2311                reason = SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL;
2312
2313        cmd->rsp.format = VIOSRP_SRP_FORMAT;
2314        cmd->rsp.tag = req->tag;
2315
2316        dev_dbg(&vscsi->dev, "srp_login: reason 0x%x\n", reason);
2317
2318        if (reason)
2319                rc = ibmvscsis_srp_login_rej(vscsi, cmd, reason);
2320        else
2321                rc = ibmvscsis_login_rsp(vscsi, cmd);
2322
2323        if (!rc) {
2324                if (!reason)
2325                        vscsi->state = SRP_PROCESSING;
2326
2327                list_add_tail(&cmd->list, &vscsi->waiting_rsp);
2328                ibmvscsis_send_messages(vscsi);
2329        } else {
2330                ibmvscsis_free_cmd_resources(vscsi, cmd);
2331        }
2332
2333        dev_dbg(&vscsi->dev, "Leaving srp_login, rc %ld\n", rc);
2334        return rc;
2335}
2336
2337/**
2338 * ibmvscsis_srp_i_logout() - Helper Function to close I_T Nexus
2339 * @vscsi:      Pointer to our adapter structure
2340 * @cmd:        Command element to use to process the Implicit Logout request
2341 * @crq:        Pointer to CRQ entry containing the Implicit Logout request
2342 *
2343 * Do the logic to close the I_T nexus.  This function may not
2344 * behave according to the specification.
2345 *
2346 * EXECUTION ENVIRONMENT:
2347 *      Interrupt, interrupt lock held
2348 */
2349static long ibmvscsis_srp_i_logout(struct scsi_info *vscsi,
2350                                   struct ibmvscsis_cmd *cmd,
2351                                   struct viosrp_crq *crq)
2352{
2353        struct iu_entry *iue = cmd->iue;
2354        struct srp_i_logout *log_out = &vio_iu(iue)->srp.i_logout;
2355
2356        if ((vscsi->debit > 0) || !list_empty(&vscsi->schedule_q) ||
2357            !list_empty(&vscsi->waiting_rsp)) {
2358                dev_err(&vscsi->dev, "i_logout: outstanding work\n");
2359                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
2360        } else {
2361                cmd->rsp.format = SRP_FORMAT;
2362                cmd->rsp.tag = log_out->tag;
2363                cmd->rsp.len = sizeof(struct mad_common);
2364                list_add_tail(&cmd->list, &vscsi->waiting_rsp);
2365                ibmvscsis_send_messages(vscsi);
2366
2367                ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, 0);
2368        }
2369
2370        return ADAPT_SUCCESS;
2371}
2372
2373/* Called with intr lock held */
2374static void ibmvscsis_srp_cmd(struct scsi_info *vscsi, struct viosrp_crq *crq)
2375{
2376        struct ibmvscsis_cmd *cmd;
2377        struct iu_entry *iue;
2378        struct srp_cmd *srp;
2379        struct srp_tsk_mgmt *tsk;
2380        long rc;
2381
2382        if (vscsi->request_limit - vscsi->debit <= 0) {
2383                /* Client has exceeded request limit */
2384                dev_err(&vscsi->dev, "Client exceeded the request limit (%d), debit %d\n",
2385                        vscsi->request_limit, vscsi->debit);
2386                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2387                return;
2388        }
2389
2390        cmd = ibmvscsis_get_free_cmd(vscsi);
2391        if (!cmd) {
2392                dev_err(&vscsi->dev, "srp_cmd failed to get cmd, debit %d\n",
2393                        vscsi->debit);
2394                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2395                return;
2396        }
2397        iue = cmd->iue;
2398        srp = &vio_iu(iue)->srp.cmd;
2399
2400        rc = ibmvscsis_copy_crq_packet(vscsi, cmd, crq);
2401        if (rc) {
2402                ibmvscsis_free_cmd_resources(vscsi, cmd);
2403                return;
2404        }
2405
2406        if (vscsi->state == SRP_PROCESSING) {
2407                switch (srp->opcode) {
2408                case SRP_LOGIN_REQ:
2409                        rc = ibmvscsis_srp_login(vscsi, cmd, crq);
2410                        break;
2411
2412                case SRP_TSK_MGMT:
2413                        tsk = &vio_iu(iue)->srp.tsk_mgmt;
2414                        dev_dbg(&vscsi->dev, "tsk_mgmt tag: %llu (0x%llx)\n",
2415                                tsk->tag, tsk->tag);
2416                        cmd->rsp.tag = tsk->tag;
2417                        vscsi->debit += 1;
2418                        cmd->type = TASK_MANAGEMENT;
2419                        list_add_tail(&cmd->list, &vscsi->schedule_q);
2420                        queue_work(vscsi->work_q, &cmd->work);
2421                        break;
2422
2423                case SRP_CMD:
2424                        dev_dbg(&vscsi->dev, "srp_cmd tag: %llu (0x%llx)\n",
2425                                srp->tag, srp->tag);
2426                        cmd->rsp.tag = srp->tag;
2427                        vscsi->debit += 1;
2428                        cmd->type = SCSI_CDB;
2429                        /*
2430                         * We want to keep track of work waiting for
2431                         * the workqueue.
2432                         */
2433                        list_add_tail(&cmd->list, &vscsi->schedule_q);
2434                        queue_work(vscsi->work_q, &cmd->work);
2435                        break;
2436
2437                case SRP_I_LOGOUT:
2438                        rc = ibmvscsis_srp_i_logout(vscsi, cmd, crq);
2439                        break;
2440
2441                case SRP_CRED_RSP:
2442                case SRP_AER_RSP:
2443                default:
2444                        ibmvscsis_free_cmd_resources(vscsi, cmd);
2445                        dev_err(&vscsi->dev, "invalid srp cmd, opcode %d\n",
2446                                (uint)srp->opcode);
2447                        ibmvscsis_post_disconnect(vscsi,
2448                                                  ERR_DISCONNECT_RECONNECT, 0);
2449                        break;
2450                }
2451        } else if (srp->opcode == SRP_LOGIN_REQ && vscsi->state == CONNECTED) {
2452                rc = ibmvscsis_srp_login(vscsi, cmd, crq);
2453        } else {
2454                ibmvscsis_free_cmd_resources(vscsi, cmd);
2455                dev_err(&vscsi->dev, "Invalid state %d to handle srp cmd\n",
2456                        vscsi->state);
2457                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2458        }
2459}
2460
2461/**
2462 * ibmvscsis_ping_response() - Respond to a ping request
2463 * @vscsi:      Pointer to our adapter structure
2464 *
2465 * Let the client know that the server is alive and waiting on
2466 * its native I/O stack.
2467 * If any type of error occurs from the call to queue a ping
2468 * response, then the client is either not accepting or not receiving
2469 * interrupts.  Disconnect with an error.
2470 *
2471 * EXECUTION ENVIRONMENT:
2472 *      Interrupt, interrupt lock held
2473 */
2474static long ibmvscsis_ping_response(struct scsi_info *vscsi)
2475{
2476        struct viosrp_crq *crq;
2477        u64 buffer[2] = { 0, 0 };
2478        long rc;
2479
2480        crq = (struct viosrp_crq *)&buffer;
2481        crq->valid = VALID_CMD_RESP_EL;
2482        crq->format = (u8)MESSAGE_IN_CRQ;
2483        crq->status = PING_RESPONSE;
2484
2485        rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]),
2486                        cpu_to_be64(buffer[MSG_LOW]));
2487
2488        switch (rc) {
2489        case H_SUCCESS:
2490                break;
2491        case H_CLOSED:
2492                vscsi->flags |= CLIENT_FAILED;
2493                fallthrough;
2494        case H_DROPPED:
2495                vscsi->flags |= RESPONSE_Q_DOWN;
2496                fallthrough;
2497        case H_REMOTE_PARM:
2498                dev_err(&vscsi->dev, "ping_response: h_send_crq failed, rc %ld\n",
2499                        rc);
2500                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2501                break;
2502        default:
2503                dev_err(&vscsi->dev, "ping_response: h_send_crq returned unknown rc %ld\n",
2504                        rc);
2505                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
2506                break;
2507        }
2508
2509        return rc;
2510}
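
    /*
     * Note: ibmvscsis_ping_response() is invoked from
     * ibmvscsis_parse_command() below when a MESSAGE_IN_CRQ element with
     * PING status arrives on the command queue.
     */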
2511
2512/**
2513 * ibmvscsis_parse_command() - Parse an element taken from the cmd rsp queue.
2514 * @vscsi:      Pointer to our adapter structure
2515 * @crq:        Pointer to CRQ element containing the SRP request
2516 *
2517 * This function will return success if the command queue element is valid
2518 * and the SRP IU or MAD request it pointed to was also valid.  That does
2519 * not mean that an error was not returned to the client.
2520 *
2521 * EXECUTION ENVIRONMENT:
2522 *      Interrupt, intr lock held
2523 */
2524static long ibmvscsis_parse_command(struct scsi_info *vscsi,
2525                                    struct viosrp_crq *crq)
2526{
2527        long rc = ADAPT_SUCCESS;
2528
2529        switch (crq->valid) {
2530        case VALID_CMD_RESP_EL:
2531                switch (crq->format) {
2532                case OS400_FORMAT:
2533                case AIX_FORMAT:
2534                case LINUX_FORMAT:
2535                case MAD_FORMAT:
2536                        if (vscsi->flags & PROCESSING_MAD) {
2537                                rc = ERROR;
2538                                dev_err(&vscsi->dev, "parse_command: already processing mad\n");
2539                                ibmvscsis_post_disconnect(vscsi,
2540                                                       ERR_DISCONNECT_RECONNECT,
2541                                                       0);
2542                        } else {
2543                                vscsi->flags |= PROCESSING_MAD;
2544                                rc = ibmvscsis_mad(vscsi, crq);
2545                        }
2546                        break;
2547
2548                case SRP_FORMAT:
2549                        ibmvscsis_srp_cmd(vscsi, crq);
2550                        break;
2551
2552                case MESSAGE_IN_CRQ:
2553                        if (crq->status == PING)
2554                                ibmvscsis_ping_response(vscsi);
2555                        break;
2556
2557                default:
2558                        dev_err(&vscsi->dev, "parse_command: invalid format %d\n",
2559                                (uint)crq->format);
2560                        ibmvscsis_post_disconnect(vscsi,
2561                                                  ERR_DISCONNECT_RECONNECT, 0);
2562                        break;
2563                }
2564                break;
2565
2566        case VALID_TRANS_EVENT:
2567                rc = ibmvscsis_trans_event(vscsi, crq);
2568                break;
2569
2570        case VALID_INIT_MSG:
2571                rc = ibmvscsis_init_msg(vscsi, crq);
2572                break;
2573
2574        default:
2575                dev_err(&vscsi->dev, "parse_command: invalid valid field %d\n",
2576                        (uint)crq->valid);
2577                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2578                break;
2579        }
2580
2581        /*
2582         * Return only what the interrupt handler cares
2583         * about. Most errors we keep right on trucking.
2584         */
2585        rc = vscsi->flags & SCHEDULE_DISCONNECT;
2586
2587        return rc;
2588}
2589
2590static int read_dma_window(struct scsi_info *vscsi)
2591{
2592        struct vio_dev *vdev = vscsi->dma_dev;
2593        const __be32 *dma_window;
2594        const __be32 *prop;
2595
2596        /* TODO: Using of_parse_dma_window() would be better, but it doesn't give
2597         * a way to read multiple windows without already knowing the size of
2598         * a window or the number of windows.
2599         */
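            /*
             * For illustration, assuming one address cell and one size cell
             * per window, "ibm,my-dma-window" would look like:
             *
             *        <local-liobn local-addr local-size
             *         remote-liobn remote-addr remote-size>
             *
             * Only the two liobn values are kept below; the address and size
             * cells are skipped using "ibm,#dma-address-cells" and
             * "ibm,#dma-size-cells" (assumed to be one cell each when those
             * properties are missing).
             */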
2600        dma_window = (const __be32 *)vio_get_attribute(vdev,
2601                                                       "ibm,my-dma-window",
2602                                                       NULL);
2603        if (!dma_window) {
2604                dev_err(&vscsi->dev, "Couldn't find ibm,my-dma-window property\n");
2605                return -1;
2606        }
2607
2608        vscsi->dds.window[LOCAL].liobn = be32_to_cpu(*dma_window);
2609        dma_window++;
2610
2611        prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells",
2612                                                 NULL);
2613        if (!prop) {
2614                dev_warn(&vscsi->dev, "Couldn't find ibm,#dma-address-cells property\n");
2615                dma_window++;
2616        } else {
2617                dma_window += be32_to_cpu(*prop);
2618        }
2619
2620        prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells",
2621                                                 NULL);
2622        if (!prop) {
2623                dev_warn(&vscsi->dev, "Couldn't find ibm,#dma-size-cells property\n");
2624                dma_window++;
2625        } else {
2626                dma_window += be32_to_cpu(*prop);
2627        }
2628
2629        /* dma_window should point to the second window now */
2630        vscsi->dds.window[REMOTE].liobn = be32_to_cpu(*dma_window);
2631
2632        return 0;
2633}
2634
2635static struct ibmvscsis_tport *ibmvscsis_lookup_port(const char *name)
2636{
2637        struct ibmvscsis_tport *tport = NULL;
2638        struct vio_dev *vdev;
2639        struct scsi_info *vscsi;
2640
2641        spin_lock_bh(&ibmvscsis_dev_lock);
2642        list_for_each_entry(vscsi, &ibmvscsis_dev_list, list) {
2643                vdev = vscsi->dma_dev;
2644                if (!strcmp(dev_name(&vdev->dev), name)) {
2645                        tport = &vscsi->tport;
2646                        break;
2647                }
2648        }
2649        spin_unlock_bh(&ibmvscsis_dev_lock);
2650
2651        return tport;
2652}
2653
2654/**
2655 * ibmvscsis_parse_cmd() - Parse SRP Command
2656 * @vscsi:      Pointer to our adapter structure
2657 * @cmd:        Pointer to command element with SRP command
2658 *
2659 * Parse the srp command; if it is valid then submit it to tcm.
2660 * Note: The return code does not reflect the status of the SCSI CDB.
2661 *
2662 * EXECUTION ENVIRONMENT:
2663 *      Process level
2664 */
2665static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
2666                                struct ibmvscsis_cmd *cmd)
2667{
2668        struct iu_entry *iue = cmd->iue;
2669        struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf;
2670        struct ibmvscsis_nexus *nexus;
2671        u64 data_len = 0;
2672        enum dma_data_direction dir;
2673        int attr = 0;
2674
2675        nexus = vscsi->tport.ibmv_nexus;
2676        /*
2677         * additional length in bytes.  Note that the SRP spec says that
2678         * additional length is in 4-byte words, but technically the
2679         * additional length field is only the upper 6 bits of the byte.
2680         * The lower 2 bits are reserved.  If the lower 2 bits are 0 (as
2681         * all reserved fields should be), then interpreting the byte as
2682         * an int will yield the length in bytes.
2683         */
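            /*
             * For example, an add_cdb_len byte of 0x08 encodes two additional
             * 4-byte words (upper six bits = 2), i.e. 8 bytes, so reading the
             * whole byte directly already yields the length in bytes as long
             * as the reserved low-order bits are zero (checked below).
             */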
2684        if (srp->add_cdb_len & 0x03) {
2685                dev_err(&vscsi->dev, "parse_cmd: reserved bits set in IU\n");
2686                spin_lock_bh(&vscsi->intr_lock);
2687                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2688                ibmvscsis_free_cmd_resources(vscsi, cmd);
2689                spin_unlock_bh(&vscsi->intr_lock);
2690                return;
2691        }
2692
2693        if (srp_get_desc_table(srp, &dir, &data_len)) {
2694                dev_err(&vscsi->dev, "0x%llx: parsing SRP descriptor table failed.\n",
2695                        srp->tag);
2696                goto fail;
2697        }
2698
2699        cmd->rsp.sol_not = srp->sol_not;
2700
2701        switch (srp->task_attr) {
2702        case SRP_SIMPLE_TASK:
2703                attr = TCM_SIMPLE_TAG;
2704                break;
2705        case SRP_ORDERED_TASK:
2706                attr = TCM_ORDERED_TAG;
2707                break;
2708        case SRP_HEAD_TASK:
2709                attr = TCM_HEAD_TAG;
2710                break;
2711        case SRP_ACA_TASK:
2712                attr = TCM_ACA_TAG;
2713                break;
2714        default:
2715                dev_err(&vscsi->dev, "Invalid task attribute %d\n",
2716                        srp->task_attr);
2717                goto fail;
2718        }
2719
2720        cmd->se_cmd.tag = be64_to_cpu(srp->tag);
2721
2722        spin_lock_bh(&vscsi->intr_lock);
2723        list_add_tail(&cmd->list, &vscsi->active_q);
2724        spin_unlock_bh(&vscsi->intr_lock);
2725
2726        srp->lun.scsi_lun[0] &= 0x3f;
2727
2728        target_submit_cmd(&cmd->se_cmd, nexus->se_sess, srp->cdb,
2729                          cmd->sense_buf, scsilun_to_int(&srp->lun),
2730                          data_len, attr, dir, 0);
2731        return;
2732
2733fail:
2734        spin_lock_bh(&vscsi->intr_lock);
2735        ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2736        spin_unlock_bh(&vscsi->intr_lock);
2737}
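
/*
 * Illustrative sketch only (hypothetical helper, not used by the driver):
 * the add_cdb_len byte rejected above keeps its length in the upper 6 bits,
 * counted in 4-byte words, with the low 2 bits reserved.  When the reserved
 * bits are clear, the raw byte value already equals the length in bytes,
 * e.g. 0x08 -> 2 words -> 8 bytes.
 */
static inline u32 example_add_cdb_len_bytes(u8 add_cdb_len)
{
        if (add_cdb_len & 0x03)         /* reserved bits must be zero */
                return 0;
        return (u32)(add_cdb_len >> 2) * 4;     /* equals add_cdb_len itself */
}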
2738
2739/**
2740 * ibmvscsis_parse_task() - Parse SRP Task Management Request
2741 * @vscsi:      Pointer to our adapter structure
2742 * @cmd:        Pointer to command element with SRP task management request
2743 *
2744 * Parse the srp task management request; if it is valid then submit it to tcm.
2745 * Note: The return code does not reflect the status of the task management
2746 * request.
2747 *
2748 * EXECUTION ENVIRONMENT:
2749 *      Process level
2750 */
2751static void ibmvscsis_parse_task(struct scsi_info *vscsi,
2752                                 struct ibmvscsis_cmd *cmd)
2753{
2754        struct iu_entry *iue = cmd->iue;
2755        struct srp_tsk_mgmt *srp_tsk = &vio_iu(iue)->srp.tsk_mgmt;
2756        int tcm_type;
2757        u64 tag_to_abort = 0;
2758        int rc = 0;
2759        struct ibmvscsis_nexus *nexus;
2760
2761        nexus = vscsi->tport.ibmv_nexus;
2762
2763        cmd->rsp.sol_not = srp_tsk->sol_not;
2764
2765        switch (srp_tsk->tsk_mgmt_func) {
2766        case SRP_TSK_ABORT_TASK:
2767                tcm_type = TMR_ABORT_TASK;
2768                tag_to_abort = be64_to_cpu(srp_tsk->task_tag);
2769                break;
2770        case SRP_TSK_ABORT_TASK_SET:
2771                tcm_type = TMR_ABORT_TASK_SET;
2772                break;
2773        case SRP_TSK_CLEAR_TASK_SET:
2774                tcm_type = TMR_CLEAR_TASK_SET;
2775                break;
2776        case SRP_TSK_LUN_RESET:
2777                tcm_type = TMR_LUN_RESET;
2778                break;
2779        case SRP_TSK_CLEAR_ACA:
2780                tcm_type = TMR_CLEAR_ACA;
2781                break;
2782        default:
2783                dev_err(&vscsi->dev, "unknown task mgmt func %d\n",
2784                        srp_tsk->tsk_mgmt_func);
2785                cmd->se_cmd.se_tmr_req->response =
2786                        TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
2787                rc = -1;
2788                break;
2789        }
2790
2791        if (!rc) {
2792                cmd->se_cmd.tag = be64_to_cpu(srp_tsk->tag);
2793
2794                spin_lock_bh(&vscsi->intr_lock);
2795                list_add_tail(&cmd->list, &vscsi->active_q);
2796                spin_unlock_bh(&vscsi->intr_lock);
2797
2798                srp_tsk->lun.scsi_lun[0] &= 0x3f;
2799
2800                dev_dbg(&vscsi->dev, "calling submit_tmr, func %d\n",
2801                        srp_tsk->tsk_mgmt_func);
2802                rc = target_submit_tmr(&cmd->se_cmd, nexus->se_sess, NULL,
2803                                       scsilun_to_int(&srp_tsk->lun), srp_tsk,
2804                                       tcm_type, GFP_KERNEL, tag_to_abort, 0);
2805                if (rc) {
2806                        dev_err(&vscsi->dev, "target_submit_tmr failed, rc %d\n",
2807                                rc);
2808                        spin_lock_bh(&vscsi->intr_lock);
2809                        list_del(&cmd->list);
2810                        spin_unlock_bh(&vscsi->intr_lock);
2811                        cmd->se_cmd.se_tmr_req->response =
2812                                TMR_FUNCTION_REJECTED;
2813                }
2814        }
2815
2816        if (rc)
2817                transport_send_check_condition_and_sense(&cmd->se_cmd, 0, 0);
2818}
2819
2820static void ibmvscsis_scheduler(struct work_struct *work)
2821{
2822        struct ibmvscsis_cmd *cmd = container_of(work, struct ibmvscsis_cmd,
2823                                                 work);
2824        struct scsi_info *vscsi = cmd->adapter;
2825
2826        spin_lock_bh(&vscsi->intr_lock);
2827
2828        /* Remove from schedule_q */
2829        list_del(&cmd->list);
2830
2831        /* Don't submit cmd if we're disconnecting */
2832        if (vscsi->flags & (SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED)) {
2833                ibmvscsis_free_cmd_resources(vscsi, cmd);
2834
2835                /* ibmvscsis_disconnect might be waiting for us */
2836                if (list_empty(&vscsi->active_q) &&
2837                    list_empty(&vscsi->schedule_q) &&
2838                    (vscsi->flags & WAIT_FOR_IDLE)) {
2839                        vscsi->flags &= ~WAIT_FOR_IDLE;
2840                        complete(&vscsi->wait_idle);
2841                }
2842
2843                spin_unlock_bh(&vscsi->intr_lock);
2844                return;
2845        }
2846
2847        spin_unlock_bh(&vscsi->intr_lock);
2848
2849        switch (cmd->type) {
2850        case SCSI_CDB:
2851                ibmvscsis_parse_cmd(vscsi, cmd);
2852                break;
2853        case TASK_MANAGEMENT:
2854                ibmvscsis_parse_task(vscsi, cmd);
2855                break;
2856        default:
2857                dev_err(&vscsi->dev, "scheduler, invalid cmd type %d\n",
2858                        cmd->type);
2859                spin_lock_bh(&vscsi->intr_lock);
2860                ibmvscsis_free_cmd_resources(vscsi, cmd);
2861                spin_unlock_bh(&vscsi->intr_lock);
2862                break;
2863        }
2864}
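
/*
 * Illustrative sketch only (hypothetical helper): roughly how a command is
 * handed to the scheduler above.  The driver's real enqueue path lives in
 * code outside this excerpt; the point is simply that the element goes on
 * schedule_q under the interrupt lock and its work item is queued on the
 * adapter's workqueue.
 */
static inline void example_schedule_cmd(struct scsi_info *vscsi,
                                        struct ibmvscsis_cmd *cmd)
{
        spin_lock_bh(&vscsi->intr_lock);
        list_add_tail(&cmd->list, &vscsi->schedule_q);
        spin_unlock_bh(&vscsi->intr_lock);
        queue_work(vscsi->work_q, &cmd->work);
}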
2865
2866static int ibmvscsis_alloc_cmds(struct scsi_info *vscsi, int num)
2867{
2868        struct ibmvscsis_cmd *cmd;
2869        int i;
2870
2871        INIT_LIST_HEAD(&vscsi->free_cmd);
2872        vscsi->cmd_pool = kcalloc(num, sizeof(struct ibmvscsis_cmd),
2873                                  GFP_KERNEL);
2874        if (!vscsi->cmd_pool)
2875                return -ENOMEM;
2876
2877        for (i = 0, cmd = (struct ibmvscsis_cmd *)vscsi->cmd_pool; i < num;
2878             i++, cmd++) {
2879                cmd->abort_cmd = NULL;
2880                cmd->adapter = vscsi;
2881                INIT_WORK(&cmd->work, ibmvscsis_scheduler);
2882                list_add_tail(&cmd->list, &vscsi->free_cmd);
2883        }
2884
2885        return 0;
2886}
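
/*
 * Illustrative sketch only (hypothetical helper): how a command would be
 * taken back out of the free_cmd pool built above.  The driver's actual
 * allocation helper lives elsewhere in this file; this just shows the
 * intended free-list usage under the interrupt lock.
 */
static inline struct ibmvscsis_cmd *example_get_free_cmd(struct scsi_info *vscsi)
{
        struct ibmvscsis_cmd *cmd;

        /* assumes the caller holds vscsi->intr_lock */
        cmd = list_first_entry_or_null(&vscsi->free_cmd,
                                       struct ibmvscsis_cmd, list);
        if (cmd)
                list_del(&cmd->list);
        return cmd;
}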
2887
2888static void ibmvscsis_free_cmds(struct scsi_info *vscsi)
2889{
2890        kfree(vscsi->cmd_pool);
2891        vscsi->cmd_pool = NULL;
2892        INIT_LIST_HEAD(&vscsi->free_cmd);
2893}
2894
2895/**
2896 * ibmvscsis_service_wait_q() - Service Waiting Queue
2897 * @timer:      Pointer to timer which has expired
2898 *
2899 * This routine is called when the timer pops to service the waiting
2900 * queue. Elements on the queue have completed, their responses have been
2901 * copied to the client, but the client's response queue was full so
2902 * the queue message could not be sent. The routine grabs the proper locks
2903 * and calls ibmvscsis_send_messages() to retry them.
2904 *
2905 * EXECUTION ENVIRONMENT:
2906 *      called at interrupt level
2907 */
2908static enum hrtimer_restart ibmvscsis_service_wait_q(struct hrtimer *timer)
2909{
2910        struct timer_cb *p_timer = container_of(timer, struct timer_cb, timer);
2911        struct scsi_info *vscsi = container_of(p_timer, struct scsi_info,
2912                                               rsp_q_timer);
2913
2914        spin_lock_bh(&vscsi->intr_lock);
2915        p_timer->timer_pops += 1;
2916        p_timer->started = false;
2917        ibmvscsis_send_messages(vscsi);
2918        spin_unlock_bh(&vscsi->intr_lock);
2919
2920        return HRTIMER_NORESTART;
2921}
2922
2923static long ibmvscsis_alloctimer(struct scsi_info *vscsi)
2924{
2925        struct timer_cb *p_timer;
2926
2927        p_timer = &vscsi->rsp_q_timer;
2928        hrtimer_init(&p_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2929
2930        p_timer->timer.function = ibmvscsis_service_wait_q;
2931        p_timer->started = false;
2932        p_timer->timer_pops = 0;
2933
2934        return ADAPT_SUCCESS;
2935}
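
/*
 * Illustrative sketch only (hypothetical helper, arbitrary delay): how a
 * timer set up as above would typically be armed when the client's response
 * queue is full.  The driver's real arming logic lives elsewhere in this
 * file; this just shows the hrtimer_start() pairing with the callback.
 */
static inline void example_start_wait_q_timer(struct timer_cb *p_timer)
{
        if (!p_timer->started) {
                hrtimer_start(&p_timer->timer, ms_to_ktime(10),
                              HRTIMER_MODE_REL);
                p_timer->started = true;
        }
}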
2936
2937static void ibmvscsis_freetimer(struct scsi_info *vscsi)
2938{
2939        struct timer_cb *p_timer;
2940
2941        p_timer = &vscsi->rsp_q_timer;
2942
2943        (void)hrtimer_cancel(&p_timer->timer);
2944
2945        p_timer->started = false;
2946        p_timer->timer_pops = 0;
2947}
2948
2949static irqreturn_t ibmvscsis_interrupt(int dummy, void *data)
2950{
2951        struct scsi_info *vscsi = data;
2952
2953        vio_disable_interrupts(vscsi->dma_dev);
2954        tasklet_schedule(&vscsi->work_task);
2955
2956        return IRQ_HANDLED;
2957}
2958
2959/**
2960 * ibmvscsis_enable_change_state() - Set new state based on enabled status
2961 * @vscsi:      Pointer to our adapter structure
2962 *
2963 * This function determines our new state now that we are enabled.  This
2964 * may involve sending an Init Complete message to the client.
2965 *
2966 * Must be called with interrupt lock held.
2967 */
2968static long ibmvscsis_enable_change_state(struct scsi_info *vscsi)
2969{
2970        int bytes;
2971        long rc = ADAPT_SUCCESS;
2972
2973        bytes = vscsi->cmd_q.size * PAGE_SIZE;
2974        rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, bytes);
2975        if (rc == H_CLOSED || rc == H_SUCCESS) {
2976                vscsi->state = WAIT_CONNECTION;
2977                rc = ibmvscsis_establish_new_q(vscsi);
2978        }
2979
2980        if (rc != ADAPT_SUCCESS) {
2981                vscsi->state = ERR_DISCONNECTED;
2982                vscsi->flags |= RESPONSE_Q_DOWN;
2983        }
2984
2985        return rc;
2986}
2987
2988/**
2989 * ibmvscsis_create_command_q() - Create Command Queue
2990 * @vscsi:      Pointer to our adapter structure
2991 * @num_cmds:   Currently unused.  In the future, may be used to determine
2992 *              the size of the CRQ.
2993 *
2994 * Allocates memory for the command queue, maps the memory into an ioba,
2995 * and initializes the command/response queue.
2996 *
2997 * EXECUTION ENVIRONMENT:
2998 *      Process level only
2999 */
3000static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds)
3001{
3002        int pages;
3003        struct vio_dev *vdev = vscsi->dma_dev;
3004
3005        /* We might support multiple pages in the future, but just 1 for now */
3006        pages = 1;
3007
3008        vscsi->cmd_q.size = pages;
3009
3010        vscsi->cmd_q.base_addr =
3011                (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
3012        if (!vscsi->cmd_q.base_addr)
3013                return -ENOMEM;
3014
3015        vscsi->cmd_q.mask = ((uint)pages * CRQ_PER_PAGE) - 1;
3016
3017        vscsi->cmd_q.crq_token = dma_map_single(&vdev->dev,
3018                                                vscsi->cmd_q.base_addr,
3019                                                PAGE_SIZE, DMA_BIDIRECTIONAL);
3020        if (dma_mapping_error(&vdev->dev, vscsi->cmd_q.crq_token)) {
3021                free_page((unsigned long)vscsi->cmd_q.base_addr);
3022                return -ENOMEM;
3023        }
3024
3025        return 0;
3026}
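
/*
 * Illustrative sketch only: the mask computed above turns the CRQ into a
 * power-of-two ring.  Assuming 4 KiB pages and 16-byte CRQ elements,
 * CRQ_PER_PAGE would be 256 and cmd_q.mask 0xff, so advancing the consumer
 * index is a cheap AND rather than a modulo.
 */
static inline uint example_next_crq_index(uint index, uint mask)
{
        return (index + 1) & mask;      /* valid only when mask is 2^n - 1 */
}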
3027
3028/**
3029 * ibmvscsis_destroy_command_q - Destroy Command Queue
3030 * @vscsi:      Pointer to our adapter structure
3031 *
3032 * Releases memory for command queue and unmaps mapped remote memory.
3033 *
3034 * EXECUTION ENVIRONMENT:
3035 *      Process level only
3036 */
3037static void ibmvscsis_destroy_command_q(struct scsi_info *vscsi)
3038{
3039        dma_unmap_single(&vscsi->dma_dev->dev, vscsi->cmd_q.crq_token,
3040                         PAGE_SIZE, DMA_BIDIRECTIONAL);
3041        free_page((unsigned long)vscsi->cmd_q.base_addr);
3042        vscsi->cmd_q.base_addr = NULL;
3043        vscsi->state = NO_QUEUE;
3044}
3045
3046static u8 ibmvscsis_fast_fail(struct scsi_info *vscsi,
3047                              struct ibmvscsis_cmd *cmd)
3048{
3049        struct iu_entry *iue = cmd->iue;
3050        struct se_cmd *se_cmd = &cmd->se_cmd;
3051        struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf;
3052        struct scsi_sense_hdr sshdr;
3053        u8 rc = se_cmd->scsi_status;
3054
3055        if (vscsi->fast_fail && (READ_CMD(srp->cdb) || WRITE_CMD(srp->cdb)))
3056                if (scsi_normalize_sense(se_cmd->sense_buffer,
3057                                         se_cmd->scsi_sense_length, &sshdr))
3058                        if (sshdr.sense_key == HARDWARE_ERROR &&
3059                            (se_cmd->residual_count == 0 ||
3060                             se_cmd->residual_count == se_cmd->data_length)) {
3061                                rc = NO_SENSE;
3062                                cmd->flags |= CMD_FAST_FAIL;
3063                        }
3064
3065        return rc;
3066}
3067
3068/**
3069 * srp_build_response() - Build an SRP response buffer
3070 * @vscsi:      Pointer to our adapter structure
3071 * @cmd:        Pointer to command for which to send the response
3072 * @len_p:      Where to return the length of the IU response sent.  This
3073 *              is needed to construct the CRQ response.
3074 *
3075 * Build the SRP response buffer and copy it to the client's memory space.
3076 */
3077static long srp_build_response(struct scsi_info *vscsi,
3078                               struct ibmvscsis_cmd *cmd, uint *len_p)
3079{
3080        struct iu_entry *iue = cmd->iue;
3081        struct se_cmd *se_cmd = &cmd->se_cmd;
3082        struct srp_rsp *rsp;
3083        uint len;
3084        u32 rsp_code;
3085        char *data;
3086        u32 *tsk_status;
3087        long rc = ADAPT_SUCCESS;
3088
3089        spin_lock_bh(&vscsi->intr_lock);
3090
3091        rsp = &vio_iu(iue)->srp.rsp;
3092        len = sizeof(*rsp);
3093        memset(rsp, 0, len);
3094        data = rsp->data;
3095
3096        rsp->opcode = SRP_RSP;
3097
3098        rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
3099        rsp->tag = cmd->rsp.tag;
3100        rsp->flags = 0;
3101
3102        if (cmd->type == SCSI_CDB) {
3103                rsp->status = ibmvscsis_fast_fail(vscsi, cmd);
3104                if (rsp->status) {
3105                        dev_dbg(&vscsi->dev, "build_resp: cmd %p, scsi status %d\n",
3106                                cmd, (int)rsp->status);
3107                        ibmvscsis_determine_resid(se_cmd, rsp);
3108                        if (se_cmd->scsi_sense_length && se_cmd->sense_buffer) {
3109                                rsp->sense_data_len =
3110                                        cpu_to_be32(se_cmd->scsi_sense_length);
3111                                rsp->flags |= SRP_RSP_FLAG_SNSVALID;
3112                                len += se_cmd->scsi_sense_length;
3113                                memcpy(data, se_cmd->sense_buffer,
3114                                       se_cmd->scsi_sense_length);
3115                        }
3116                        rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
3117                                UCSOLNT_RESP_SHIFT;
3118                } else if (cmd->flags & CMD_FAST_FAIL) {
3119                        dev_dbg(&vscsi->dev, "build_resp: cmd %p, fast fail\n",
3120                                cmd);
3121                        rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
3122                                UCSOLNT_RESP_SHIFT;
3123                } else {
3124                        rsp->sol_not = (cmd->rsp.sol_not & SCSOLNT) >>
3125                                SCSOLNT_RESP_SHIFT;
3126                }
3127        } else {
3128                /* this is task management */
3129                rsp->status = 0;
3130                rsp->resp_data_len = cpu_to_be32(4);
3131                rsp->flags |= SRP_RSP_FLAG_RSPVALID;
3132
3133                switch (se_cmd->se_tmr_req->response) {
3134                case TMR_FUNCTION_COMPLETE:
3135                case TMR_TASK_DOES_NOT_EXIST:
3136                        rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_COMPLETE;
3137                        rsp->sol_not = (cmd->rsp.sol_not & SCSOLNT) >>
3138                                SCSOLNT_RESP_SHIFT;
3139                        break;
3140                case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
3141                case TMR_LUN_DOES_NOT_EXIST:
3142                        rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_NOT_SUPPORTED;
3143                        rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
3144                                UCSOLNT_RESP_SHIFT;
3145                        break;
3146                case TMR_FUNCTION_FAILED:
3147                case TMR_FUNCTION_REJECTED:
3148                default:
3149                        rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_FAILED;
3150                        rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
3151                                UCSOLNT_RESP_SHIFT;
3152                        break;
3153                }
3154
3155                tsk_status = (u32 *)data;
3156                *tsk_status = cpu_to_be32(rsp_code);
3157                data = (char *)(tsk_status + 1);
3158                len += 4;
3159        }
3160
3161        dma_wmb();
3162        rc = h_copy_rdma(len, vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma,
3163                         vscsi->dds.window[REMOTE].liobn,
3164                         be64_to_cpu(iue->remote_token));
3165
3166        switch (rc) {
3167        case H_SUCCESS:
3168                vscsi->credit = 0;
3169                *len_p = len;
3170                break;
3171        case H_PERMISSION:
3172                if (connection_broken(vscsi))
3173                        vscsi->flags |= RESPONSE_Q_DOWN | CLIENT_FAILED;
3174
3175                dev_err(&vscsi->dev, "build_response: error copying to client, rc %ld, flags 0x%x, state 0x%hx\n",
3176                        rc, vscsi->flags, vscsi->state);
3177                break;
3178        case H_SOURCE_PARM:
3179        case H_DEST_PARM:
3180        default:
3181                dev_err(&vscsi->dev, "build_response: error copying to client, rc %ld\n",
3182                        rc);
3183                break;
3184        }
3185
3186        spin_unlock_bh(&vscsi->intr_lock);
3187
3188        return rc;
3189}
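
/*
 * Illustrative sketch only (hypothetical helper): the IU length that
 * srp_build_response() above returns through *len_p.  A SCSI response is
 * the fixed SRP_RSP header plus any sense data; a task management response
 * is the header plus the 4-byte response code placed in the data area.
 */
static inline uint example_srp_rsp_len(bool is_tmr, uint sense_len)
{
        return sizeof(struct srp_rsp) + (is_tmr ? 4 : sense_len);
}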
3190
3191static int ibmvscsis_rdma(struct ibmvscsis_cmd *cmd, struct scatterlist *sg,
3192                          int nsg, struct srp_direct_buf *md, int nmd,
3193                          enum dma_data_direction dir, unsigned int bytes)
3194{
3195        struct iu_entry *iue = cmd->iue;
3196        struct srp_target *target = iue->target;
3197        struct scsi_info *vscsi = target->ldata;
3198        struct scatterlist *sgp;
3199        dma_addr_t client_ioba, server_ioba;
3200        ulong buf_len;
3201        ulong client_len, server_len;
3202        int md_idx;
3203        long tx_len;
3204        long rc = 0;
3205
3206        if (bytes == 0)
3207                return 0;
3208
3209        sgp = sg;
3210        client_len = 0;
3211        server_len = 0;
3212        md_idx = 0;
3213        tx_len = bytes;
3214
3215        do {
3216                if (client_len == 0) {
3217                        if (md_idx >= nmd) {
3218                                dev_err(&vscsi->dev, "rdma: ran out of client memory descriptors\n");
3219                                rc = -EIO;
3220                                break;
3221                        }
3222                        client_ioba = be64_to_cpu(md[md_idx].va);
3223                        client_len = be32_to_cpu(md[md_idx].len);
3224                }
3225                if (server_len == 0) {
3226                        if (!sgp) {
3227                                dev_err(&vscsi->dev, "rdma: ran out of scatter/gather list\n");
3228                                rc = -EIO;
3229                                break;
3230                        }
3231                        server_ioba = sg_dma_address(sgp);
3232                        server_len = sg_dma_len(sgp);
3233                }
3234
3235                buf_len = tx_len;
3236
3237                if (buf_len > client_len)
3238                        buf_len = client_len;
3239
3240                if (buf_len > server_len)
3241                        buf_len = server_len;
3242
3243                if (buf_len > max_vdma_size)
3244                        buf_len = max_vdma_size;
3245
3246                if (dir == DMA_TO_DEVICE) {
3247                        /* read from client */
3248                        rc = h_copy_rdma(buf_len,
3249                                         vscsi->dds.window[REMOTE].liobn,
3250                                         client_ioba,
3251                                         vscsi->dds.window[LOCAL].liobn,
3252                                         server_ioba);
3253                } else {
3254                        /* The h_copy_rdma will cause phyp, running in another
3255                         * partition, to read memory, so we need to make sure
3256                         * the data has been written out, hence these syncs.
3257                         */
3258                        /* ensure that everything is in memory */
3259                        isync();
3260                        /* ensure that memory has been made visible */
3261                        dma_wmb();
3262                        rc = h_copy_rdma(buf_len,
3263                                         vscsi->dds.window[LOCAL].liobn,
3264                                         server_ioba,
3265                                         vscsi->dds.window[REMOTE].liobn,
3266                                         client_ioba);
3267                }
3268                switch (rc) {
3269                case H_SUCCESS:
3270                        break;
3271                case H_PERMISSION:
3272                case H_SOURCE_PARM:
3273                case H_DEST_PARM:
3274                        if (connection_broken(vscsi)) {
3275                                spin_lock_bh(&vscsi->intr_lock);
3276                                vscsi->flags |=
3277                                        (RESPONSE_Q_DOWN | CLIENT_FAILED);
3278                                spin_unlock_bh(&vscsi->intr_lock);
3279                        }
3280                        dev_err(&vscsi->dev, "rdma: h_copy_rdma failed, rc %ld\n",
3281                                rc);
3282                        break;
3283
3284                default:
3285                        dev_err(&vscsi->dev, "rdma: unknown error %ld from h_copy_rdma\n",
3286                                rc);
3287                        break;
3288                }
3289
3290                if (!rc) {
3291                        tx_len -= buf_len;
3292                        if (tx_len) {
3293                                client_len -= buf_len;
3294                                if (client_len == 0)
3295                                        md_idx++;
3296                                else
3297                                        client_ioba += buf_len;
3298
3299                                server_len -= buf_len;
3300                                if (server_len == 0)
3301                                        sgp = sg_next(sgp);
3302                                else
3303                                        server_ioba += buf_len;
3304                        } else {
3305                                break;
3306                        }
3307                }
3308        } while (!rc);
3309
3310        return rc;
3311}
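
/*
 * Illustrative sketch only (hypothetical helper): the chunk-sizing rule
 * inside the ibmvscsis_rdma() loop above.  Each h_copy_rdma transfer is
 * capped by what remains in the current client descriptor, the current
 * server scatterlist entry, and the max_vdma_size limit.
 */
static inline ulong example_rdma_chunk(ulong remaining, ulong client_len,
                                       ulong server_len, ulong vdma_limit)
{
        ulong chunk = remaining;

        if (chunk > client_len)
                chunk = client_len;
        if (chunk > server_len)
                chunk = server_len;
        if (chunk > vdma_limit)
                chunk = vdma_limit;

        return chunk;
}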
3312
3313/**
3314 * ibmvscsis_handle_crq() - Handle CRQ
3315 * @data:       Pointer to our adapter structure
3316 *
3317 * Read the command elements from the command queue and copy the payloads
3318 * associated with the command elements to local memory and execute the
3319 * SRP requests.
3320 *
3321 * Note: this is an edge-triggered interrupt. It cannot be shared.
3322 */
3323static void ibmvscsis_handle_crq(unsigned long data)
3324{
3325        struct scsi_info *vscsi = (struct scsi_info *)data;
3326        struct viosrp_crq *crq;
3327        long rc;
3328        bool ack = true;
3329        volatile u8 valid;
3330
3331        spin_lock_bh(&vscsi->intr_lock);
3332
3333        dev_dbg(&vscsi->dev, "got interrupt\n");
3334
3335        /*
3336         * if we are in a path where we are waiting for all pending commands
3337         * to complete because we received a transport event and anything in
3338         * the command queue is for a new connection, do nothing
3339         */
3340        if (TARGET_STOP(vscsi)) {
3341                vio_enable_interrupts(vscsi->dma_dev);
3342
3343                dev_dbg(&vscsi->dev, "handle_crq, don't process: flags 0x%x, state 0x%hx\n",
3344                        vscsi->flags, vscsi->state);
3345                spin_unlock_bh(&vscsi->intr_lock);
3346                return;
3347        }
3348
3349        rc = vscsi->flags & SCHEDULE_DISCONNECT;
3350        crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
3351        valid = crq->valid;
3352        dma_rmb();
3353
3354        while (valid) {
3355                /*
3356                 * These are edge-triggered interrupts. After dropping out of
3357                 * the while loop, the code must check for work since an
3358                 * interrupt could be lost and an element left on the queue,
3359                 * hence the label.
3360                 */
3361cmd_work:
3362                vscsi->cmd_q.index =
3363                        (vscsi->cmd_q.index + 1) & vscsi->cmd_q.mask;
3364
3365                if (!rc) {
3366                        rc = ibmvscsis_parse_command(vscsi, crq);
3367                } else {
3368                        if ((uint)crq->valid == VALID_TRANS_EVENT) {
3369                                /*
3370                                 * must service the transport layer events even
3371                                 * in an error state; don't break out until all
3372                                 * the consecutive transport events have been
3373                                 * processed
3374                                 */
3375                                rc = ibmvscsis_trans_event(vscsi, crq);
3376                        } else if (vscsi->flags & TRANS_EVENT) {
3377                                /*
3378                                 * if a transport event has occurred leave
3379                                 * everything but transport events on the queue
3380                                 *
3381                                 * need to decrement the queue index so we can
3382                                 * look at the element again
3383                                 */
3384                                if (vscsi->cmd_q.index)
3385                                        vscsi->cmd_q.index -= 1;
3386                                else
3387                                        /*
3388                                         * index is at 0, so it just wrapped;
3389                                         * point it at the last element in the queue
3390                                         */
3391                                        vscsi->cmd_q.index = vscsi->cmd_q.mask;
3392                                break;
3393                        }
3394                }
3395
3396                crq->valid = INVALIDATE_CMD_RESP_EL;
3397
3398                crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
3399                valid = crq->valid;
3400                dma_rmb();
3401        }
3402
3403        if (!rc) {
3404                if (ack) {
3405                        vio_enable_interrupts(vscsi->dma_dev);
3406                        ack = false;
3407                        dev_dbg(&vscsi->dev, "handle_crq, reenabling interrupts\n");
3408                }
3409                valid = crq->valid;
3410                dma_rmb();
3411                if (valid)
3412                        goto cmd_work;
3413        } else {
3414                dev_dbg(&vscsi->dev, "handle_crq, error: flags 0x%x, state 0x%hx, crq index 0x%x\n",
3415                        vscsi->flags, vscsi->state, vscsi->cmd_q.index);
3416        }
3417
3418        dev_dbg(&vscsi->dev, "Leaving handle_crq: schedule_q empty %d, flags 0x%x, state 0x%hx\n",
3419                (int)list_empty(&vscsi->schedule_q), vscsi->flags,
3420                vscsi->state);
3421
3422        spin_unlock_bh(&vscsi->intr_lock);
3423}
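
/*
 * Illustrative sketch only (hypothetical helper with callback parameters):
 * the drain/re-enable/recheck pattern ibmvscsis_handle_crq() uses above.
 * With an edge-triggered CRQ interrupt, an element that arrives between
 * emptying the queue and re-enabling the interrupt raises no new edge, so
 * the queue must be checked once more after vio_enable_interrupts() (the
 * real handler keeps looping via its cmd_work label).
 */
static inline void example_drain_edge_triggered(struct scsi_info *vscsi,
                                                bool (*have_work)(struct scsi_info *),
                                                void (*process_one)(struct scsi_info *))
{
        while (have_work(vscsi))
                process_one(vscsi);

        vio_enable_interrupts(vscsi->dma_dev);

        /* an element may have slipped in before the enable above */
        while (have_work(vscsi))
                process_one(vscsi);
}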
3424
3425static int ibmvscsis_probe(struct vio_dev *vdev,
3426                           const struct vio_device_id *id)
3427{
3428        struct scsi_info *vscsi;
3429        int rc = 0;
3430        long hrc = 0;
3431        char wq_name[24];
3432
3433        vscsi = kzalloc(sizeof(*vscsi), GFP_KERNEL);
3434        if (!vscsi) {
3435                rc = -ENOMEM;
3436                dev_err(&vdev->dev, "probe: allocation of adapter failed\n");
3437                return rc;
3438        }
3439
3440        vscsi->dma_dev = vdev;
3441        vscsi->dev = vdev->dev;
3442        INIT_LIST_HEAD(&vscsi->schedule_q);
3443        INIT_LIST_HEAD(&vscsi->waiting_rsp);
3444        INIT_LIST_HEAD(&vscsi->active_q);
3445
3446        snprintf(vscsi->tport.tport_name, IBMVSCSIS_NAMELEN, "%s",
3447                 dev_name(&vdev->dev));
3448
3449        dev_dbg(&vscsi->dev, "probe tport_name: %s\n", vscsi->tport.tport_name);
3450
3451        rc = read_dma_window(vscsi);
3452        if (rc)
3453                goto free_adapter;
3454        dev_dbg(&vscsi->dev, "Probe: liobn 0x%x, riobn 0x%x\n",
3455                vscsi->dds.window[LOCAL].liobn,
3456                vscsi->dds.window[REMOTE].liobn);
3457
3458        snprintf(vscsi->eye, sizeof(vscsi->eye), "VSCSI %s", vdev->name);
3459
3460        vscsi->dds.unit_id = vdev->unit_address;
3461        strscpy(vscsi->dds.partition_name, partition_name,
3462                sizeof(vscsi->dds.partition_name));
3463        vscsi->dds.partition_num = partition_number;
3464
3465        spin_lock_bh(&ibmvscsis_dev_lock);
3466        list_add_tail(&vscsi->list, &ibmvscsis_dev_list);
3467        spin_unlock_bh(&ibmvscsis_dev_lock);
3468
3469        /*
3470         * TBD: How do we determine # of cmds to request?  Do we know how
3471         * many "children" we have?
3472         */
3473        vscsi->request_limit = INITIAL_SRP_LIMIT;
3474        rc = srp_target_alloc(&vscsi->target, &vdev->dev, vscsi->request_limit,
3475                              SRP_MAX_IU_LEN);
3476        if (rc)
3477                goto rem_list;
3478
3479        vscsi->target.ldata = vscsi;
3480
3481        rc = ibmvscsis_alloc_cmds(vscsi, vscsi->request_limit);
3482        if (rc) {
3483                dev_err(&vscsi->dev, "alloc_cmds failed, rc %d, num %d\n",
3484                        rc, vscsi->request_limit);
3485                goto free_target;
3486        }
3487
3488        /*
3489         * Note: the lock is used when freeing timers, so it must be
3490         * initialized first so that the error-path ordering is correct.
3491         */
3492        spin_lock_init(&vscsi->intr_lock);
3493
3494        rc = ibmvscsis_alloctimer(vscsi);
3495        if (rc) {
3496                dev_err(&vscsi->dev, "probe: alloctimer failed, rc %d\n", rc);
3497                goto free_cmds;
3498        }
3499
3500        rc = ibmvscsis_create_command_q(vscsi, 256);
3501        if (rc) {
3502                dev_err(&vscsi->dev, "probe: create_command_q failed, rc %d\n",
3503                        rc);
3504                goto free_timer;
3505        }
3506
3507        vscsi->map_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
3508        if (!vscsi->map_buf) {
3509                rc = -ENOMEM;
3510                dev_err(&vscsi->dev, "probe: allocating cmd buffer failed\n");
3511                goto destroy_queue;
3512        }
3513
3514        vscsi->map_ioba = dma_map_single(&vdev->dev, vscsi->map_buf, PAGE_SIZE,
3515                                         DMA_BIDIRECTIONAL);
3516        if (dma_mapping_error(&vdev->dev, vscsi->map_ioba)) {
3517                rc = -ENOMEM;
3518                dev_err(&vscsi->dev, "probe: error mapping command buffer\n");
3519                goto free_buf;
3520        }
3521
3522        hrc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
3523                       (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
3524                       0);
3525        if (hrc == H_SUCCESS)
3526                vscsi->client_data.partition_number =
3527                        be64_to_cpu(*(u64 *)vscsi->map_buf);
3528        /*
3529         * We expect the VIOCTL to fail if we're configured as "any
3530         * client can connect" and the client isn't activated yet.
3531         * We'll make the call again when the client sends an init msg.
3532         */
3533        dev_dbg(&vscsi->dev, "probe hrc %ld, client partition num %d\n",
3534                hrc, vscsi->client_data.partition_number);
3535
3536        tasklet_init(&vscsi->work_task, ibmvscsis_handle_crq,
3537                     (unsigned long)vscsi);
3538
3539        init_completion(&vscsi->wait_idle);
3540        init_completion(&vscsi->unconfig);
3541
3542        snprintf(wq_name, 24, "ibmvscsis%s", dev_name(&vdev->dev));
3543        vscsi->work_q = create_workqueue(wq_name);
3544        if (!vscsi->work_q) {
3545                rc = -ENOMEM;
3546                dev_err(&vscsi->dev, "create_workqueue failed\n");
3547                goto unmap_buf;
3548        }
3549
3550        rc = request_irq(vdev->irq, ibmvscsis_interrupt, 0, "ibmvscsis", vscsi);
3551        if (rc) {
3552                dev_err(&vscsi->dev, "probe: request_irq failed, rc %d\n", rc);
3553                rc = -EPERM;
3554                goto destroy_WQ;
3555        }
3556
3557        vscsi->state = WAIT_ENABLED;
3558
3559        dev_set_drvdata(&vdev->dev, vscsi);
3560
3561        return 0;
3562
3563destroy_WQ:
3564        destroy_workqueue(vscsi->work_q);
3565unmap_buf:
3566        dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE,
3567                         DMA_BIDIRECTIONAL);
3568free_buf:
3569        kfree(vscsi->map_buf);
3570destroy_queue:
3571        tasklet_kill(&vscsi->work_task);
3572        ibmvscsis_unregister_command_q(vscsi);
3573        ibmvscsis_destroy_command_q(vscsi);
3574free_timer:
3575        ibmvscsis_freetimer(vscsi);
3576free_cmds:
3577        ibmvscsis_free_cmds(vscsi);
3578free_target:
3579        srp_target_free(&vscsi->target);
3580rem_list:
3581        spin_lock_bh(&ibmvscsis_dev_lock);
3582        list_del(&vscsi->list);
3583        spin_unlock_bh(&ibmvscsis_dev_lock);
3584free_adapter:
3585        kfree(vscsi);
3586
3587        return rc;
3588}
3589
3590static void ibmvscsis_remove(struct vio_dev *vdev)
3591{
3592        struct scsi_info *vscsi = dev_get_drvdata(&vdev->dev);
3593
3594        dev_dbg(&vscsi->dev, "remove (%s)\n", dev_name(&vscsi->dma_dev->dev));
3595
3596        spin_lock_bh(&vscsi->intr_lock);
3597        ibmvscsis_post_disconnect(vscsi, UNCONFIGURING, 0);
3598        vscsi->flags |= CFG_SLEEPING;
3599        spin_unlock_bh(&vscsi->intr_lock);
3600        wait_for_completion(&vscsi->unconfig);
3601
3602        vio_disable_interrupts(vdev);
3603        free_irq(vdev->irq, vscsi);
3604        destroy_workqueue(vscsi->work_q);
3605        dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE,
3606                         DMA_BIDIRECTIONAL);
3607        kfree(vscsi->map_buf);
3608        tasklet_kill(&vscsi->work_task);
3609        ibmvscsis_destroy_command_q(vscsi);
3610        ibmvscsis_freetimer(vscsi);
3611        ibmvscsis_free_cmds(vscsi);
3612        srp_target_free(&vscsi->target);
3613        spin_lock_bh(&ibmvscsis_dev_lock);
3614        list_del(&vscsi->list);
3615        spin_unlock_bh(&ibmvscsis_dev_lock);
3616        kfree(vscsi);
3617}
3618
3619static ssize_t system_id_show(struct device *dev,
3620                              struct device_attribute *attr, char *buf)
3621{
3622        return snprintf(buf, PAGE_SIZE, "%s\n", system_id);
3623}
3624
3625static ssize_t partition_number_show(struct device *dev,
3626                                     struct device_attribute *attr, char *buf)
3627{
3628        return snprintf(buf, PAGE_SIZE, "%x\n", partition_number);
3629}
3630
3631static ssize_t unit_address_show(struct device *dev,
3632                                 struct device_attribute *attr, char *buf)
3633{
3634        struct scsi_info *vscsi = container_of(dev, struct scsi_info, dev);
3635
3636        return snprintf(buf, PAGE_SIZE, "%x\n", vscsi->dma_dev->unit_address);
3637}
3638
3639static int ibmvscsis_get_system_info(void)
3640{
3641        struct device_node *rootdn, *vdevdn;
3642        const char *id, *model, *name;
3643        const uint *num;
3644
3645        rootdn = of_find_node_by_path("/");
3646        if (!rootdn)
3647                return -ENOENT;
3648
3649        model = of_get_property(rootdn, "model", NULL);
3650        id = of_get_property(rootdn, "system-id", NULL);
3651        if (model && id)
3652                snprintf(system_id, sizeof(system_id), "%s-%s", model, id);
3653
3654        name = of_get_property(rootdn, "ibm,partition-name", NULL);
3655        if (name)
3656                strncpy(partition_name, name, sizeof(partition_name));
3657
3658        num = of_get_property(rootdn, "ibm,partition-no", NULL);
3659        if (num)
3660                partition_number = of_read_number(num, 1);
3661
3662        of_node_put(rootdn);
3663
3664        vdevdn = of_find_node_by_path("/vdevice");
3665        if (vdevdn) {
3666                const uint *mvds;
3667
3668                mvds = of_get_property(vdevdn, "ibm,max-virtual-dma-size",
3669                                       NULL);
3670                if (mvds)
3671                        max_vdma_size = *mvds;
3672                of_node_put(vdevdn);
3673        }
3674
3675        return 0;
3676}
3677
3678static char *ibmvscsis_get_fabric_wwn(struct se_portal_group *se_tpg)
3679{
3680        struct ibmvscsis_tport *tport =
3681                container_of(se_tpg, struct ibmvscsis_tport, se_tpg);
3682
3683        return tport->tport_name;
3684}
3685
3686static u16 ibmvscsis_get_tag(struct se_portal_group *se_tpg)
3687{
3688        struct ibmvscsis_tport *tport =
3689                container_of(se_tpg, struct ibmvscsis_tport, se_tpg);
3690
3691        return tport->tport_tpgt;
3692}
3693
3694static u32 ibmvscsis_get_default_depth(struct se_portal_group *se_tpg)
3695{
3696        return 1;
3697}
3698
3699static int ibmvscsis_check_true(struct se_portal_group *se_tpg)
3700{
3701        return 1;
3702}
3703
3704static int ibmvscsis_check_false(struct se_portal_group *se_tpg)
3705{
3706        return 0;
3707}
3708
3709static u32 ibmvscsis_tpg_get_inst_index(struct se_portal_group *se_tpg)
3710{
3711        return 1;
3712}
3713
3714static int ibmvscsis_check_stop_free(struct se_cmd *se_cmd)
3715{
3716        return target_put_sess_cmd(se_cmd);
3717}
3718
3719static void ibmvscsis_release_cmd(struct se_cmd *se_cmd)
3720{
3721        struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3722                                                 se_cmd);
3723        struct scsi_info *vscsi = cmd->adapter;
3724
3725        spin_lock_bh(&vscsi->intr_lock);
3726        /* Remove from active_q */
3727        list_move_tail(&cmd->list, &vscsi->waiting_rsp);
3728        ibmvscsis_send_messages(vscsi);
3729        spin_unlock_bh(&vscsi->intr_lock);
3730}
3731
3732static u32 ibmvscsis_sess_get_index(struct se_session *se_sess)
3733{
3734        return 0;
3735}
3736
3737static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
3738{
3739        struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3740                                                 se_cmd);
3741        struct scsi_info *vscsi = cmd->adapter;
3742        struct iu_entry *iue = cmd->iue;
3743        int rc;
3744
3745        /*
3746         * If CLIENT_FAILED or RESPONSE_Q_DOWN is set, fail the request
3747         * up front, since LIO can't do anything about it and we don't
3748         * want to attempt an srp_transfer_data.
3749         */
3750        if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) {
3751                dev_err(&vscsi->dev, "write_pending failed since: %d\n",
3752                        vscsi->flags);
3753                return -EIO;
3754
3755        }
3756
3757        rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma,
3758                               1, 1);
3759        if (rc) {
3760                dev_err(&vscsi->dev, "srp_transfer_data() failed: %d\n", rc);
3761                return -EIO;
3762        }
3763        /*
3764         * We now tell TCM to add this WRITE CDB directly into the TCM storage
3765         * object execution queue.
3766         */
3767        target_execute_cmd(se_cmd);
3768        return 0;
3769}
3770
3771static void ibmvscsis_set_default_node_attrs(struct se_node_acl *nacl)
3772{
3773}
3774
3775static int ibmvscsis_get_cmd_state(struct se_cmd *se_cmd)
3776{
3777        return 0;
3778}
3779
3780static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd)
3781{
3782        struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3783                                                 se_cmd);
3784        struct iu_entry *iue = cmd->iue;
3785        struct scsi_info *vscsi = cmd->adapter;
3786        uint len = 0;
3787        int rc;
3788
3789        rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, 1,
3790                               1);
3791        if (rc) {
3792                dev_err(&vscsi->dev, "srp_transfer_data failed: %d\n", rc);
3793                se_cmd->scsi_sense_length = 18;
3794                memset(se_cmd->sense_buffer, 0, se_cmd->scsi_sense_length);
3795                /* Logical Unit Communication Time-out asc/ascq = 0x0801 */
3796                scsi_build_sense_buffer(0, se_cmd->sense_buffer, MEDIUM_ERROR,
3797                                        0x08, 0x01);
3798        }
3799
3800        srp_build_response(vscsi, cmd, &len);
3801        cmd->rsp.format = SRP_FORMAT;
3802        cmd->rsp.len = len;
3803
3804        return 0;
3805}
3806
3807static int ibmvscsis_queue_status(struct se_cmd *se_cmd)
3808{
3809        struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3810                                                 se_cmd);
3811        struct scsi_info *vscsi = cmd->adapter;
3812        uint len;
3813
3814        dev_dbg(&vscsi->dev, "queue_status %p\n", se_cmd);
3815
3816        srp_build_response(vscsi, cmd, &len);
3817        cmd->rsp.format = SRP_FORMAT;
3818        cmd->rsp.len = len;
3819
3820        return 0;
3821}
3822
3823static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd)
3824{
3825        struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3826                                                 se_cmd);
3827        struct scsi_info *vscsi = cmd->adapter;
3828        struct ibmvscsis_cmd *cmd_itr;
3829        struct iu_entry *iue = cmd->iue;
3830        struct srp_tsk_mgmt *srp_tsk = &vio_iu(iue)->srp.tsk_mgmt;
3831        u64 tag_to_abort = be64_to_cpu(srp_tsk->task_tag);
3832        uint len;
3833
3834        dev_dbg(&vscsi->dev, "queue_tm_rsp %p, status %d\n",
3835                se_cmd, (int)se_cmd->se_tmr_req->response);
3836
3837        if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK &&
3838            cmd->se_cmd.se_tmr_req->response == TMR_TASK_DOES_NOT_EXIST) {
3839                spin_lock_bh(&vscsi->intr_lock);
3840                list_for_each_entry(cmd_itr, &vscsi->active_q, list) {
3841                        if (tag_to_abort == cmd_itr->se_cmd.tag) {
3842                                cmd_itr->abort_cmd = cmd;
3843                                cmd->flags |= DELAY_SEND;
3844                                break;
3845                        }
3846                }
3847                spin_unlock_bh(&vscsi->intr_lock);
3848        }
3849
3850        srp_build_response(vscsi, cmd, &len);
3851        cmd->rsp.format = SRP_FORMAT;
3852        cmd->rsp.len = len;
3853}
3854
3855static void ibmvscsis_aborted_task(struct se_cmd *se_cmd)
3856{
3857        struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3858                                                 se_cmd);
3859        struct scsi_info *vscsi = cmd->adapter;
3860
3861        dev_dbg(&vscsi->dev, "ibmvscsis_aborted_task %p task_tag: %llu\n",
3862                se_cmd, se_cmd->tag);
3863}
3864
3865static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf,
3866                                           struct config_group *group,
3867                                           const char *name)
3868{
3869        struct ibmvscsis_tport *tport;
3870        struct scsi_info *vscsi;
3871
3872        tport = ibmvscsis_lookup_port(name);
3873        if (tport) {
3874                vscsi = container_of(tport, struct scsi_info, tport);
3875                tport->tport_proto_id = SCSI_PROTOCOL_SRP;
3876                dev_dbg(&vscsi->dev, "make_tport(%s), pointer:%p, tport_id:%x\n",
3877                        name, tport, tport->tport_proto_id);
3878                return &tport->tport_wwn;
3879        }
3880
3881        return ERR_PTR(-EINVAL);
3882}
3883
3884static void ibmvscsis_drop_tport(struct se_wwn *wwn)
3885{
3886        struct ibmvscsis_tport *tport = container_of(wwn,
3887                                                     struct ibmvscsis_tport,
3888                                                     tport_wwn);
3889        struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
3890
3891        dev_dbg(&vscsi->dev, "drop_tport(%s)\n",
3892                config_item_name(&tport->tport_wwn.wwn_group.cg_item));
3893}
3894
3895static struct se_portal_group *ibmvscsis_make_tpg(struct se_wwn *wwn,
3896                                                  const char *name)
3897{
3898        struct ibmvscsis_tport *tport =
3899                container_of(wwn, struct ibmvscsis_tport, tport_wwn);
3900        u16 tpgt;
3901        int rc;
3902
3903        if (strstr(name, "tpgt_") != name)
3904                return ERR_PTR(-EINVAL);
3905        rc = kstrtou16(name + 5, 0, &tpgt);
3906        if (rc)
3907                return ERR_PTR(rc);
3908        tport->tport_tpgt = tpgt;
3909
3910        tport->releasing = false;
3911
3912        rc = core_tpg_register(&tport->tport_wwn, &tport->se_tpg,
3913                               tport->tport_proto_id);
3914        if (rc)
3915                return ERR_PTR(rc);
3916
3917        return &tport->se_tpg;
3918}
3919
3920static void ibmvscsis_drop_tpg(struct se_portal_group *se_tpg)
3921{
3922        struct ibmvscsis_tport *tport = container_of(se_tpg,
3923                                                     struct ibmvscsis_tport,
3924                                                     se_tpg);
3925
3926        tport->releasing = true;
3927        tport->enabled = false;
3928
3929        /*
3930         * Release the virtual I_T Nexus for this ibmvscsis TPG
3931         */
3932        ibmvscsis_drop_nexus(tport);
3933        /*
3934         * Deregister the se_tpg from TCM.
3935         */
3936        core_tpg_deregister(se_tpg);
3937}
3938
3939static ssize_t ibmvscsis_wwn_version_show(struct config_item *item,
3940                                          char *page)
3941{
3942        return scnprintf(page, PAGE_SIZE, "%s\n", IBMVSCSIS_VERSION);
3943}
3944CONFIGFS_ATTR_RO(ibmvscsis_wwn_, version);
3945
3946static struct configfs_attribute *ibmvscsis_wwn_attrs[] = {
3947        &ibmvscsis_wwn_attr_version,
3948        NULL,
3949};
3950
3951
3952static int ibmvscsis_enable_tpg(struct se_portal_group *se_tpg, bool enable)
3953{
3954        struct ibmvscsis_tport *tport = container_of(se_tpg,
3955                                                     struct ibmvscsis_tport,
3956                                                     se_tpg);
3957        struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
3958        long lrc;
3959
3960        if (enable) {
3961                spin_lock_bh(&vscsi->intr_lock);
3962                tport->enabled = true;
3963                lrc = ibmvscsis_enable_change_state(vscsi);
3964                if (lrc)
3965                        dev_err(&vscsi->dev, "enable_change_state failed, rc %ld state %d\n",
3966                                lrc, vscsi->state);
3967                spin_unlock_bh(&vscsi->intr_lock);
3968        } else {
3969                spin_lock_bh(&vscsi->intr_lock);
3970                tport->enabled = false;
3971                /* This simulates the server going down */
3972                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
3973                spin_unlock_bh(&vscsi->intr_lock);
3974        }
3975
3976        return 0;
3977}
3978
3979static const struct target_core_fabric_ops ibmvscsis_ops = {
3980        .module                         = THIS_MODULE,
3981        .fabric_name                    = "ibmvscsis",
3982        .max_data_sg_nents              = MAX_TXU / PAGE_SIZE,
3983        .tpg_get_wwn                    = ibmvscsis_get_fabric_wwn,
3984        .tpg_get_tag                    = ibmvscsis_get_tag,
3985        .tpg_get_default_depth          = ibmvscsis_get_default_depth,
3986        .tpg_check_demo_mode            = ibmvscsis_check_true,
3987        .tpg_check_demo_mode_cache      = ibmvscsis_check_true,
3988        .tpg_check_demo_mode_write_protect = ibmvscsis_check_false,
3989        .tpg_check_prod_mode_write_protect = ibmvscsis_check_false,
3990        .tpg_get_inst_index             = ibmvscsis_tpg_get_inst_index,
3991        .check_stop_free                = ibmvscsis_check_stop_free,
3992        .release_cmd                    = ibmvscsis_release_cmd,
3993        .sess_get_index                 = ibmvscsis_sess_get_index,
3994        .write_pending                  = ibmvscsis_write_pending,
3995        .set_default_node_attributes    = ibmvscsis_set_default_node_attrs,
3996        .get_cmd_state                  = ibmvscsis_get_cmd_state,
3997        .queue_data_in                  = ibmvscsis_queue_data_in,
3998        .queue_status                   = ibmvscsis_queue_status,
3999        .queue_tm_rsp                   = ibmvscsis_queue_tm_rsp,
4000        .aborted_task                   = ibmvscsis_aborted_task,
4001        /*
4002         * Setup function pointers for logic in target_core_fabric_configfs.c
4003         */
4004        .fabric_make_wwn                = ibmvscsis_make_tport,
4005        .fabric_drop_wwn                = ibmvscsis_drop_tport,
4006        .fabric_make_tpg                = ibmvscsis_make_tpg,
4007        .fabric_enable_tpg              = ibmvscsis_enable_tpg,
4008        .fabric_drop_tpg                = ibmvscsis_drop_tpg,
4009
4010        .tfc_wwn_attrs                  = ibmvscsis_wwn_attrs,
4011};
4012
4013static void ibmvscsis_dev_release(struct device *dev) {}
4014
4015static struct device_attribute dev_attr_system_id =
4016        __ATTR(system_id, S_IRUGO, system_id_show, NULL);
4017
4018static struct device_attribute dev_attr_partition_number =
4019        __ATTR(partition_number, S_IRUGO, partition_number_show, NULL);
4020
4021static struct device_attribute dev_attr_unit_address =
4022        __ATTR(unit_address, S_IRUGO, unit_address_show, NULL);
4023
4024static struct attribute *ibmvscsis_dev_attrs[] = {
4025        &dev_attr_system_id.attr,
4026        &dev_attr_partition_number.attr,
4027        &dev_attr_unit_address.attr,
            NULL,
4028};
4029ATTRIBUTE_GROUPS(ibmvscsis_dev);
4030
4031static struct class ibmvscsis_class = {
4032        .name           = "ibmvscsis",
4033        .dev_release    = ibmvscsis_dev_release,
4034        .dev_groups     = ibmvscsis_dev_groups,
4035};
4036
4037static const struct vio_device_id ibmvscsis_device_table[] = {
4038        { "v-scsi-host", "IBM,v-scsi-host" },
4039        { "", "" }
4040};
4041MODULE_DEVICE_TABLE(vio, ibmvscsis_device_table);
4042
4043static struct vio_driver ibmvscsis_driver = {
4044        .name = "ibmvscsis",
4045        .id_table = ibmvscsis_device_table,
4046        .probe = ibmvscsis_probe,
4047        .remove = ibmvscsis_remove,
4048};
4049
4050/*
4051 * ibmvscsis_init() - Kernel Module initialization
4052 *
4053 * Note: vio_register_driver() registers callback functions, and at least one
4054 * of those callback functions calls into TCM (the Linux I/O Target Subsystem),
4055 * so the SCSI Target template must be registered before vio_register_driver()
4056 * is called.
4057 */
4058static int __init ibmvscsis_init(void)
4059{
4060        int rc = 0;
4061
4062        rc = ibmvscsis_get_system_info();
4063        if (rc) {
4064                pr_err("rc %d from get_system_info\n", rc);
4065                goto out;
4066        }
4067
4068        rc = class_register(&ibmvscsis_class);
4069        if (rc) {
4070                pr_err("failed class register\n");
4071                goto out;
4072        }
4073
4074        rc = target_register_template(&ibmvscsis_ops);
4075        if (rc) {
4076                pr_err("rc %d from target_register_template\n", rc);
4077                goto unregister_class;
4078        }
4079
4080        rc = vio_register_driver(&ibmvscsis_driver);
4081        if (rc) {
4082                pr_err("rc %d from vio_register_driver\n", rc);
4083                goto unregister_target;
4084        }
4085
4086        return 0;
4087
4088unregister_target:
4089        target_unregister_template(&ibmvscsis_ops);
4090unregister_class:
4091        class_unregister(&ibmvscsis_class);
4092out:
4093        return rc;
4094}
4095
4096static void __exit ibmvscsis_exit(void)
4097{
4098        pr_info("Unregister IBM virtual SCSI host driver\n");
4099        vio_unregister_driver(&ibmvscsis_driver);
4100        target_unregister_template(&ibmvscsis_ops);
4101        class_unregister(&ibmvscsis_class);
4102}
4103
4104MODULE_DESCRIPTION("IBMVSCSIS fabric driver");
4105MODULE_AUTHOR("Bryant G. Ly and Michael Cyr");
4106MODULE_LICENSE("GPL");
4107MODULE_VERSION(IBMVSCSIS_VERSION);
4108module_init(ibmvscsis_init);
4109module_exit(ibmvscsis_exit);
4110