linux/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*******************************************************************************
   3 * IBM Virtual SCSI Target Driver
   4 * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp.
   5 *                         Santiago Leon (santil@us.ibm.com) IBM Corp.
   6 *                         Linda Xie (lxie@us.ibm.com) IBM Corp.
   7 *
   8 * Copyright (C) 2005-2011 FUJITA Tomonori <tomof@acm.org>
   9 * Copyright (C) 2010 Nicholas A. Bellinger <nab@kernel.org>
  10 *
  11 * Authors: Bryant G. Ly <bryantly@linux.vnet.ibm.com>
  12 * Authors: Michael Cyr <mikecyr@linux.vnet.ibm.com>
  13 *
  14 ****************************************************************************/
  15
  16#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt
  17
  18#include <linux/module.h>
  19#include <linux/kernel.h>
  20#include <linux/slab.h>
  21#include <linux/types.h>
  22#include <linux/list.h>
  23#include <linux/string.h>
  24#include <linux/delay.h>
  25
  26#include <target/target_core_base.h>
  27#include <target/target_core_fabric.h>
  28
  29#include <asm/hvcall.h>
  30#include <asm/vio.h>
  31
  32#include <scsi/viosrp.h>
  33
  34#include "ibmvscsi_tgt.h"
  35
  36#define IBMVSCSIS_VERSION       "v0.2"
  37
  38#define INITIAL_SRP_LIMIT       800
  39#define DEFAULT_MAX_SECTORS     256
  40#define MAX_TXU                 1024 * 1024
  41
  42static uint max_vdma_size = MAX_H_COPY_RDMA;
  43
  44static char system_id[SYS_ID_NAME_LEN] = "";
  45static char partition_name[PARTITION_NAMELEN] = "UNKNOWN";
  46static uint partition_number = -1;
  47
  48/* Adapter list and lock to control it */
  49static DEFINE_SPINLOCK(ibmvscsis_dev_lock);
  50static LIST_HEAD(ibmvscsis_dev_list);
  51
  52static long ibmvscsis_parse_command(struct scsi_info *vscsi,
  53                                    struct viosrp_crq *crq);
  54
  55static void ibmvscsis_adapter_idle(struct scsi_info *vscsi);
  56
  57static void ibmvscsis_determine_resid(struct se_cmd *se_cmd,
  58                                      struct srp_rsp *rsp)
  59{
  60        u32 residual_count = se_cmd->residual_count;
  61
  62        if (!residual_count)
  63                return;
  64
  65        if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
  66                if (se_cmd->data_direction == DMA_TO_DEVICE) {
  67                        /* residual data from an underflow write */
  68                        rsp->flags = SRP_RSP_FLAG_DOUNDER;
  69                        rsp->data_out_res_cnt = cpu_to_be32(residual_count);
  70                } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
  71                        /* residual data from an underflow read */
  72                        rsp->flags = SRP_RSP_FLAG_DIUNDER;
  73                        rsp->data_in_res_cnt = cpu_to_be32(residual_count);
  74                }
  75        } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
  76                if (se_cmd->data_direction == DMA_TO_DEVICE) {
  77                        /* residual data from an overflow write */
  78                        rsp->flags = SRP_RSP_FLAG_DOOVER;
  79                        rsp->data_out_res_cnt = cpu_to_be32(residual_count);
  80                } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
  81                        /* residual data from an overflow read */
  82                        rsp->flags = SRP_RSP_FLAG_DIOVER;
  83                        rsp->data_in_res_cnt = cpu_to_be32(residual_count);
  84                }
  85        }
  86}
  87
  88/**
  89 * connection_broken() - Determine if the connection to the client is good
  90 * @vscsi:      Pointer to our adapter structure
  91 *
  92 * This function attempts to send a ping MAD to the client. If the call to
  93 * queue the request returns H_CLOSED then the connection has been broken
  94 * and the function returns TRUE.
  95 *
  96 * EXECUTION ENVIRONMENT:
  97 *      Interrupt or Process environment
  98 */
  99static bool connection_broken(struct scsi_info *vscsi)
 100{
 101        struct viosrp_crq *crq;
 102        u64 buffer[2] = { 0, 0 };
 103        long h_return_code;
 104        bool rc = false;
 105
 106        /* create a PING crq */
 107        crq = (struct viosrp_crq *)&buffer;
 108        crq->valid = VALID_CMD_RESP_EL;
 109        crq->format = MESSAGE_IN_CRQ;
 110        crq->status = PING;
 111
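        /*
         * h_send_crq takes the 16-byte CRQ element as two 64-bit values,
         * so pass it the high and low halves of the local buffer.
         */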
 112        h_return_code = h_send_crq(vscsi->dds.unit_id,
 113                                   cpu_to_be64(buffer[MSG_HI]),
 114                                   cpu_to_be64(buffer[MSG_LOW]));
 115
 116        dev_dbg(&vscsi->dev, "Connection_broken: rc %ld\n", h_return_code);
 117
 118        if (h_return_code == H_CLOSED)
 119                rc = true;
 120
 121        return rc;
 122}
 123
 124/**
 125 * ibmvscsis_unregister_command_q() - Helper Function-Unregister Command Queue
 126 * @vscsi:      Pointer to our adapter structure
 127 *
  128 * This function calls h_free_crq and then frees the interrupt bit, etc.
  129 * It must release the lock before doing so because of the time it can take
  130 * for h_free_crq in PHYP.
  131 * NOTE: the caller must make sure that state and/or flags will prevent the
  132 *       interrupt handler from scheduling work.
  133 * NOTE: anyone calling this function may need to set the CRQ_CLOSED flag;
  134 *       we can't do it here, because we don't have the lock.
 135 *
 136 * EXECUTION ENVIRONMENT:
 137 *      Process level
 138 */
 139static long ibmvscsis_unregister_command_q(struct scsi_info *vscsi)
 140{
 141        long qrc;
 142        long rc = ADAPT_SUCCESS;
 143        int ticks = 0;
 144
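        /*
         * Loop on h_free_crq until it either succeeds or fails hard,
         * backing off as hinted by the H_LONG_BUSY return codes and
         * counting the elapsed time in ticks (roughly milliseconds).
         */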
 145        do {
 146                qrc = h_free_crq(vscsi->dds.unit_id);
 147                switch (qrc) {
 148                case H_SUCCESS:
 149                        spin_lock_bh(&vscsi->intr_lock);
 150                        vscsi->flags &= ~PREP_FOR_SUSPEND_FLAGS;
 151                        spin_unlock_bh(&vscsi->intr_lock);
 152                        break;
 153
 154                case H_HARDWARE:
 155                case H_PARAMETER:
 156                        dev_err(&vscsi->dev, "unregister_command_q: error from h_free_crq %ld\n",
 157                                qrc);
 158                        rc = ERROR;
 159                        break;
 160
 161                case H_BUSY:
 162                case H_LONG_BUSY_ORDER_1_MSEC:
 163                        /* msleep not good for small values */
 164                        usleep_range(1000, 2000);
 165                        ticks += 1;
 166                        break;
 167                case H_LONG_BUSY_ORDER_10_MSEC:
 168                        usleep_range(10000, 20000);
 169                        ticks += 10;
 170                        break;
 171                case H_LONG_BUSY_ORDER_100_MSEC:
 172                        msleep(100);
 173                        ticks += 100;
 174                        break;
 175                case H_LONG_BUSY_ORDER_1_SEC:
 176                        ssleep(1);
 177                        ticks += 1000;
 178                        break;
 179                case H_LONG_BUSY_ORDER_10_SEC:
 180                        ssleep(10);
 181                        ticks += 10000;
 182                        break;
 183                case H_LONG_BUSY_ORDER_100_SEC:
 184                        ssleep(100);
 185                        ticks += 100000;
 186                        break;
 187                default:
 188                        dev_err(&vscsi->dev, "unregister_command_q: unknown error %ld from h_free_crq\n",
 189                                qrc);
 190                        rc = ERROR;
 191                        break;
 192                }
 193
 194                /*
  195                 * don't wait more than 300 seconds;
  196                 * ticks are in milliseconds, more or less
 197                 */
 198                if (ticks > 300000 && qrc != H_SUCCESS) {
 199                        rc = ERROR;
 200                        dev_err(&vscsi->dev, "Excessive wait for h_free_crq\n");
 201                }
 202        } while (qrc != H_SUCCESS && rc == ADAPT_SUCCESS);
 203
 204        dev_dbg(&vscsi->dev, "Freeing CRQ: phyp rc %ld, rc %ld\n", qrc, rc);
 205
 206        return rc;
 207}
 208
 209/**
 210 * ibmvscsis_delete_client_info() - Helper function to Delete Client Info
 211 * @vscsi:      Pointer to our adapter structure
 212 * @client_closed:      True if client closed its queue
 213 *
 214 * Deletes information specific to the client when the client goes away
 215 *
 216 * EXECUTION ENVIRONMENT:
 217 *      Interrupt or Process
 218 */
 219static void ibmvscsis_delete_client_info(struct scsi_info *vscsi,
 220                                         bool client_closed)
 221{
 222        vscsi->client_cap = 0;
 223
 224        /*
 225         * Some things we don't want to clear if we're closing the queue,
 226         * because some clients don't resend the host handshake when they
 227         * get a transport event.
 228         */
 229        if (client_closed)
 230                vscsi->client_data.os_type = 0;
 231}
 232
 233/**
 234 * ibmvscsis_free_command_q() - Free Command Queue
 235 * @vscsi:      Pointer to our adapter structure
 236 *
 237 * This function calls unregister_command_q, then clears interrupts and
 238 * any pending interrupt acknowledgments associated with the command q.
 239 * It also clears memory if there is no error.
 240 *
  241 * PHYP did not meet the PAPR architecture, so we must give up the
  242 * lock. This causes a timing hole regarding state changes.  To close the
  243 * hole this routine does accounting on any change that occurred while
  244 * the lock was not held.
  245 * NOTE: must give up and then reacquire the interrupt lock; the caller must
  246 *       make sure that state and/or flags will prevent the interrupt handler
  247 *       from scheduling work.
 248 *
 249 * EXECUTION ENVIRONMENT:
 250 *      Process level, interrupt lock is held
 251 */
 252static long ibmvscsis_free_command_q(struct scsi_info *vscsi)
 253{
 254        int bytes;
 255        u32 flags_under_lock;
 256        u16 state_under_lock;
 257        long rc = ADAPT_SUCCESS;
 258
 259        if (!(vscsi->flags & CRQ_CLOSED)) {
 260                vio_disable_interrupts(vscsi->dma_dev);
 261
 262                state_under_lock = vscsi->new_state;
 263                flags_under_lock = vscsi->flags;
 264                vscsi->phyp_acr_state = 0;
 265                vscsi->phyp_acr_flags = 0;
 266
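                /*
                 * Drop the interrupt lock across the slow unregister call;
                 * the state and flags saved above are compared afterwards to
                 * account for anything that changed while the lock was free.
                 */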
 267                spin_unlock_bh(&vscsi->intr_lock);
 268                rc = ibmvscsis_unregister_command_q(vscsi);
 269                spin_lock_bh(&vscsi->intr_lock);
 270
 271                if (state_under_lock != vscsi->new_state)
 272                        vscsi->phyp_acr_state = vscsi->new_state;
 273
 274                vscsi->phyp_acr_flags = ((~flags_under_lock) & vscsi->flags);
 275
 276                if (rc == ADAPT_SUCCESS) {
 277                        bytes = vscsi->cmd_q.size * PAGE_SIZE;
 278                        memset(vscsi->cmd_q.base_addr, 0, bytes);
 279                        vscsi->cmd_q.index = 0;
 280                        vscsi->flags |= CRQ_CLOSED;
 281
 282                        ibmvscsis_delete_client_info(vscsi, false);
 283                }
 284
 285                dev_dbg(&vscsi->dev, "free_command_q: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
 286                        vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
 287                        vscsi->phyp_acr_state);
 288        }
 289        return rc;
 290}
 291
 292/**
 293 * ibmvscsis_cmd_q_dequeue() - Get valid Command element
 294 * @mask:       Mask to use in case index wraps
 295 * @current_index:      Current index into command queue
 296 * @base_addr:  Pointer to start of command queue
 297 *
  298 * Returns a pointer to a valid command element or NULL if the command
 299 * queue is empty
 300 *
 301 * EXECUTION ENVIRONMENT:
 302 *      Interrupt environment, interrupt lock held
 303 */
 304static struct viosrp_crq *ibmvscsis_cmd_q_dequeue(uint mask,
 305                                                  uint *current_index,
 306                                                  struct viosrp_crq *base_addr)
 307{
 308        struct viosrp_crq *ptr;
 309
 310        ptr = base_addr + *current_index;
 311
 312        if (ptr->valid) {
 313                *current_index = (*current_index + 1) & mask;
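                /* don't read the element payload until 'valid' has been seen */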
 314                dma_rmb();
 315        } else {
 316                ptr = NULL;
 317        }
 318
 319        return ptr;
 320}
 321
 322/**
 323 * ibmvscsis_send_init_message() - send initialize message to the client
 324 * @vscsi:      Pointer to our adapter structure
 325 * @format:     Which Init Message format to send
 326 *
 327 * EXECUTION ENVIRONMENT:
  328 *      Interrupt environment, interrupt lock held
 329 */
 330static long ibmvscsis_send_init_message(struct scsi_info *vscsi, u8 format)
 331{
 332        struct viosrp_crq *crq;
 333        u64 buffer[2] = { 0, 0 };
 334        long rc;
 335
 336        crq = (struct viosrp_crq *)&buffer;
 337        crq->valid = VALID_INIT_MSG;
 338        crq->format = format;
 339        rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]),
 340                        cpu_to_be64(buffer[MSG_LOW]));
 341
 342        return rc;
 343}
 344
 345/**
 346 * ibmvscsis_check_init_msg() - Check init message valid
 347 * @vscsi:      Pointer to our adapter structure
 348 * @format:     Pointer to return format of Init Message, if any.
 349 *              Set to UNUSED_FORMAT if no Init Message in queue.
 350 *
  351 * Checks if an initialize message was queued by the initiator
 352 * after the queue was created and before the interrupt was enabled.
 353 *
 354 * EXECUTION ENVIRONMENT:
 355 *      Process level only, interrupt lock held
 356 */
 357static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format)
 358{
 359        struct viosrp_crq *crq;
 360        long rc = ADAPT_SUCCESS;
 361
 362        crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask, &vscsi->cmd_q.index,
 363                                      vscsi->cmd_q.base_addr);
 364        if (!crq) {
 365                *format = (uint)UNUSED_FORMAT;
 366        } else if (crq->valid == VALID_INIT_MSG && crq->format == INIT_MSG) {
 367                *format = (uint)INIT_MSG;
 368                crq->valid = INVALIDATE_CMD_RESP_EL;
 369                dma_rmb();
 370
 371                /*
  372                 * the caller has ensured no initialize message was
  373                 * sent after the queue was created, so there should be
  374                 * no other message on the queue.
 375                 */
 376                crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask,
 377                                              &vscsi->cmd_q.index,
 378                                              vscsi->cmd_q.base_addr);
 379                if (crq) {
 380                        *format = (uint)(crq->format);
 381                        rc = ERROR;
 382                        crq->valid = INVALIDATE_CMD_RESP_EL;
 383                        dma_rmb();
 384                }
 385        } else {
 386                *format = (uint)(crq->format);
 387                rc = ERROR;
 388                crq->valid = INVALIDATE_CMD_RESP_EL;
 389                dma_rmb();
 390        }
 391
 392        return rc;
 393}
 394
 395/**
 396 * ibmvscsis_disconnect() - Helper function to disconnect
 397 * @work:       Pointer to work_struct, gives access to our adapter structure
 398 *
 399 * An error has occurred or the driver received a Transport event,
 400 * and the driver is requesting that the command queue be de-registered
 401 * in a safe manner. If there is no outstanding I/O then we can stop the
  402 * queue. If we are restarting the queue, it will be reflected in
  403 * the state of the adapter.
 404 *
 405 * EXECUTION ENVIRONMENT:
 406 *      Process environment
 407 */
 408static void ibmvscsis_disconnect(struct work_struct *work)
 409{
 410        struct scsi_info *vscsi = container_of(work, struct scsi_info,
 411                                               proc_work);
 412        u16 new_state;
 413        bool wait_idle = false;
 414
 415        spin_lock_bh(&vscsi->intr_lock);
 416        new_state = vscsi->new_state;
 417        vscsi->new_state = 0;
 418
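        /*
         * Mark the disconnect as in progress rather than merely scheduled;
         * while DISCONNECT_SCHEDULED is set, post_disconnect only upgrades
         * new_state instead of queueing more work.
         */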
 419        vscsi->flags |= DISCONNECT_SCHEDULED;
 420        vscsi->flags &= ~SCHEDULE_DISCONNECT;
 421
 422        dev_dbg(&vscsi->dev, "disconnect: flags 0x%x, state 0x%hx\n",
 423                vscsi->flags, vscsi->state);
 424
 425        /*
 426         * check which state we are in and see if we
  427         * should transition to the new state
 428         */
 429        switch (vscsi->state) {
 430        /* Should never be called while in this state. */
 431        case NO_QUEUE:
 432        /*
 433         * Can never transition from this state;
  434         * ignore errors and log out.
 435         */
 436        case UNCONFIGURING:
 437                break;
 438
 439        /* can transition from this state to UNCONFIGURING */
 440        case ERR_DISCONNECT:
 441                if (new_state == UNCONFIGURING)
 442                        vscsi->state = new_state;
 443                break;
 444
 445        /*
  446         * Can transition from this state to unconfiguring
 447         * or err disconnect.
 448         */
 449        case ERR_DISCONNECT_RECONNECT:
 450                switch (new_state) {
 451                case UNCONFIGURING:
 452                case ERR_DISCONNECT:
 453                        vscsi->state = new_state;
 454                        break;
 455
 456                case WAIT_IDLE:
 457                        break;
 458                default:
 459                        break;
 460                }
 461                break;
 462
 463        /* can transition from this state to UNCONFIGURING */
 464        case ERR_DISCONNECTED:
 465                if (new_state == UNCONFIGURING)
 466                        vscsi->state = new_state;
 467                break;
 468
 469        case WAIT_ENABLED:
 470                switch (new_state) {
 471                case UNCONFIGURING:
 472                        vscsi->state = new_state;
 473                        vscsi->flags |= RESPONSE_Q_DOWN;
 474                        vscsi->flags &= ~(SCHEDULE_DISCONNECT |
 475                                          DISCONNECT_SCHEDULED);
 476                        dma_rmb();
 477                        if (vscsi->flags & CFG_SLEEPING) {
 478                                vscsi->flags &= ~CFG_SLEEPING;
 479                                complete(&vscsi->unconfig);
 480                        }
 481                        break;
 482
 483                /* should never happen */
 484                case ERR_DISCONNECT:
 485                case ERR_DISCONNECT_RECONNECT:
 486                case WAIT_IDLE:
 487                        dev_err(&vscsi->dev, "disconnect: invalid state %d for WAIT_IDLE\n",
 488                                vscsi->state);
 489                        break;
 490                }
 491                break;
 492
 493        case WAIT_IDLE:
 494                switch (new_state) {
 495                case UNCONFIGURING:
 496                        vscsi->flags |= RESPONSE_Q_DOWN;
 497                        vscsi->state = new_state;
 498                        vscsi->flags &= ~(SCHEDULE_DISCONNECT |
 499                                          DISCONNECT_SCHEDULED);
 500                        ibmvscsis_free_command_q(vscsi);
 501                        break;
 502                case ERR_DISCONNECT:
 503                case ERR_DISCONNECT_RECONNECT:
 504                        vscsi->state = new_state;
 505                        break;
 506                }
 507                break;
 508
 509        /*
  510         * The initiator has not done a successful SRP login,
  511         * or has done a successful SRP logout (the adapter was not
  512         * busy). In the first case there can be responses queued,
  513         * waiting for space on the initiator's response queue (MAD).
  514         * In the second case the adapter is idle. Assume the worst
  515         * case, i.e. the second case.
 516         */
 517        case WAIT_CONNECTION:
 518        case CONNECTED:
 519        case SRP_PROCESSING:
 520                wait_idle = true;
 521                vscsi->state = new_state;
 522                break;
 523
 524        /* can transition from this state to UNCONFIGURING */
 525        case UNDEFINED:
 526                if (new_state == UNCONFIGURING)
 527                        vscsi->state = new_state;
 528                break;
 529        default:
 530                break;
 531        }
 532
 533        if (wait_idle) {
 534                dev_dbg(&vscsi->dev, "disconnect start wait, active %d, sched %d\n",
 535                        (int)list_empty(&vscsi->active_q),
 536                        (int)list_empty(&vscsi->schedule_q));
 537                if (!list_empty(&vscsi->active_q) ||
 538                    !list_empty(&vscsi->schedule_q)) {
 539                        vscsi->flags |= WAIT_FOR_IDLE;
 540                        dev_dbg(&vscsi->dev, "disconnect flags 0x%x\n",
 541                                vscsi->flags);
 542                        /*
  543                         * This routine cannot be called with the interrupt
 544                         * lock held.
 545                         */
 546                        spin_unlock_bh(&vscsi->intr_lock);
 547                        wait_for_completion(&vscsi->wait_idle);
 548                        spin_lock_bh(&vscsi->intr_lock);
 549                }
 550                dev_dbg(&vscsi->dev, "disconnect stop wait\n");
 551
 552                ibmvscsis_adapter_idle(vscsi);
 553        }
 554
 555        spin_unlock_bh(&vscsi->intr_lock);
 556}
 557
 558/**
 559 * ibmvscsis_post_disconnect() - Schedule the disconnect
 560 * @vscsi:      Pointer to our adapter structure
 561 * @new_state:  State to move to after disconnecting
 562 * @flag_bits:  Flags to turn on in adapter structure
 563 *
 564 * If it's already been scheduled, then see if we need to "upgrade"
 565 * the new state (if the one passed in is more "severe" than the
 566 * previous one).
 567 *
 568 * PRECONDITION:
 569 *      interrupt lock is held
 570 */
 571static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
 572                                      uint flag_bits)
 573{
 574        uint state;
 575
 576        /* check the validity of the new state */
 577        switch (new_state) {
 578        case UNCONFIGURING:
 579        case ERR_DISCONNECT:
 580        case ERR_DISCONNECT_RECONNECT:
 581        case WAIT_IDLE:
 582                break;
 583
 584        default:
 585                dev_err(&vscsi->dev, "post_disconnect: Invalid new state %d\n",
 586                        new_state);
 587                return;
 588        }
 589
 590        vscsi->flags |= flag_bits;
 591
 592        dev_dbg(&vscsi->dev, "post_disconnect: new_state 0x%x, flag_bits 0x%x, vscsi->flags 0x%x, state %hx\n",
 593                new_state, flag_bits, vscsi->flags, vscsi->state);
 594
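        /*
         * If no disconnect is scheduled or already in progress, queue the
         * disconnect work; otherwise just record the "most severe" state to
         * move to once the current disconnect completes.
         */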
 595        if (!(vscsi->flags & (DISCONNECT_SCHEDULED | SCHEDULE_DISCONNECT))) {
 596                vscsi->flags |= SCHEDULE_DISCONNECT;
 597                vscsi->new_state = new_state;
 598
 599                INIT_WORK(&vscsi->proc_work, ibmvscsis_disconnect);
 600                (void)queue_work(vscsi->work_q, &vscsi->proc_work);
 601        } else {
 602                if (vscsi->new_state)
 603                        state = vscsi->new_state;
 604                else
 605                        state = vscsi->state;
 606
 607                switch (state) {
 608                case NO_QUEUE:
 609                case UNCONFIGURING:
 610                        break;
 611
 612                case ERR_DISCONNECTED:
 613                case ERR_DISCONNECT:
 614                case UNDEFINED:
 615                        if (new_state == UNCONFIGURING)
 616                                vscsi->new_state = new_state;
 617                        break;
 618
 619                case ERR_DISCONNECT_RECONNECT:
 620                        switch (new_state) {
 621                        case UNCONFIGURING:
 622                        case ERR_DISCONNECT:
 623                                vscsi->new_state = new_state;
 624                                break;
 625                        default:
 626                                break;
 627                        }
 628                        break;
 629
 630                case WAIT_ENABLED:
 631                case WAIT_IDLE:
 632                case WAIT_CONNECTION:
 633                case CONNECTED:
 634                case SRP_PROCESSING:
 635                        vscsi->new_state = new_state;
 636                        break;
 637
 638                default:
 639                        break;
 640                }
 641        }
 642
 643        dev_dbg(&vscsi->dev, "Leaving post_disconnect: flags 0x%x, new_state 0x%x\n",
 644                vscsi->flags, vscsi->new_state);
 645}
 646
 647/**
 648 * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message
 649 * @vscsi:      Pointer to our adapter structure
 650 *
 651 * Must be called with interrupt lock held.
 652 */
 653static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi)
 654{
 655        long rc = ADAPT_SUCCESS;
 656
 657        switch (vscsi->state) {
 658        case NO_QUEUE:
 659        case ERR_DISCONNECT:
 660        case ERR_DISCONNECT_RECONNECT:
 661        case ERR_DISCONNECTED:
 662        case UNCONFIGURING:
 663        case UNDEFINED:
 664                rc = ERROR;
 665                break;
 666
 667        case WAIT_CONNECTION:
 668                vscsi->state = CONNECTED;
 669                break;
 670
 671        case WAIT_IDLE:
 672        case SRP_PROCESSING:
 673        case CONNECTED:
 674        case WAIT_ENABLED:
 675        default:
 676                rc = ERROR;
 677                dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n",
 678                        vscsi->state);
 679                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
 680                break;
 681        }
 682
 683        return rc;
 684}
 685
 686/**
 687 * ibmvscsis_handle_init_msg() - Respond to an Init Message
 688 * @vscsi:      Pointer to our adapter structure
 689 *
 690 * Must be called with interrupt lock held.
 691 */
 692static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
 693{
 694        long rc = ADAPT_SUCCESS;
 695
 696        switch (vscsi->state) {
 697        case WAIT_CONNECTION:
 698                rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
 699                switch (rc) {
 700                case H_SUCCESS:
 701                        vscsi->state = CONNECTED;
 702                        break;
 703
 704                case H_PARAMETER:
 705                        dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
 706                                rc);
 707                        ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
 708                        break;
 709
 710                case H_DROPPED:
 711                        dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
 712                                rc);
 713                        rc = ERROR;
 714                        ibmvscsis_post_disconnect(vscsi,
 715                                                  ERR_DISCONNECT_RECONNECT, 0);
 716                        break;
 717
 718                case H_CLOSED:
 719                        dev_warn(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
 720                                 rc);
 721                        rc = 0;
 722                        break;
 723                }
 724                break;
 725
 726        case UNDEFINED:
 727                rc = ERROR;
 728                break;
 729
 730        case UNCONFIGURING:
 731                break;
 732
 733        case WAIT_ENABLED:
 734        case CONNECTED:
 735        case SRP_PROCESSING:
 736        case WAIT_IDLE:
 737        case NO_QUEUE:
 738        case ERR_DISCONNECT:
 739        case ERR_DISCONNECT_RECONNECT:
 740        case ERR_DISCONNECTED:
 741        default:
 742                rc = ERROR;
 743                dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n",
 744                        vscsi->state);
 745                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
 746                break;
 747        }
 748
 749        return rc;
 750}
 751
 752/**
 753 * ibmvscsis_init_msg() - Respond to an init message
 754 * @vscsi:      Pointer to our adapter structure
 755 * @crq:        Pointer to CRQ element containing the Init Message
 756 *
 757 * EXECUTION ENVIRONMENT:
 758 *      Interrupt, interrupt lock held
 759 */
 760static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
 761{
 762        long rc = ADAPT_SUCCESS;
 763
 764        dev_dbg(&vscsi->dev, "init_msg: state 0x%hx\n", vscsi->state);
 765
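        /*
         * Ask the hypervisor for the partner (client) partition information;
         * the reply is placed in the page mapped at map_ioba / map_buf.
         */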
 766        rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
 767                      (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
 768                      0);
 769        if (rc == H_SUCCESS) {
 770                vscsi->client_data.partition_number =
 771                        be64_to_cpu(*(u64 *)vscsi->map_buf);
 772                dev_dbg(&vscsi->dev, "init_msg, part num %d\n",
 773                        vscsi->client_data.partition_number);
 774        } else {
 775                dev_dbg(&vscsi->dev, "init_msg h_vioctl rc %ld\n", rc);
 776                rc = ADAPT_SUCCESS;
 777        }
 778
 779        if (crq->format == INIT_MSG) {
 780                rc = ibmvscsis_handle_init_msg(vscsi);
 781        } else if (crq->format == INIT_COMPLETE_MSG) {
 782                rc = ibmvscsis_handle_init_compl_msg(vscsi);
 783        } else {
 784                rc = ERROR;
 785                dev_err(&vscsi->dev, "init_msg: invalid format %d\n",
 786                        (uint)crq->format);
 787                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
 788        }
 789
 790        return rc;
 791}
 792
 793/**
 794 * ibmvscsis_establish_new_q() - Establish new CRQ queue
 795 * @vscsi:      Pointer to our adapter structure
 796 *
 797 * Must be called with interrupt lock held.
 798 */
 799static long ibmvscsis_establish_new_q(struct scsi_info *vscsi)
 800{
 801        long rc = ADAPT_SUCCESS;
 802        uint format;
 803
 804        rc = h_vioctl(vscsi->dds.unit_id, H_ENABLE_PREPARE_FOR_SUSPEND, 30000,
 805                      0, 0, 0, 0);
 806        if (rc == H_SUCCESS)
 807                vscsi->flags |= PREP_FOR_SUSPEND_ENABLED;
 808        else if (rc != H_NOT_FOUND)
 809                dev_err(&vscsi->dev, "Error from Enable Prepare for Suspend: %ld\n",
 810                        rc);
 811
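        /*
         * Keep only the flags that survive a queue reset and clear the
         * response timer and credit/debit accounting for the new connection.
         */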
 812        vscsi->flags &= PRESERVE_FLAG_FIELDS;
 813        vscsi->rsp_q_timer.timer_pops = 0;
 814        vscsi->debit = 0;
 815        vscsi->credit = 0;
 816
 817        rc = vio_enable_interrupts(vscsi->dma_dev);
 818        if (rc) {
 819                dev_warn(&vscsi->dev, "establish_new_q: failed to enable interrupts, rc %ld\n",
 820                         rc);
 821                return rc;
 822        }
 823
 824        rc = ibmvscsis_check_init_msg(vscsi, &format);
 825        if (rc) {
 826                dev_err(&vscsi->dev, "establish_new_q: check_init_msg failed, rc %ld\n",
 827                        rc);
 828                return rc;
 829        }
 830
 831        if (format == UNUSED_FORMAT) {
 832                rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
 833                switch (rc) {
 834                case H_SUCCESS:
 835                case H_DROPPED:
 836                case H_CLOSED:
 837                        rc = ADAPT_SUCCESS;
 838                        break;
 839
 840                case H_PARAMETER:
 841                case H_HARDWARE:
 842                        break;
 843
 844                default:
 845                        vscsi->state = UNDEFINED;
 846                        rc = H_HARDWARE;
 847                        break;
 848                }
 849        } else if (format == INIT_MSG) {
 850                rc = ibmvscsis_handle_init_msg(vscsi);
 851        }
 852
 853        return rc;
 854}
 855
 856/**
 857 * ibmvscsis_reset_queue() - Reset CRQ Queue
 858 * @vscsi:      Pointer to our adapter structure
 859 *
  860 * This function calls h_free_crq and then h_reg_crq and does all
  861 * of the bookkeeping to get us back to where we can communicate.
  862 *
  863 * Actually, we don't always call h_free_crq.  A problem was discovered
  864 * where one partition would close and reopen its queue, which would
  865 * cause its partner to get a transport event, which would cause it to
  866 * close and reopen its queue, which would cause the original partition
  867 * to get a transport event, etc., etc.  To prevent this, we don't
  868 * actually close our queue if the client initiated the reset (i.e.
  869 * either we got a transport event or we have detected that the client's
  870 * queue is gone).
 871 *
 872 * EXECUTION ENVIRONMENT:
 873 *      Process environment, called with interrupt lock held
 874 */
 875static void ibmvscsis_reset_queue(struct scsi_info *vscsi)
 876{
 877        int bytes;
 878        long rc = ADAPT_SUCCESS;
 879
 880        dev_dbg(&vscsi->dev, "reset_queue: flags 0x%x\n", vscsi->flags);
 881
 882        /* don't reset, the client did it for us */
 883        if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
 884                vscsi->flags &= PRESERVE_FLAG_FIELDS;
 885                vscsi->rsp_q_timer.timer_pops = 0;
 886                vscsi->debit = 0;
 887                vscsi->credit = 0;
 888                vscsi->state = WAIT_CONNECTION;
 889                vio_enable_interrupts(vscsi->dma_dev);
 890        } else {
 891                rc = ibmvscsis_free_command_q(vscsi);
 892                if (rc == ADAPT_SUCCESS) {
 893                        vscsi->state = WAIT_CONNECTION;
 894
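                        /*
                         * Re-register the queue pages with the hypervisor.
                         * H_CLOSED just means the client has not opened its
                         * side of the queue yet, so it is treated as success.
                         */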
 895                        bytes = vscsi->cmd_q.size * PAGE_SIZE;
 896                        rc = h_reg_crq(vscsi->dds.unit_id,
 897                                       vscsi->cmd_q.crq_token, bytes);
 898                        if (rc == H_CLOSED || rc == H_SUCCESS) {
 899                                rc = ibmvscsis_establish_new_q(vscsi);
 900                        }
 901
 902                        if (rc != ADAPT_SUCCESS) {
 903                                dev_dbg(&vscsi->dev, "reset_queue: reg_crq rc %ld\n",
 904                                        rc);
 905
 906                                vscsi->state = ERR_DISCONNECTED;
 907                                vscsi->flags |= RESPONSE_Q_DOWN;
 908                                ibmvscsis_free_command_q(vscsi);
 909                        }
 910                } else {
 911                        vscsi->state = ERR_DISCONNECTED;
 912                        vscsi->flags |= RESPONSE_Q_DOWN;
 913                }
 914        }
 915}
 916
 917/**
 918 * ibmvscsis_free_cmd_resources() - Free command resources
 919 * @vscsi:      Pointer to our adapter structure
  920 * @cmd:        Command which is no longer in use
 921 *
 922 * Must be called with interrupt lock held.
 923 */
 924static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi,
 925                                         struct ibmvscsis_cmd *cmd)
 926{
 927        struct iu_entry *iue = cmd->iue;
 928
 929        switch (cmd->type) {
 930        case TASK_MANAGEMENT:
 931        case SCSI_CDB:
 932                /*
 933                 * When the queue goes down this value is cleared, so it
 934                 * cannot be cleared in this general purpose function.
 935                 */
 936                if (vscsi->debit)
 937                        vscsi->debit -= 1;
 938                break;
 939        case ADAPTER_MAD:
 940                vscsi->flags &= ~PROCESSING_MAD;
 941                break;
 942        case UNSET_TYPE:
 943                break;
 944        default:
 945                dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n",
 946                        cmd->type);
 947                break;
 948        }
 949
 950        cmd->iue = NULL;
 951        list_add_tail(&cmd->list, &vscsi->free_cmd);
 952        srp_iu_put(iue);
 953
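        /* if all queues have drained, wake anyone waiting for idle */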
 954        if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) &&
 955            list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) {
 956                vscsi->flags &= ~WAIT_FOR_IDLE;
 957                complete(&vscsi->wait_idle);
 958        }
 959}
 960
 961/**
 962 * ibmvscsis_ready_for_suspend() - Helper function to call VIOCTL
 963 * @vscsi:      Pointer to our adapter structure
 964 * @idle:       Indicates whether we were called from adapter_idle.  This
 965 *              is important to know if we need to do a disconnect, since if
 966 *              we're called from adapter_idle, we're still processing the
 967 *              current disconnect, so we can't just call post_disconnect.
 968 *
  969 * This function is called when the adapter is idle after phyp has sent
  970 * us a Prepare for Suspend Transport Event.
 971 *
 972 * EXECUTION ENVIRONMENT:
 973 *      Process or interrupt environment called with interrupt lock held
 974 */
 975static long ibmvscsis_ready_for_suspend(struct scsi_info *vscsi, bool idle)
 976{
 977        long rc = 0;
 978        struct viosrp_crq *crq;
 979
 980        /* See if there is a Resume event in the queue */
 981        crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
 982
 983        dev_dbg(&vscsi->dev, "ready_suspend: flags 0x%x, state 0x%hx crq_valid:%x\n",
 984                vscsi->flags, vscsi->state, (int)crq->valid);
 985
 986        if (!(vscsi->flags & PREP_FOR_SUSPEND_ABORTED) && !(crq->valid)) {
 987                rc = h_vioctl(vscsi->dds.unit_id, H_READY_FOR_SUSPEND, 0, 0, 0,
 988                              0, 0);
 989                if (rc) {
 990                        dev_err(&vscsi->dev, "Ready for Suspend Vioctl failed: %ld\n",
 991                                rc);
 992                        rc = 0;
 993                }
 994        } else if (((vscsi->flags & PREP_FOR_SUSPEND_OVERWRITE) &&
 995                    (vscsi->flags & PREP_FOR_SUSPEND_ABORTED)) ||
 996                   ((crq->valid) && ((crq->valid != VALID_TRANS_EVENT) ||
 997                                     (crq->format != RESUME_FROM_SUSP)))) {
 998                if (idle) {
 999                        vscsi->state = ERR_DISCONNECT_RECONNECT;
1000                        ibmvscsis_reset_queue(vscsi);
1001                        rc = -1;
1002                } else if (vscsi->state == CONNECTED) {
1003                        ibmvscsis_post_disconnect(vscsi,
1004                                                  ERR_DISCONNECT_RECONNECT, 0);
1005                }
1006
1007                vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE;
1008
1009                if ((crq->valid) && ((crq->valid != VALID_TRANS_EVENT) ||
1010                                     (crq->format != RESUME_FROM_SUSP)))
1011                        dev_err(&vscsi->dev, "Invalid element in CRQ after Prepare for Suspend");
1012        }
1013
1014        vscsi->flags &= ~(PREP_FOR_SUSPEND_PENDING | PREP_FOR_SUSPEND_ABORTED);
1015
1016        return rc;
1017}
1018
1019/**
1020 * ibmvscsis_trans_event() - Handle a Transport Event
1021 * @vscsi:      Pointer to our adapter structure
1022 * @crq:        Pointer to CRQ entry containing the Transport Event
1023 *
1024 * Do the logic to close the I_T nexus.  This function may not
1025 * behave to specification.
1026 *
1027 * EXECUTION ENVIRONMENT:
1028 *      Interrupt, interrupt lock held
1029 */
1030static long ibmvscsis_trans_event(struct scsi_info *vscsi,
1031                                  struct viosrp_crq *crq)
1032{
1033        long rc = ADAPT_SUCCESS;
1034
1035        dev_dbg(&vscsi->dev, "trans_event: format %d, flags 0x%x, state 0x%hx\n",
1036                (int)crq->format, vscsi->flags, vscsi->state);
1037
1038        switch (crq->format) {
1039        case MIGRATED:
1040        case PARTNER_FAILED:
1041        case PARTNER_DEREGISTER:
1042                ibmvscsis_delete_client_info(vscsi, true);
1043                if (crq->format == MIGRATED)
1044                        vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE;
1045                switch (vscsi->state) {
1046                case NO_QUEUE:
1047                case ERR_DISCONNECTED:
1048                case UNDEFINED:
1049                        break;
1050
1051                case UNCONFIGURING:
1052                        vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT);
1053                        break;
1054
1055                case WAIT_ENABLED:
1056                        break;
1057
1058                case WAIT_CONNECTION:
1059                        break;
1060
1061                case CONNECTED:
1062                        ibmvscsis_post_disconnect(vscsi, WAIT_IDLE,
1063                                                  (RESPONSE_Q_DOWN |
1064                                                   TRANS_EVENT));
1065                        break;
1066
1067                case SRP_PROCESSING:
1068                        if ((vscsi->debit > 0) ||
1069                            !list_empty(&vscsi->schedule_q) ||
1070                            !list_empty(&vscsi->waiting_rsp) ||
1071                            !list_empty(&vscsi->active_q)) {
1072                                dev_dbg(&vscsi->dev, "debit %d, sched %d, wait %d, active %d\n",
1073                                        vscsi->debit,
1074                                        (int)list_empty(&vscsi->schedule_q),
1075                                        (int)list_empty(&vscsi->waiting_rsp),
1076                                        (int)list_empty(&vscsi->active_q));
1077                                dev_warn(&vscsi->dev, "connection lost with outstanding work\n");
1078                        } else {
1079                                dev_dbg(&vscsi->dev, "trans_event: SRP Processing, but no outstanding work\n");
1080                        }
1081
1082                        ibmvscsis_post_disconnect(vscsi, WAIT_IDLE,
1083                                                  (RESPONSE_Q_DOWN |
1084                                                   TRANS_EVENT));
1085                        break;
1086
1087                case ERR_DISCONNECT:
1088                case ERR_DISCONNECT_RECONNECT:
1089                case WAIT_IDLE:
1090                        vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT);
1091                        break;
1092                }
1093                break;
1094
1095        case PREPARE_FOR_SUSPEND:
1096                dev_dbg(&vscsi->dev, "Prep for Suspend, crq status = 0x%x\n",
1097                        (int)crq->status);
1098                switch (vscsi->state) {
1099                case ERR_DISCONNECTED:
1100                case WAIT_CONNECTION:
1101                case CONNECTED:
1102                        ibmvscsis_ready_for_suspend(vscsi, false);
1103                        break;
1104                case SRP_PROCESSING:
1105                        vscsi->resume_state = vscsi->state;
1106                        vscsi->flags |= PREP_FOR_SUSPEND_PENDING;
1107                        if (crq->status == CRQ_ENTRY_OVERWRITTEN)
1108                                vscsi->flags |= PREP_FOR_SUSPEND_OVERWRITE;
1109                        ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, 0);
1110                        break;
1111                case NO_QUEUE:
1112                case UNDEFINED:
1113                case UNCONFIGURING:
1114                case WAIT_ENABLED:
1115                case ERR_DISCONNECT:
1116                case ERR_DISCONNECT_RECONNECT:
1117                case WAIT_IDLE:
1118                        dev_err(&vscsi->dev, "Invalid state for Prepare for Suspend Trans Event: 0x%x\n",
1119                                vscsi->state);
1120                        break;
1121                }
1122                break;
1123
1124        case RESUME_FROM_SUSP:
1125                dev_dbg(&vscsi->dev, "Resume from Suspend, crq status = 0x%x\n",
1126                        (int)crq->status);
1127                if (vscsi->flags & PREP_FOR_SUSPEND_PENDING) {
1128                        vscsi->flags |= PREP_FOR_SUSPEND_ABORTED;
1129                } else {
1130                        if ((crq->status == CRQ_ENTRY_OVERWRITTEN) ||
1131                            (vscsi->flags & PREP_FOR_SUSPEND_OVERWRITE)) {
1132                                ibmvscsis_post_disconnect(vscsi,
1133                                                          ERR_DISCONNECT_RECONNECT,
1134                                                          0);
1135                                vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE;
1136                        }
1137                }
1138                break;
1139
1140        default:
1141                rc = ERROR;
1142                dev_err(&vscsi->dev, "trans_event: invalid format %d\n",
1143                        (uint)crq->format);
1144                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT,
1145                                          RESPONSE_Q_DOWN);
1146                break;
1147        }
1148
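        /* report to the caller whether a disconnect was scheduled */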
1149        rc = vscsi->flags & SCHEDULE_DISCONNECT;
1150
1151        dev_dbg(&vscsi->dev, "Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n",
1152                vscsi->flags, vscsi->state, rc);
1153
1154        return rc;
1155}
1156
1157/**
1158 * ibmvscsis_poll_cmd_q() - Poll Command Queue
1159 * @vscsi:      Pointer to our adapter structure
1160 *
1161 * Called to handle command elements that may have arrived while
1162 * interrupts were disabled.
1163 *
1164 * EXECUTION ENVIRONMENT:
1165 *      intr_lock must be held
1166 */
1167static void ibmvscsis_poll_cmd_q(struct scsi_info *vscsi)
1168{
1169        struct viosrp_crq *crq;
1170        long rc;
1171        bool ack = true;
1172        volatile u8 valid;
1173
 1174        dev_dbg(&vscsi->dev, "poll_cmd_q: flags 0x%x, state 0x%hx, q index %u\n",
1175                vscsi->flags, vscsi->state, vscsi->cmd_q.index);
1176
1177        rc = vscsi->flags & SCHEDULE_DISCONNECT;
1178        crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
1179        valid = crq->valid;
1180        dma_rmb();
1181
1182        while (valid) {
1183poll_work:
1184                vscsi->cmd_q.index =
1185                        (vscsi->cmd_q.index + 1) & vscsi->cmd_q.mask;
1186
1187                if (!rc) {
1188                        rc = ibmvscsis_parse_command(vscsi, crq);
1189                } else {
1190                        if ((uint)crq->valid == VALID_TRANS_EVENT) {
1191                                /*
1192                                 * must service the transport layer events even
 1193                                 * in an error state; don't break out until all
1194                                 * the consecutive transport events have been
1195                                 * processed
1196                                 */
1197                                rc = ibmvscsis_trans_event(vscsi, crq);
1198                        } else if (vscsi->flags & TRANS_EVENT) {
1199                                /*
 1200                                 * if a transport event has occurred, leave
1201                                 * everything but transport events on the queue
1202                                 */
1203                                dev_dbg(&vscsi->dev, "poll_cmd_q, ignoring\n");
1204
1205                                /*
1206                                 * need to decrement the queue index so we can
 1207                                 * look at the element again
1208                                 */
1209                                if (vscsi->cmd_q.index)
1210                                        vscsi->cmd_q.index -= 1;
1211                                else
1212                                        /*
 1213                                         * index is at 0, so it just wrapped;
 1214                                         * have it index the last element in the q
1215                                         */
1216                                        vscsi->cmd_q.index = vscsi->cmd_q.mask;
1217                                break;
1218                        }
1219                }
1220
1221                crq->valid = INVALIDATE_CMD_RESP_EL;
1222
1223                crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
1224                valid = crq->valid;
1225                dma_rmb();
1226        }
1227
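        /*
         * Re-enable interrupts and then check the queue once more; an
         * element that arrived while interrupts were disabled would not
         * raise an interrupt and would otherwise sit unprocessed.
         */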
1228        if (!rc) {
1229                if (ack) {
1230                        vio_enable_interrupts(vscsi->dma_dev);
1231                        ack = false;
1232                        dev_dbg(&vscsi->dev, "poll_cmd_q, reenabling interrupts\n");
1233                }
1234                valid = crq->valid;
1235                dma_rmb();
1236                if (valid)
1237                        goto poll_work;
1238        }
1239
1240        dev_dbg(&vscsi->dev, "Leaving poll_cmd_q: rc %ld\n", rc);
1241}
1242
1243/**
1244 * ibmvscsis_free_cmd_qs() - Free elements in queue
1245 * @vscsi:      Pointer to our adapter structure
1246 *
1247 * Free all of the elements on all queues that are waiting for
1248 * whatever reason.
1249 *
1250 * PRECONDITION:
1251 *      Called with interrupt lock held
1252 */
1253static void ibmvscsis_free_cmd_qs(struct scsi_info *vscsi)
1254{
1255        struct ibmvscsis_cmd *cmd, *nxt;
1256
 1257        dev_dbg(&vscsi->dev, "free_cmd_qs: waiting_rsp empty %d, timer started %d\n",
1258                (int)list_empty(&vscsi->waiting_rsp),
1259                vscsi->rsp_q_timer.started);
1260
1261        list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
1262                list_del(&cmd->list);
1263                ibmvscsis_free_cmd_resources(vscsi, cmd);
1264        }
1265}
1266
1267/**
1268 * ibmvscsis_get_free_cmd() - Get free command from list
1269 * @vscsi:      Pointer to our adapter structure
1270 *
1271 * Must be called with interrupt lock held.
1272 */
1273static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi)
1274{
1275        struct ibmvscsis_cmd *cmd = NULL;
1276        struct iu_entry *iue;
1277
1278        iue = srp_iu_get(&vscsi->target);
1279        if (iue) {
1280                cmd = list_first_entry_or_null(&vscsi->free_cmd,
1281                                               struct ibmvscsis_cmd, list);
1282                if (cmd) {
1283                        if (cmd->abort_cmd)
1284                                cmd->abort_cmd = NULL;
1285                        cmd->flags &= ~(DELAY_SEND);
1286                        list_del(&cmd->list);
1287                        cmd->iue = iue;
1288                        cmd->type = UNSET_TYPE;
1289                        memset(&cmd->se_cmd, 0, sizeof(cmd->se_cmd));
1290                } else {
1291                        srp_iu_put(iue);
1292                }
1293        }
1294
1295        return cmd;
1296}
1297
1298/**
1299 * ibmvscsis_adapter_idle() - Helper function to handle idle adapter
1300 * @vscsi:      Pointer to our adapter structure
1301 *
 1302 * This function is called when the adapter has gone idle while the driver
 1303 * is attempting to clear an error condition.
 1304 * The adapter is considered busy if any of its cmd queues
 1305 * are non-empty. This function can be invoked
 1306 * from the off-level disconnect function.
1307 *
1308 * EXECUTION ENVIRONMENT:
1309 *      Process environment called with interrupt lock held
1310 */
1311static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
1312{
1313        int free_qs = false;
1314        long rc = 0;
1315
1316        dev_dbg(&vscsi->dev, "adapter_idle: flags 0x%x, state 0x%hx\n",
1317                vscsi->flags, vscsi->state);
1318
1319        /* Only need to free qs if we're disconnecting from client */
1320        if (vscsi->state != WAIT_CONNECTION || vscsi->flags & TRANS_EVENT)
1321                free_qs = true;
1322
1323        switch (vscsi->state) {
1324        case UNCONFIGURING:
1325                ibmvscsis_free_command_q(vscsi);
1326                dma_rmb();
1327                isync();
1328                if (vscsi->flags & CFG_SLEEPING) {
1329                        vscsi->flags &= ~CFG_SLEEPING;
1330                        complete(&vscsi->unconfig);
1331                }
1332                break;
1333        case ERR_DISCONNECT_RECONNECT:
1334                ibmvscsis_reset_queue(vscsi);
1335                dev_dbg(&vscsi->dev, "adapter_idle, disc_rec: flags 0x%x\n",
1336                        vscsi->flags);
1337                break;
1338
1339        case ERR_DISCONNECT:
1340                ibmvscsis_free_command_q(vscsi);
1341                vscsi->flags &= ~(SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED);
1342                vscsi->flags |= RESPONSE_Q_DOWN;
1343                if (vscsi->tport.enabled)
1344                        vscsi->state = ERR_DISCONNECTED;
1345                else
1346                        vscsi->state = WAIT_ENABLED;
1347                dev_dbg(&vscsi->dev, "adapter_idle, disc: flags 0x%x, state 0x%hx\n",
1348                        vscsi->flags, vscsi->state);
1349                break;
1350
1351        case WAIT_IDLE:
1352                vscsi->rsp_q_timer.timer_pops = 0;
1353                vscsi->debit = 0;
1354                vscsi->credit = 0;
1355                if (vscsi->flags & PREP_FOR_SUSPEND_PENDING) {
1356                        vscsi->state = vscsi->resume_state;
1357                        vscsi->resume_state = 0;
1358                        rc = ibmvscsis_ready_for_suspend(vscsi, true);
1359                        vscsi->flags &= ~DISCONNECT_SCHEDULED;
1360                        if (rc)
1361                                break;
1362                } else if (vscsi->flags & TRANS_EVENT) {
1363                        vscsi->state = WAIT_CONNECTION;
1364                        vscsi->flags &= PRESERVE_FLAG_FIELDS;
1365                } else {
1366                        vscsi->state = CONNECTED;
1367                        vscsi->flags &= ~DISCONNECT_SCHEDULED;
1368                }
1369
1370                dev_dbg(&vscsi->dev, "adapter_idle, wait: flags 0x%x, state 0x%hx\n",
1371                        vscsi->flags, vscsi->state);
1372                ibmvscsis_poll_cmd_q(vscsi);
1373                break;
1374
1375        case ERR_DISCONNECTED:
1376                vscsi->flags &= ~DISCONNECT_SCHEDULED;
1377                dev_dbg(&vscsi->dev, "adapter_idle, disconnected: flags 0x%x, state 0x%hx\n",
1378                        vscsi->flags, vscsi->state);
1379                break;
1380
1381        default:
1382                dev_err(&vscsi->dev, "adapter_idle: in invalid state %d\n",
1383                        vscsi->state);
1384                break;
1385        }
1386
1387        if (free_qs)
1388                ibmvscsis_free_cmd_qs(vscsi);
1389
1390        /*
1391         * There is a timing window where we could lose a disconnect request.
1392         * The known path to this window occurs during the DISCONNECT_RECONNECT
1393         * case above: reset_queue calls free_command_q, which will release the
1394         * interrupt lock.  During that time, a new post_disconnect call can be
1395         * made with a "more severe" state (DISCONNECT or UNCONFIGURING).
1396         * Because the DISCONNECT_SCHEDULED flag is already set, post_disconnect
1397         * will only set the new_state.  Now free_command_q reacquires the intr
1398         * lock and clears the DISCONNECT_SCHEDULED flag (using PRESERVE_FLAG_
1399         * FIELDS), and the disconnect is lost.  This is particularly bad when
1400         * the new disconnect was for UNCONFIGURING, since the unconfigure hangs
1401         * forever.
 1402         * The fix is that free_command_q sets the acr state and acr flags if
 1403         * there is a change while the lock is released.
 1404         * Note that free_command_q writes to this state: it clears it before
 1405         * releasing the lock.  Different paths call free_command_q at
 1406         * different times, so don't initialize the acr fields above.
1407         */
1408        if (vscsi->phyp_acr_state != 0) {
1409                /*
1410                 * set any bits in flags that may have been cleared by
1411                 * a call to free command queue in switch statement
1412                 * or reset queue
1413                 */
1414                vscsi->flags |= vscsi->phyp_acr_flags;
1415                ibmvscsis_post_disconnect(vscsi, vscsi->phyp_acr_state, 0);
1416                vscsi->phyp_acr_state = 0;
1417                vscsi->phyp_acr_flags = 0;
1418
1419                dev_dbg(&vscsi->dev, "adapter_idle: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
1420                        vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
1421                        vscsi->phyp_acr_state);
1422        }
1423
1424        dev_dbg(&vscsi->dev, "Leaving adapter_idle: flags 0x%x, state 0x%hx, new_state 0x%x\n",
1425                vscsi->flags, vscsi->state, vscsi->new_state);
1426}
1427
1428/**
1429 * ibmvscsis_copy_crq_packet() - Copy CRQ Packet
1430 * @vscsi:      Pointer to our adapter structure
1431 * @cmd:        Pointer to command element to use to process the request
1432 * @crq:        Pointer to CRQ entry containing the request
1433 *
1434 * Copy the srp information unit from the hosted
1435 * partition using remote dma
1436 *
1437 * EXECUTION ENVIRONMENT:
1438 *      Interrupt, interrupt lock held
1439 */
1440static long ibmvscsis_copy_crq_packet(struct scsi_info *vscsi,
1441                                      struct ibmvscsis_cmd *cmd,
1442                                      struct viosrp_crq *crq)
1443{
1444        struct iu_entry *iue = cmd->iue;
1445        long rc = 0;
1446        u16 len;
1447
1448        len = be16_to_cpu(crq->IU_length);
1449        if ((len > SRP_MAX_IU_LEN) || (len == 0)) {
1450                dev_err(&vscsi->dev, "copy_crq: Invalid len %d passed\n", len);
1451                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
1452                return SRP_VIOLATION;
1453        }
1454
1455        rc = h_copy_rdma(len, vscsi->dds.window[REMOTE].liobn,
1456                         be64_to_cpu(crq->IU_data_ptr),
1457                         vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma);
1458
1459        switch (rc) {
1460        case H_SUCCESS:
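                    /* timestamp the IU arrival (PowerPC timebase) */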
1461                cmd->init_time = mftb();
1462                iue->remote_token = crq->IU_data_ptr;
1463                iue->iu_len = len;
1464                dev_dbg(&vscsi->dev, "copy_crq: ioba 0x%llx, init_time 0x%llx\n",
1465                        be64_to_cpu(crq->IU_data_ptr), cmd->init_time);
1466                break;
1467        case H_PERMISSION:
1468                if (connection_broken(vscsi))
1469                        ibmvscsis_post_disconnect(vscsi,
1470                                                  ERR_DISCONNECT_RECONNECT,
1471                                                  (RESPONSE_Q_DOWN |
1472                                                   CLIENT_FAILED));
1473                else
1474                        ibmvscsis_post_disconnect(vscsi,
1475                                                  ERR_DISCONNECT_RECONNECT, 0);
1476
1477                dev_err(&vscsi->dev, "copy_crq: h_copy_rdma failed, rc %ld\n",
1478                        rc);
1479                break;
1480        case H_DEST_PARM:
1481        case H_SOURCE_PARM:
1482        default:
1483                dev_err(&vscsi->dev, "copy_crq: h_copy_rdma failed, rc %ld\n",
1484                        rc);
1485                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
1486                break;
1487        }
1488
1489        return rc;
1490}
1491
1492/**
1493 * ibmvscsis_adapter_info() - Service an Adapter Info MAnagement Data gram
1494 * @vscsi:      Pointer to our adapter structure
1495 * @iue:        Information Unit containing the Adapter Info MAD request
1496 *
1497 * EXECUTION ENVIRONMENT:
1498 *      Interrupt, adapter lock held
1499 */
1500static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
1501                                   struct iu_entry *iue)
1502{
1503        struct viosrp_adapter_info *mad = &vio_iu(iue)->mad.adapter_info;
1504        struct mad_adapter_info_data *info;
1505        uint flag_bits = 0;
1506        dma_addr_t token;
1507        long rc;
1508
1509        mad->common.status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
1510
1511        if (be16_to_cpu(mad->common.length) > sizeof(*info)) {
1512                mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
1513                return 0;
1514        }
1515
1516        info = dma_alloc_coherent(&vscsi->dma_dev->dev, sizeof(*info), &token,
1517                                  GFP_ATOMIC);
1518        if (!info) {
1519                dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
1520                        iue->target);
1521                mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
1522                return 0;
1523        }
1524
1525        /* Get remote info */
1526        rc = h_copy_rdma(be16_to_cpu(mad->common.length),
1527                         vscsi->dds.window[REMOTE].liobn,
1528                         be64_to_cpu(mad->buffer),
1529                         vscsi->dds.window[LOCAL].liobn, token);
1530
1531        if (rc != H_SUCCESS) {
1532                if (rc == H_PERMISSION) {
1533                        if (connection_broken(vscsi))
1534                                flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
1535                }
1536                dev_warn(&vscsi->dev, "adapter_info: h_copy_rdma from client failed, rc %ld\n",
1537                         rc);
1538                dev_dbg(&vscsi->dev, "adapter_info: ioba 0x%llx, flags 0x%x, flag_bits 0x%x\n",
1539                        be64_to_cpu(mad->buffer), vscsi->flags, flag_bits);
1540                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
1541                                          flag_bits);
1542                goto free_dma;
1543        }
1544
1545        /*
1546         * Copy client info, but ignore partition number, which we
1547         * already got from phyp - unless we failed to get it from
1548         * phyp (e.g. if we're running on a p5 system).
1549         */
1550        if (vscsi->client_data.partition_number == 0)
1551                vscsi->client_data.partition_number =
1552                        be32_to_cpu(info->partition_number);
1553        strncpy(vscsi->client_data.srp_version, info->srp_version,
1554                sizeof(vscsi->client_data.srp_version));
1555        strncpy(vscsi->client_data.partition_name, info->partition_name,
1556                sizeof(vscsi->client_data.partition_name));
1557        vscsi->client_data.mad_version = be32_to_cpu(info->mad_version);
1558        vscsi->client_data.os_type = be32_to_cpu(info->os_type);
1559
1560        /* Copy our info */
1561        strncpy(info->srp_version, SRP_VERSION,
1562                sizeof(info->srp_version));
1563        strncpy(info->partition_name, vscsi->dds.partition_name,
1564                sizeof(info->partition_name));
1565        info->partition_number = cpu_to_be32(vscsi->dds.partition_num);
1566        info->mad_version = cpu_to_be32(MAD_VERSION_1);
1567        info->os_type = cpu_to_be32(LINUX);
1568        memset(&info->port_max_txu[0], 0, sizeof(info->port_max_txu));
1569        info->port_max_txu[0] = cpu_to_be32(MAX_TXU);
1570
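            /* make sure the info buffer is complete before the RDMA copy */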
1571        dma_wmb();
1572        rc = h_copy_rdma(sizeof(*info), vscsi->dds.window[LOCAL].liobn,
1573                         token, vscsi->dds.window[REMOTE].liobn,
1574                         be64_to_cpu(mad->buffer));
1575        switch (rc) {
1576        case H_SUCCESS:
1577                break;
1578
1579        case H_SOURCE_PARM:
1580        case H_DEST_PARM:
1581        case H_PERMISSION:
1582                if (connection_broken(vscsi))
1583                        flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
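                    /* fall through - report the error and disconnect */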
1584        default:
1585                dev_err(&vscsi->dev, "adapter_info: h_copy_rdma to client failed, rc %ld\n",
1586                        rc);
1587                ibmvscsis_post_disconnect(vscsi,
1588                                          ERR_DISCONNECT_RECONNECT,
1589                                          flag_bits);
1590                break;
1591        }
1592
1593free_dma:
1594        dma_free_coherent(&vscsi->dma_dev->dev, sizeof(*info), info, token);
1595        dev_dbg(&vscsi->dev, "Leaving adapter_info, rc %ld\n", rc);
1596
1597        return rc;
1598}
1599
1600/**
1601 * ibmvscsis_cap_mad() - Service a Capabilities MAnagement Data gram
1602 * @vscsi:      Pointer to our adapter structure
1603 * @iue:        Information Unit containing the Capabilities MAD request
1604 *
1605 * NOTE: if you return an error from this routine you must be
1606 * disconnecting or you will cause a hang
1607 *
1608 * EXECUTION ENVIRONMENT:
1609 *      Interrupt, called with adapter lock held
1610 */
1611static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
1612{
1613        struct viosrp_capabilities *mad = &vio_iu(iue)->mad.capabilities;
1614        struct capabilities *cap;
1615        struct mad_capability_common *common;
1616        dma_addr_t token;
1617        u16 olen, len, status, min_len, cap_len;
1618        u32 flag;
1619        uint flag_bits = 0;
1620        long rc = 0;
1621
1622        olen = be16_to_cpu(mad->common.length);
1623        /*
1624         * struct capabilities hardcodes a couple capabilities after the
1625         * header, but the capabilities can actually be in any order.
1626         */
1627        min_len = offsetof(struct capabilities, migration);
1628        if ((olen < min_len) || (olen > PAGE_SIZE)) {
1629                dev_warn(&vscsi->dev, "cap_mad: invalid len %d\n", olen);
1630                mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
1631                return 0;
1632        }
1633
1634        cap = dma_alloc_coherent(&vscsi->dma_dev->dev, olen, &token,
1635                                 GFP_ATOMIC);
1636        if (!cap) {
1637                dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
1638                        iue->target);
1639                mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
1640                return 0;
1641        }
1642        rc = h_copy_rdma(olen, vscsi->dds.window[REMOTE].liobn,
1643                         be64_to_cpu(mad->buffer),
1644                         vscsi->dds.window[LOCAL].liobn, token);
1645        if (rc == H_SUCCESS) {
1646                strncpy(cap->name, dev_name(&vscsi->dma_dev->dev),
1647                        SRP_MAX_LOC_LEN);
1648
1649                len = olen - min_len;
1650                status = VIOSRP_MAD_SUCCESS;
1651                common = (struct mad_capability_common *)&cap->migration;
1652
1653                while ((len > 0) && (status == VIOSRP_MAD_SUCCESS) && !rc) {
1654                        dev_dbg(&vscsi->dev, "cap_mad: len left %hd, cap type %d, cap len %hd\n",
1655                                len, be32_to_cpu(common->cap_type),
1656                                be16_to_cpu(common->length));
1657
1658                        cap_len = be16_to_cpu(common->length);
1659                        if (cap_len > len) {
1660                                dev_err(&vscsi->dev, "cap_mad: cap len mismatch with total len\n");
1661                                status = VIOSRP_MAD_FAILED;
1662                                break;
1663                        }
1664
1665                        if (cap_len == 0) {
1666                                dev_err(&vscsi->dev, "cap_mad: cap len is 0\n");
1667                                status = VIOSRP_MAD_FAILED;
1668                                break;
1669                        }
1670
1671                        switch (common->cap_type) {
1672                        default:
1673                                dev_dbg(&vscsi->dev, "cap_mad: unsupported capability\n");
1674                                common->server_support = 0;
1675                                flag = cpu_to_be32((u32)CAP_LIST_SUPPORTED);
1676                                cap->flags &= ~flag;
1677                                break;
1678                        }
1679
1680                        len = len - cap_len;
1681                        common = (struct mad_capability_common *)
1682                                ((char *)common + cap_len);
1683                }
1684
1685                mad->common.status = cpu_to_be16(status);
1686
1687                dma_wmb();
1688                rc = h_copy_rdma(olen, vscsi->dds.window[LOCAL].liobn, token,
1689                                 vscsi->dds.window[REMOTE].liobn,
1690                                 be64_to_cpu(mad->buffer));
1691
1692                if (rc != H_SUCCESS) {
1693                        dev_dbg(&vscsi->dev, "cap_mad: failed to copy to client, rc %ld\n",
1694                                rc);
1695
1696                        if (rc == H_PERMISSION) {
1697                                if (connection_broken(vscsi))
1698                                        flag_bits = (RESPONSE_Q_DOWN |
1699                                                     CLIENT_FAILED);
1700                        }
1701
1702                        dev_warn(&vscsi->dev, "cap_mad: error copying data to client, rc %ld\n",
1703                                 rc);
1704                        ibmvscsis_post_disconnect(vscsi,
1705                                                  ERR_DISCONNECT_RECONNECT,
1706                                                  flag_bits);
1707                }
1708        }
1709
1710        dma_free_coherent(&vscsi->dma_dev->dev, olen, cap, token);
1711
1712        dev_dbg(&vscsi->dev, "Leaving cap_mad, rc %ld, client_cap 0x%x\n",
1713                rc, vscsi->client_cap);
1714
1715        return rc;
1716}
1717
1718/**
1719 * ibmvscsis_process_mad() - Service a MAnagement Data gram
1720 * @vscsi:      Pointer to our adapter structure
1721 * @iue:        Information Unit containing the MAD request
1722 *
1723 * Must be called with interrupt lock held.
1724 */
1725static long ibmvscsis_process_mad(struct scsi_info *vscsi, struct iu_entry *iue)
1726{
1727        struct mad_common *mad = (struct mad_common *)&vio_iu(iue)->mad;
1728        struct viosrp_empty_iu *empty;
1729        long rc = ADAPT_SUCCESS;
1730
1731        switch (be32_to_cpu(mad->type)) {
1732        case VIOSRP_EMPTY_IU_TYPE:
1733                empty = &vio_iu(iue)->mad.empty_iu;
1734                vscsi->empty_iu_id = be64_to_cpu(empty->buffer);
1735                vscsi->empty_iu_tag = be64_to_cpu(empty->common.tag);
1736                mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
1737                break;
1738        case VIOSRP_ADAPTER_INFO_TYPE:
1739                rc = ibmvscsis_adapter_info(vscsi, iue);
1740                break;
1741        case VIOSRP_CAPABILITIES_TYPE:
1742                rc = ibmvscsis_cap_mad(vscsi, iue);
1743                break;
1744        case VIOSRP_ENABLE_FAST_FAIL:
1745                if (vscsi->state == CONNECTED) {
1746                        vscsi->fast_fail = true;
1747                        mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
1748                } else {
1749                        dev_warn(&vscsi->dev, "fast fail mad sent after login\n");
1750                        mad->status = cpu_to_be16(VIOSRP_MAD_FAILED);
1751                }
1752                break;
1753        default:
1754                mad->status = cpu_to_be16(VIOSRP_MAD_NOT_SUPPORTED);
1755                break;
1756        }
1757
1758        return rc;
1759}
1760
1761/**
1762 * srp_snd_msg_failed() - Handle an error when sending a response
1763 * @vscsi:      Pointer to our adapter structure
1764 * @rc:         The return code from the h_send_crq command
1765 *
1766 * Must be called with interrupt lock held.
1767 */
1768static void srp_snd_msg_failed(struct scsi_info *vscsi, long rc)
1769{
1770        ktime_t kt;
1771
1772        if (rc != H_DROPPED) {
1773                ibmvscsis_free_cmd_qs(vscsi);
1774
1775                if (rc == H_CLOSED)
1776                        vscsi->flags |= CLIENT_FAILED;
1777
1778                /* don't flag the same problem multiple times */
1779                if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
1780                        vscsi->flags |= RESPONSE_Q_DOWN;
1781                        if (!(vscsi->state & (ERR_DISCONNECT |
1782                                              ERR_DISCONNECT_RECONNECT |
1783                                              ERR_DISCONNECTED | UNDEFINED))) {
1784                                dev_err(&vscsi->dev, "snd_msg_failed: setting RESPONSE_Q_DOWN, state 0x%hx, flags 0x%x, rc %ld\n",
1785                                        vscsi->state, vscsi->flags, rc);
1786                        }
1787                        ibmvscsis_post_disconnect(vscsi,
1788                                                  ERR_DISCONNECT_RECONNECT, 0);
1789                }
1790                return;
1791        }
1792
1793        /*
1794         * The response queue is full.
1795         * If the server is processing SRP requests, i.e.
1796         * the client has successfully done an
1797         * SRP_LOGIN, then it will wait forever for room in
1798         * the queue.  However if the system admin
1799         * is attempting to unconfigure the server then one
1800         * or more children will be in a state where
1801         * they are being removed. So if there is even one
1802         * child being removed then the driver assumes
1803         * the system admin is attempting to break the
1804         * connection with the client and MAX_TIMER_POPS
1805         * is honored.
1806         */
1807        if ((vscsi->rsp_q_timer.timer_pops < MAX_TIMER_POPS) ||
1808            (vscsi->state == SRP_PROCESSING)) {
1809                dev_dbg(&vscsi->dev, "snd_msg_failed: response queue full, flags 0x%x, timer started %d, pops %d\n",
1810                        vscsi->flags, (int)vscsi->rsp_q_timer.started,
1811                        vscsi->rsp_q_timer.timer_pops);
1812
1813                /*
1814                 * Check if the timer is running; if it
1815                 * is not then start it up.
1816                 */
1817                if (!vscsi->rsp_q_timer.started) {
1818                        if (vscsi->rsp_q_timer.timer_pops <
1819                            MAX_TIMER_POPS) {
1820                                kt = WAIT_NANO_SECONDS;
1821                        } else {
1822                                /*
1823                                 * slide the timeslice if the maximum
1824                                 * timer pops have already happened
1825                                 */
1826                                kt = ktime_set(WAIT_SECONDS, 0);
1827                        }
1828
1829                        vscsi->rsp_q_timer.started = true;
1830                        hrtimer_start(&vscsi->rsp_q_timer.timer, kt,
1831                                      HRTIMER_MODE_REL);
1832                }
1833        } else {
1834                /*
1835                 * TBD: Do we need to worry about this? Need to get
1836                 *      remove working.
1837                 */
1838                /*
1839                 * We waited a long time and it appears the system admin
1840                 * is bringing this driver down.
1841                 */
1842                vscsi->flags |= RESPONSE_Q_DOWN;
1843                ibmvscsis_free_cmd_qs(vscsi);
1844                /*
1845                 * If the driver is already attempting to disconnect
1846                 * from the client and has already logged an error,
1847                 * trace this event but don't put it in the error log.
1848                 */
1849                if (!(vscsi->state & (ERR_DISCONNECT |
1850                                      ERR_DISCONNECT_RECONNECT |
1851                                      ERR_DISCONNECTED | UNDEFINED))) {
1852                        dev_err(&vscsi->dev, "client crq full too long\n");
1853                        ibmvscsis_post_disconnect(vscsi,
1854                                                  ERR_DISCONNECT_RECONNECT,
1855                                                  0);
1856                }
1857        }
1858}
1859
1860/**
1861 * ibmvscsis_send_messages() - Send a Response
1862 * @vscsi:      Pointer to our adapter structure
1863 *
1864 * Send a response, first checking the waiting queue. Responses are
1865 * sent in the order they are received. If the response cannot be sent,
1866 * because the client queue is full, it stays on the waiting queue.
1867 *
1868 * PRECONDITION:
1869 *      Called with interrupt lock held
1870 */
1871static void ibmvscsis_send_messages(struct scsi_info *vscsi)
1872{
1873        u64 msg_hi = 0;
1874        /* Note: crq overlays a single u64 (msg_hi), so do not access
1875         * IU_data_ptr through this pointer; it lies outside that storage.
1876         */
1877        struct viosrp_crq *crq = (struct viosrp_crq *)&msg_hi;
1878        struct ibmvscsis_cmd *cmd, *nxt;
1879        struct iu_entry *iue;
1880        long rc = ADAPT_SUCCESS;
1881        bool retry = false;
1882
1883        if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
1884                do {
1885                        retry = false;
1886                        list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp,
1887                                                 list) {
1888                                /*
1889                                 * Check to make sure abort cmd gets processed
1890                                 * prior to the abort tmr cmd
1891                                 */
1892                                if (cmd->flags & DELAY_SEND)
1893                                        continue;
1894
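                                    /*
                                     * The aborted cmd is being completed, so
                                     * release the held-back abort TM response
                                     * (DELAY_SEND) and rescan to send it.
                                     */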
1895                                if (cmd->abort_cmd) {
1896                                        retry = true;
1897                                        cmd->abort_cmd->flags &= ~(DELAY_SEND);
1898                                        cmd->abort_cmd = NULL;
1899                                }
1900
1901                                /*
1902                                 * In the CMD_T_ABORTED without CMD_T_TAS
1903                                 * scenario, and in the case where LIO
1904                                 * issued an ABORT_TASK (sending
1905                                 * TMR_TASK_DOES_NOT_EXIST), we don't send
1906                                 * a response, since one was already sent.
1907                                 */
1908                                if (cmd->se_cmd.transport_state & CMD_T_ABORTED &&
1909                                    !(cmd->se_cmd.transport_state & CMD_T_TAS)) {
1910                                        list_del(&cmd->list);
1911                                        ibmvscsis_free_cmd_resources(vscsi,
1912                                                                     cmd);
1913                                        /*
1914                                         * For an op successfully aborted
1915                                         * through LIO, increment the vscsi
1916                                         * credit so that when we don't send
1917                                         * a rsp to the original scsi abort
1918                                         * op (h_send_crq), but the tm rsp to
1919                                         * the abort is sent, the credit is
1920                                         * correctly returned with the abort
1921                                         * tm rsp.  We need 1 credit for the
1922                                         * abort tm rsp and 1 for the aborted
1923                                         * scsi op, so increment here.  We
1924                                         * also increment the credit here to
1925                                         * make sure the cmd is actually
1926                                         * released first; otherwise the
1927                                         * client will think it can send a
1928                                         * new cmd, and we could find
1929                                         * ourselves short of cmd elements.
1930                                         */
1931                                        vscsi->credit += 1;
1932                                } else {
1933                                        iue = cmd->iue;
1934
1935                                        crq->valid = VALID_CMD_RESP_EL;
1936                                        crq->format = cmd->rsp.format;
1937
1938                                        if (cmd->flags & CMD_FAST_FAIL)
1939                                                crq->status = VIOSRP_ADAPTER_FAIL;
1940
1941                                        crq->IU_length = cpu_to_be16(cmd->rsp.len);
1942
1943                                        rc = h_send_crq(vscsi->dma_dev->unit_address,
1944                                                        be64_to_cpu(msg_hi),
1945                                                        be64_to_cpu(cmd->rsp.tag));
1946
1947                                        dev_dbg(&vscsi->dev, "send_messages: cmd %p, tag 0x%llx, rc %ld\n",
1948                                                cmd, be64_to_cpu(cmd->rsp.tag),
1949                                                rc);
1950
1951                                        /* if all ok free up the command
1952                                         * element resources
1953                                         */
1954                                        if (rc == H_SUCCESS) {
1955                                                /* some movement has occurred */
1956                                                vscsi->rsp_q_timer.timer_pops = 0;
1957                                                list_del(&cmd->list);
1958
1959                                                ibmvscsis_free_cmd_resources(vscsi,
1960                                                                             cmd);
1961                                        } else {
1962                                                srp_snd_msg_failed(vscsi, rc);
1963                                                break;
1964                                        }
1965                                }
1966                        }
1967                } while (retry);
1968
1969                if (!rc) {
1970                        /*
1971                         * The timer could pop with the queue empty.  If
1972                         * this happens, rc will always indicate a
1973                         * success; clear the pop count.
1974                         */
1975                        vscsi->rsp_q_timer.timer_pops = 0;
1976                }
1977        } else {
1978                ibmvscsis_free_cmd_qs(vscsi);
1979        }
1980}
1981
1982/* Called with intr lock held */
1983static void ibmvscsis_send_mad_resp(struct scsi_info *vscsi,
1984                                    struct ibmvscsis_cmd *cmd,
1985                                    struct viosrp_crq *crq)
1986{
1987        struct iu_entry *iue = cmd->iue;
1988        struct mad_common *mad = (struct mad_common *)&vio_iu(iue)->mad;
1989        uint flag_bits = 0;
1990        long rc;
1991
1992        dma_wmb();
1993        rc = h_copy_rdma(sizeof(struct mad_common),
1994                         vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma,
1995                         vscsi->dds.window[REMOTE].liobn,
1996                         be64_to_cpu(crq->IU_data_ptr));
1997        if (!rc) {
1998                cmd->rsp.format = VIOSRP_MAD_FORMAT;
1999                cmd->rsp.len = sizeof(struct mad_common);
2000                cmd->rsp.tag = mad->tag;
2001                list_add_tail(&cmd->list, &vscsi->waiting_rsp);
2002                ibmvscsis_send_messages(vscsi);
2003        } else {
2004                dev_dbg(&vscsi->dev, "Error sending mad response, rc %ld\n",
2005                        rc);
2006                if (rc == H_PERMISSION) {
2007                        if (connection_broken(vscsi))
2008                                flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
2009                }
2010                dev_err(&vscsi->dev, "mad: failed to copy to client, rc %ld\n",
2011                        rc);
2012
2013                ibmvscsis_free_cmd_resources(vscsi, cmd);
2014                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
2015                                          flag_bits);
2016        }
2017}
2018
2019/**
2020 * ibmvscsis_mad() - Service a MAnagement Data gram.
2021 * @vscsi:      Pointer to our adapter structure
2022 * @crq:        Pointer to the CRQ entry containing the MAD request
2023 *
2024 * EXECUTION ENVIRONMENT:
2025 *      Interrupt, called with adapter lock held
2026 */
2027static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
2028{
2029        struct iu_entry *iue;
2030        struct ibmvscsis_cmd *cmd;
2031        struct mad_common *mad;
2032        long rc = ADAPT_SUCCESS;
2033
2034        switch (vscsi->state) {
2035                /*
2036                 * We have not exchanged Init Msgs yet, so this MAD was sent
2037                 * before the last Transport Event; client will not be
2038                 * expecting a response.
2039                 */
2040        case WAIT_CONNECTION:
2041                dev_dbg(&vscsi->dev, "mad: in Wait Connection state, ignoring MAD, flags %d\n",
2042                        vscsi->flags);
2043                return ADAPT_SUCCESS;
2044
2045        case SRP_PROCESSING:
2046        case CONNECTED:
2047                break;
2048
2049                /*
2050                 * We should never get here while we're in these states.
2051                 * Just log an error and get out.
2052                 */
2053        case UNCONFIGURING:
2054        case WAIT_IDLE:
2055        case ERR_DISCONNECT:
2056        case ERR_DISCONNECT_RECONNECT:
2057        default:
2058                dev_err(&vscsi->dev, "mad: invalid adapter state %d for mad\n",
2059                        vscsi->state);
2060                return ADAPT_SUCCESS;
2061        }
2062
2063        cmd = ibmvscsis_get_free_cmd(vscsi);
2064        if (!cmd) {
2065                dev_err(&vscsi->dev, "mad: failed to get cmd, debit %d\n",
2066                        vscsi->debit);
2067                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2068                return ERROR;
2069        }
2070        iue = cmd->iue;
2071        cmd->type = ADAPTER_MAD;
2072
2073        rc = ibmvscsis_copy_crq_packet(vscsi, cmd, crq);
2074        if (!rc) {
2075                mad = (struct mad_common *)&vio_iu(iue)->mad;
2076
2077                dev_dbg(&vscsi->dev, "mad: type %d\n", be32_to_cpu(mad->type));
2078
2079                rc = ibmvscsis_process_mad(vscsi, iue);
2080
2081                dev_dbg(&vscsi->dev, "mad: status %hd, rc %ld\n",
2082                        be16_to_cpu(mad->status), rc);
2083
2084                if (!rc)
2085                        ibmvscsis_send_mad_resp(vscsi, cmd, crq);
2086        } else {
2087                ibmvscsis_free_cmd_resources(vscsi, cmd);
2088        }
2089
2090        dev_dbg(&vscsi->dev, "Leaving mad, rc %ld\n", rc);
2091        return rc;
2092}
2093
2094/**
2095 * ibmvscsis_login_rsp() - Create/copy a login response notice to the client
2096 * @vscsi:      Pointer to our adapter structure
2097 * @cmd:        Pointer to the command for the SRP Login request
2098 *
2099 * EXECUTION ENVIRONMENT:
2100 *      Interrupt, interrupt lock held
2101 */
2102static long ibmvscsis_login_rsp(struct scsi_info *vscsi,
2103                                struct ibmvscsis_cmd *cmd)
2104{
2105        struct iu_entry *iue = cmd->iue;
2106        struct srp_login_rsp *rsp = &vio_iu(iue)->srp.login_rsp;
2107        struct format_code *fmt;
2108        uint flag_bits = 0;
2109        long rc = ADAPT_SUCCESS;
2110
2111        memset(rsp, 0, sizeof(struct srp_login_rsp));
2112
2113        rsp->opcode = SRP_LOGIN_RSP;
2114        rsp->req_lim_delta = cpu_to_be32(vscsi->request_limit);
2115        rsp->tag = cmd->rsp.tag;
2116        rsp->max_it_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
2117        rsp->max_ti_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
2118        fmt = (struct format_code *)&rsp->buf_fmt;
2119        fmt->buffers = SUPPORTED_FORMATS;
2120        vscsi->credit = 0;
2121
2122        cmd->rsp.len = sizeof(struct srp_login_rsp);
2123
2124        dma_wmb();
2125        rc = h_copy_rdma(cmd->rsp.len, vscsi->dds.window[LOCAL].liobn,
2126                         iue->sbuf->dma, vscsi->dds.window[REMOTE].liobn,
2127                         be64_to_cpu(iue->remote_token));
2128
2129        switch (rc) {
2130        case H_SUCCESS:
2131                break;
2132
2133        case H_PERMISSION:
2134                if (connection_broken(vscsi))
2135                        flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
2136                dev_err(&vscsi->dev, "login_rsp: error copying to client, rc %ld\n",
2137                        rc);
2138                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
2139                                          flag_bits);
2140                break;
2141        case H_SOURCE_PARM:
2142        case H_DEST_PARM:
2143        default:
2144                dev_err(&vscsi->dev, "login_rsp: error copying to client, rc %ld\n",
2145                        rc);
2146                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2147                break;
2148        }
2149
2150        return rc;
2151}
2152
2153/**
2154 * ibmvscsis_srp_login_rej() - Create/copy a login rejection notice to client
2155 * @vscsi:      Pointer to our adapter structure
2156 * @cmd:        Pointer to the command for the SRP Login request
2157 * @reason:     The reason the SRP Login is being rejected, per SRP protocol
2158 *
2159 * EXECUTION ENVIRONMENT:
2160 *      Interrupt, interrupt lock held
2161 */
2162static long ibmvscsis_srp_login_rej(struct scsi_info *vscsi,
2163                                    struct ibmvscsis_cmd *cmd, u32 reason)
2164{
2165        struct iu_entry *iue = cmd->iue;
2166        struct srp_login_rej *rej = &vio_iu(iue)->srp.login_rej;
2167        struct format_code *fmt;
2168        uint flag_bits = 0;
2169        long rc = ADAPT_SUCCESS;
2170
2171        memset(rej, 0, sizeof(*rej));
2172
2173        rej->opcode = SRP_LOGIN_REJ;
2174        rej->reason = cpu_to_be32(reason);
2175        rej->tag = cmd->rsp.tag;
2176        fmt = (struct format_code *)&rej->buf_fmt;
2177        fmt->buffers = SUPPORTED_FORMATS;
2178
2179        cmd->rsp.len = sizeof(*rej);
2180
2181        dma_wmb();
2182        rc = h_copy_rdma(cmd->rsp.len, vscsi->dds.window[LOCAL].liobn,
2183                         iue->sbuf->dma, vscsi->dds.window[REMOTE].liobn,
2184                         be64_to_cpu(iue->remote_token));
2185
2186        switch (rc) {
2187        case H_SUCCESS:
2188                break;
2189        case H_PERMISSION:
2190                if (connection_broken(vscsi))
2191                        flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
2192                dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n",
2193                        rc);
2194                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
2195                                          flag_bits);
2196                break;
2197        case H_SOURCE_PARM:
2198        case H_DEST_PARM:
2199        default:
2200                dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n",
2201                        rc);
2202                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2203                break;
2204        }
2205
2206        return rc;
2207}
2208
2209static int ibmvscsis_make_nexus(struct ibmvscsis_tport *tport)
2210{
2211        char *name = tport->tport_name;
2212        struct ibmvscsis_nexus *nexus;
2213        struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
2214        int rc;
2215
2216        if (tport->ibmv_nexus) {
2217                dev_dbg(&vscsi->dev, "tport->ibmv_nexus already exists\n");
2218                return 0;
2219        }
2220
2221        nexus = kzalloc(sizeof(*nexus), GFP_KERNEL);
2222        if (!nexus) {
2223                dev_err(&vscsi->dev, "Unable to allocate struct ibmvscsis_nexus\n");
2224                return -ENOMEM;
2225        }
2226
2227        nexus->se_sess = target_setup_session(&tport->se_tpg, 0, 0,
2228                                              TARGET_PROT_NORMAL, name, nexus,
2229                                              NULL);
2230        if (IS_ERR(nexus->se_sess)) {
2231                rc = PTR_ERR(nexus->se_sess);
2232                goto transport_init_fail;
2233        }
2234
2235        tport->ibmv_nexus = nexus;
2236
2237        return 0;
2238
2239transport_init_fail:
2240        kfree(nexus);
2241        return rc;
2242}
2243
2244static int ibmvscsis_drop_nexus(struct ibmvscsis_tport *tport)
2245{
2246        struct se_session *se_sess;
2247        struct ibmvscsis_nexus *nexus;
2248
2249        nexus = tport->ibmv_nexus;
2250        if (!nexus)
2251                return -ENODEV;
2252
2253        se_sess = nexus->se_sess;
2254        if (!se_sess)
2255                return -ENODEV;
2256
2257        /*
2258         * Release the SCSI I_T Nexus to the emulated ibmvscsis Target Port
2259         */
2260        target_remove_session(se_sess);
2261        tport->ibmv_nexus = NULL;
2262        kfree(nexus);
2263
2264        return 0;
2265}
2266
2267/**
2268 * ibmvscsis_srp_login() - Process an SRP Login Request
2269 * @vscsi:      Pointer to our adapter structure
2270 * @cmd:        Command element to use to process the SRP Login request
2271 * @crq:        Pointer to CRQ entry containing the SRP Login request
2272 *
2273 * EXECUTION ENVIRONMENT:
2274 *      Interrupt, called with interrupt lock held
2275 */
2276static long ibmvscsis_srp_login(struct scsi_info *vscsi,
2277                                struct ibmvscsis_cmd *cmd,
2278                                struct viosrp_crq *crq)
2279{
2280        struct iu_entry *iue = cmd->iue;
2281        struct srp_login_req *req = &vio_iu(iue)->srp.login_req;
2282        struct port_id {
2283                __be64 id_extension;
2284                __be64 io_guid;
2285        } *iport, *tport;
2286        struct format_code *fmt;
2287        u32 reason = 0x0;
2288        long rc = ADAPT_SUCCESS;
2289
2290        iport = (struct port_id *)req->initiator_port_id;
2291        tport = (struct port_id *)req->target_port_id;
2292        fmt = (struct format_code *)&req->req_buf_fmt;
2293        if (be32_to_cpu(req->req_it_iu_len) > SRP_MAX_IU_LEN)
2294                reason = SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE;
2295        else if (be32_to_cpu(req->req_it_iu_len) < 64)
2296                reason = SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL;
2297        else if ((be64_to_cpu(iport->id_extension) > (MAX_NUM_PORTS - 1)) ||
2298                 (be64_to_cpu(tport->id_extension) > (MAX_NUM_PORTS - 1)))
2299                reason = SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL;
2300        else if (req->req_flags & SRP_MULTICHAN_MULTI)
2301                reason = SRP_LOGIN_REJ_MULTI_CHANNEL_UNSUPPORTED;
2302        else if (fmt->buffers & (~SUPPORTED_FORMATS))
2303                reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT;
2304        else if ((fmt->buffers & SUPPORTED_FORMATS) == 0)
2305                reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT;
2306
2307        if (vscsi->state == SRP_PROCESSING)
2308                reason = SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED;
2309
2310        rc = ibmvscsis_make_nexus(&vscsi->tport);
2311        if (rc)
2312                reason = SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL;
2313
2314        cmd->rsp.format = VIOSRP_SRP_FORMAT;
2315        cmd->rsp.tag = req->tag;
2316
2317        dev_dbg(&vscsi->dev, "srp_login: reason 0x%x\n", reason);
2318
2319        if (reason)
2320                rc = ibmvscsis_srp_login_rej(vscsi, cmd, reason);
2321        else
2322                rc = ibmvscsis_login_rsp(vscsi, cmd);
2323
2324        if (!rc) {
2325                if (!reason)
2326                        vscsi->state = SRP_PROCESSING;
2327
2328                list_add_tail(&cmd->list, &vscsi->waiting_rsp);
2329                ibmvscsis_send_messages(vscsi);
2330        } else {
2331                ibmvscsis_free_cmd_resources(vscsi, cmd);
2332        }
2333
2334        dev_dbg(&vscsi->dev, "Leaving srp_login, rc %ld\n", rc);
2335        return rc;
2336}
2337
2338/**
2339 * ibmvscsis_srp_i_logout() - Helper Function to close I_T Nexus
2340 * @vscsi:      Pointer to our adapter structure
2341 * @cmd:        Command element to use to process the Implicit Logout request
2342 * @crq:        Pointer to CRQ entry containing the Implicit Logout request
2343 *
2344 * Do the logic to close the I_T nexus.  This function may not
2345 * behave to specification.
2346 *
2347 * EXECUTION ENVIRONMENT:
2348 *      Interrupt, interrupt lock held
2349 */
2350static long ibmvscsis_srp_i_logout(struct scsi_info *vscsi,
2351                                   struct ibmvscsis_cmd *cmd,
2352                                   struct viosrp_crq *crq)
2353{
2354        struct iu_entry *iue = cmd->iue;
2355        struct srp_i_logout *log_out = &vio_iu(iue)->srp.i_logout;
2356        long rc = ADAPT_SUCCESS;
2357
2358        if ((vscsi->debit > 0) || !list_empty(&vscsi->schedule_q) ||
2359            !list_empty(&vscsi->waiting_rsp)) {
2360                dev_err(&vscsi->dev, "i_logout: outstanding work\n");
2361                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
2362        } else {
2363                cmd->rsp.format = SRP_FORMAT;
2364                cmd->rsp.tag = log_out->tag;
2365                cmd->rsp.len = sizeof(struct mad_common);
2366                list_add_tail(&cmd->list, &vscsi->waiting_rsp);
2367                ibmvscsis_send_messages(vscsi);
2368
2369                ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, 0);
2370        }
2371
2372        return rc;
2373}
2374
2375/* Called with intr lock held */
2376static void ibmvscsis_srp_cmd(struct scsi_info *vscsi, struct viosrp_crq *crq)
2377{
2378        struct ibmvscsis_cmd *cmd;
2379        struct iu_entry *iue;
2380        struct srp_cmd *srp;
2381        struct srp_tsk_mgmt *tsk;
2382        long rc;
2383
2384        if (vscsi->request_limit - vscsi->debit <= 0) {
2385                /* Client has exceeded request limit */
2386                dev_err(&vscsi->dev, "Client exceeded the request limit (%d), debit %d\n",
2387                        vscsi->request_limit, vscsi->debit);
2388                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2389                return;
2390        }
2391
2392        cmd = ibmvscsis_get_free_cmd(vscsi);
2393        if (!cmd) {
2394                dev_err(&vscsi->dev, "srp_cmd failed to get cmd, debit %d\n",
2395                        vscsi->debit);
2396                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2397                return;
2398        }
2399        iue = cmd->iue;
2400        srp = &vio_iu(iue)->srp.cmd;
2401
2402        rc = ibmvscsis_copy_crq_packet(vscsi, cmd, crq);
2403        if (rc) {
2404                ibmvscsis_free_cmd_resources(vscsi, cmd);
2405                return;
2406        }
2407
2408        if (vscsi->state == SRP_PROCESSING) {
2409                switch (srp->opcode) {
2410                case SRP_LOGIN_REQ:
2411                        rc = ibmvscsis_srp_login(vscsi, cmd, crq);
2412                        break;
2413
2414                case SRP_TSK_MGMT:
2415                        tsk = &vio_iu(iue)->srp.tsk_mgmt;
2416                        dev_dbg(&vscsi->dev, "tsk_mgmt tag: %llu (0x%llx)\n",
2417                                tsk->tag, tsk->tag);
2418                        cmd->rsp.tag = tsk->tag;
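                            /* count this request against request_limit */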
2419                        vscsi->debit += 1;
2420                        cmd->type = TASK_MANAGEMENT;
2421                        list_add_tail(&cmd->list, &vscsi->schedule_q);
2422                        queue_work(vscsi->work_q, &cmd->work);
2423                        break;
2424
2425                case SRP_CMD:
2426                        dev_dbg(&vscsi->dev, "srp_cmd tag: %llu (0x%llx)\n",
2427                                srp->tag, srp->tag);
2428                        cmd->rsp.tag = srp->tag;
2429                        vscsi->debit += 1;
2430                        cmd->type = SCSI_CDB;
2431                        /*
2432                         * We want to keep track of work waiting for
2433                         * the workqueue.
2434                         */
2435                        list_add_tail(&cmd->list, &vscsi->schedule_q);
2436                        queue_work(vscsi->work_q, &cmd->work);
2437                        break;
2438
2439                case SRP_I_LOGOUT:
2440                        rc = ibmvscsis_srp_i_logout(vscsi, cmd, crq);
2441                        break;
2442
2443                case SRP_CRED_RSP:
2444                case SRP_AER_RSP:
2445                default:
2446                        ibmvscsis_free_cmd_resources(vscsi, cmd);
2447                        dev_err(&vscsi->dev, "invalid srp cmd, opcode %d\n",
2448                                (uint)srp->opcode);
2449                        ibmvscsis_post_disconnect(vscsi,
2450                                                  ERR_DISCONNECT_RECONNECT, 0);
2451                        break;
2452                }
2453        } else if (srp->opcode == SRP_LOGIN_REQ && vscsi->state == CONNECTED) {
2454                rc = ibmvscsis_srp_login(vscsi, cmd, crq);
2455        } else {
2456                ibmvscsis_free_cmd_resources(vscsi, cmd);
2457                dev_err(&vscsi->dev, "Invalid state %d to handle srp cmd\n",
2458                        vscsi->state);
2459                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2460        }
2461}
2462
2463/**
2464 * ibmvscsis_ping_response() - Respond to a ping request
2465 * @vscsi:      Pointer to our adapter structure
2466 *
2467 * Let the client know that the server is alive and waiting on
2468 * its native I/O stack.
2469 * If any type of error occurs from the call to queue a ping
2470 * response then the client is either not accepting or receiving
2471 * interrupts.  Disconnect with an error.
2472 *
2473 * EXECUTION ENVIRONMENT:
2474 *      Interrupt, interrupt lock held
2475 */
2476static long ibmvscsis_ping_response(struct scsi_info *vscsi)
2477{
2478        struct viosrp_crq *crq;
2479        u64 buffer[2] = { 0, 0 };
2480        long rc;
2481
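            /*
             * Build the CRQ message in a local buffer; h_send_crq takes
             * it as two 64-bit words.
             */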
2482        crq = (struct viosrp_crq *)&buffer;
2483        crq->valid = VALID_CMD_RESP_EL;
2484        crq->format = (u8)MESSAGE_IN_CRQ;
2485        crq->status = PING_RESPONSE;
2486
2487        rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]),
2488                        cpu_to_be64(buffer[MSG_LOW]));
2489
2490        switch (rc) {
2491        case H_SUCCESS:
2492                break;
2493        case H_CLOSED:
2494                vscsi->flags |= CLIENT_FAILED;
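                    /* fall through - the response queue is also down */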
2495        case H_DROPPED:
2496                vscsi->flags |= RESPONSE_Q_DOWN;
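                    /* fall through - report the error and disconnect */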
2497        case H_REMOTE_PARM:
2498                dev_err(&vscsi->dev, "ping_response: h_send_crq failed, rc %ld\n",
2499                        rc);
2500                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2501                break;
2502        default:
2503                dev_err(&vscsi->dev, "ping_response: h_send_crq returned unknown rc %ld\n",
2504                        rc);
2505                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
2506                break;
2507        }
2508
2509        return rc;
2510}
2511
2512/**
2513 * ibmvscsis_parse_command() - Parse an element taken from the cmd rsp queue.
2514 * @vscsi:      Pointer to our adapter structure
2515 * @crq:        Pointer to CRQ element containing the SRP request
2516 *
2517 * This function will return success if the command queue element is valid
2518 * and the srp iu or MAD request it pointed to was also valid.  That does
2519 * not mean that an error was not returned to the client.
2520 *
2521 * EXECUTION ENVIRONMENT:
2522 *      Interrupt, intr lock held
2523 */
2524static long ibmvscsis_parse_command(struct scsi_info *vscsi,
2525                                    struct viosrp_crq *crq)
2526{
2527        long rc = ADAPT_SUCCESS;
2528
2529        switch (crq->valid) {
2530        case VALID_CMD_RESP_EL:
2531                switch (crq->format) {
2532                case OS400_FORMAT:
2533                case AIX_FORMAT:
2534                case LINUX_FORMAT:
2535                case MAD_FORMAT:
2536                        if (vscsi->flags & PROCESSING_MAD) {
2537                                rc = ERROR;
2538                                dev_err(&vscsi->dev, "parse_command: already processing mad\n");
2539                                ibmvscsis_post_disconnect(vscsi,
2540                                                       ERR_DISCONNECT_RECONNECT,
2541                                                       0);
2542                        } else {
2543                                vscsi->flags |= PROCESSING_MAD;
2544                                rc = ibmvscsis_mad(vscsi, crq);
2545                        }
2546                        break;
2547
2548                case SRP_FORMAT:
2549                        ibmvscsis_srp_cmd(vscsi, crq);
2550                        break;
2551
2552                case MESSAGE_IN_CRQ:
2553                        if (crq->status == PING)
2554                                ibmvscsis_ping_response(vscsi);
2555                        break;
2556
2557                default:
2558                        dev_err(&vscsi->dev, "parse_command: invalid format %d\n",
2559                                (uint)crq->format);
2560                        ibmvscsis_post_disconnect(vscsi,
2561                                                  ERR_DISCONNECT_RECONNECT, 0);
2562                        break;
2563                }
2564                break;
2565
2566        case VALID_TRANS_EVENT:
2567                rc = ibmvscsis_trans_event(vscsi, crq);
2568                break;
2569
2570        case VALID_INIT_MSG:
2571                rc = ibmvscsis_init_msg(vscsi, crq);
2572                break;
2573
2574        default:
2575                dev_err(&vscsi->dev, "parse_command: invalid valid field %d\n",
2576                        (uint)crq->valid);
2577                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2578                break;
2579        }
2580
2581        /*
2582         * Return only what the interrupt handler cares
2583         * about. Most errors we keep right on trucking.
2584         */
2585        rc = vscsi->flags & SCHEDULE_DISCONNECT;
2586
2587        return rc;
2588}
2589
2590static int read_dma_window(struct scsi_info *vscsi)
2591{
2592        struct vio_dev *vdev = vscsi->dma_dev;
2593        const __be32 *dma_window;
2594        const __be32 *prop;
2595
2596        /* TODO Using of_parse_dma_window would be better, but it doesn't give
2597         * a way to read multiple windows without already knowing the size of
2598         * a window or the number of windows.
2599         */
2600        dma_window = (const __be32 *)vio_get_attribute(vdev,
2601                                                       "ibm,my-dma-window",
2602                                                       NULL);
2603        if (!dma_window) {
2604                dev_err(&vscsi->dev, "Couldn't find ibm,my-dma-window property\n");
2605                return -1;
2606        }
2607
2608        vscsi->dds.window[LOCAL].liobn = be32_to_cpu(*dma_window);
2609        dma_window++;
2610
2611        prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells",
2612                                                 NULL);
2613        if (!prop) {
2614                dev_warn(&vscsi->dev, "Couldn't find ibm,#dma-address-cells property\n");
2615                dma_window++;
2616        } else {
2617                dma_window += be32_to_cpu(*prop);
2618        }
2619
2620        prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells",
2621                                                 NULL);
2622        if (!prop) {
2623                dev_warn(&vscsi->dev, "Couldn't find ibm,#dma-size-cells property\n");
2624                dma_window++;
2625        } else {
2626                dma_window += be32_to_cpu(*prop);
2627        }
2628
2629        /* dma_window should point to the second window now */
2630        vscsi->dds.window[REMOTE].liobn = be32_to_cpu(*dma_window);
2631
2632        return 0;
2633}
2634
2635static struct ibmvscsis_tport *ibmvscsis_lookup_port(const char *name)
2636{
2637        struct ibmvscsis_tport *tport = NULL;
2638        struct vio_dev *vdev;
2639        struct scsi_info *vscsi;
2640
2641        spin_lock_bh(&ibmvscsis_dev_lock);
2642        list_for_each_entry(vscsi, &ibmvscsis_dev_list, list) {
2643                vdev = vscsi->dma_dev;
2644                if (!strcmp(dev_name(&vdev->dev), name)) {
2645                        tport = &vscsi->tport;
2646                        break;
2647                }
2648        }
2649        spin_unlock_bh(&ibmvscsis_dev_lock);
2650
2651        return tport;
2652}
2653
2654/**
2655 * ibmvscsis_parse_cmd() - Parse SRP Command
2656 * @vscsi:      Pointer to our adapter structure
2657 * @cmd:        Pointer to command element with SRP command
2658 *
2659 * Parse the srp command; if it is valid then submit it to tcm.
2660 * Note: The return code does not reflect the status of the SCSI CDB.
2661 *
2662 * EXECUTION ENVIRONMENT:
2663 *      Process level
2664 */
2665static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
2666                                struct ibmvscsis_cmd *cmd)
2667{
2668        struct iu_entry *iue = cmd->iue;
2669        struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf;
2670        struct ibmvscsis_nexus *nexus;
2671        u64 data_len = 0;
2672        enum dma_data_direction dir;
2673        int attr = 0;
2674        int rc = 0;
2675
2676        nexus = vscsi->tport.ibmv_nexus;
2677        /*
2678         * additional length in bytes.  Note that the SRP spec says that
2679         * additional length is in 4-byte words, but technically the
2680         * additional length field is only the upper 6 bits of the byte.
2681         * The lower 2 bits are reserved.  If the lower 2 bits are 0 (as
2682         * all reserved fields should be), then interpreting the byte as
2683         * an int will yield the length in bytes.
2684         */
2685        if (srp->add_cdb_len & 0x03) {
2686                dev_err(&vscsi->dev, "parse_cmd: reserved bits set in IU\n");
2687                spin_lock_bh(&vscsi->intr_lock);
2688                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2689                ibmvscsis_free_cmd_resources(vscsi, cmd);
2690                spin_unlock_bh(&vscsi->intr_lock);
2691                return;
2692        }
2693
2694        if (srp_get_desc_table(srp, &dir, &data_len)) {
2695                dev_err(&vscsi->dev, "0x%llx: parsing SRP descriptor table failed.\n",
2696                        srp->tag);
2697                goto fail;
2698        }
2699
2700        cmd->rsp.sol_not = srp->sol_not;
2701
2702        switch (srp->task_attr) {
2703        case SRP_SIMPLE_TASK:
2704                attr = TCM_SIMPLE_TAG;
2705                break;
2706        case SRP_ORDERED_TASK:
2707                attr = TCM_ORDERED_TAG;
2708                break;
2709        case SRP_HEAD_TASK:
2710                attr = TCM_HEAD_TAG;
2711                break;
2712        case SRP_ACA_TASK:
2713                attr = TCM_ACA_TAG;
2714                break;
2715        default:
2716                dev_err(&vscsi->dev, "Invalid task attribute %d\n",
2717                        srp->task_attr);
2718                goto fail;
2719        }
2720
2721        cmd->se_cmd.tag = be64_to_cpu(srp->tag);
2722
2723        spin_lock_bh(&vscsi->intr_lock);
2724        list_add_tail(&cmd->list, &vscsi->active_q);
2725        spin_unlock_bh(&vscsi->intr_lock);
2726
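            /* clear the address-method bits (top two bits of LUN byte 0) */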
2727        srp->lun.scsi_lun[0] &= 0x3f;
2728
2729        rc = target_submit_cmd(&cmd->se_cmd, nexus->se_sess, srp->cdb,
2730                               cmd->sense_buf, scsilun_to_int(&srp->lun),
2731                               data_len, attr, dir, 0);
2732        if (rc) {
2733                dev_err(&vscsi->dev, "target_submit_cmd failed, rc %d\n", rc);
2734                spin_lock_bh(&vscsi->intr_lock);
2735                list_del(&cmd->list);
2736                ibmvscsis_free_cmd_resources(vscsi, cmd);
2737                spin_unlock_bh(&vscsi->intr_lock);
2738                goto fail;
2739        }
2740        return;
2741
2742fail:
2743        spin_lock_bh(&vscsi->intr_lock);
2744        ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2745        spin_unlock_bh(&vscsi->intr_lock);
2746}
2747
2748/**
2749 * ibmvscsis_parse_task() - Parse SRP Task Management Request
2750 * @vscsi:      Pointer to our adapter structure
2751 * @cmd:        Pointer to command element with SRP task management request
2752 *
2753 * Parse the srp task management request; if it is valid then submit it to tcm.
2754 * Note: The return code does not reflect the status of the task management
2755 * request.
2756 *
2757 * EXECUTION ENVIRONMENT:
2758 *      Process level
2759 */
2760static void ibmvscsis_parse_task(struct scsi_info *vscsi,
2761                                 struct ibmvscsis_cmd *cmd)
2762{
2763        struct iu_entry *iue = cmd->iue;
2764        struct srp_tsk_mgmt *srp_tsk = &vio_iu(iue)->srp.tsk_mgmt;
2765        int tcm_type;
2766        u64 tag_to_abort = 0;
2767        int rc = 0;
2768        struct ibmvscsis_nexus *nexus;
2769
2770        nexus = vscsi->tport.ibmv_nexus;
2771
2772        cmd->rsp.sol_not = srp_tsk->sol_not;
2773
2774        switch (srp_tsk->tsk_mgmt_func) {
2775        case SRP_TSK_ABORT_TASK:
2776                tcm_type = TMR_ABORT_TASK;
2777                tag_to_abort = be64_to_cpu(srp_tsk->task_tag);
2778                break;
2779        case SRP_TSK_ABORT_TASK_SET:
2780                tcm_type = TMR_ABORT_TASK_SET;
2781                break;
2782        case SRP_TSK_CLEAR_TASK_SET:
2783                tcm_type = TMR_CLEAR_TASK_SET;
2784                break;
2785        case SRP_TSK_LUN_RESET:
2786                tcm_type = TMR_LUN_RESET;
2787                break;
2788        case SRP_TSK_CLEAR_ACA:
2789                tcm_type = TMR_CLEAR_ACA;
2790                break;
2791        default:
2792                dev_err(&vscsi->dev, "unknown task mgmt func %d\n",
2793                        srp_tsk->tsk_mgmt_func);
2794                cmd->se_cmd.se_tmr_req->response =
2795                        TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
2796                rc = -1;
2797                break;
2798        }
2799
2800        if (!rc) {
2801                cmd->se_cmd.tag = be64_to_cpu(srp_tsk->tag);
2802
2803                spin_lock_bh(&vscsi->intr_lock);
2804                list_add_tail(&cmd->list, &vscsi->active_q);
2805                spin_unlock_bh(&vscsi->intr_lock);
2806
2807                srp_tsk->lun.scsi_lun[0] &= 0x3f;
2808
2809                dev_dbg(&vscsi->dev, "calling submit_tmr, func %d\n",
2810                        srp_tsk->tsk_mgmt_func);
2811                rc = target_submit_tmr(&cmd->se_cmd, nexus->se_sess, NULL,
2812                                       scsilun_to_int(&srp_tsk->lun), srp_tsk,
2813                                       tcm_type, GFP_KERNEL, tag_to_abort, 0);
2814                if (rc) {
2815                        dev_err(&vscsi->dev, "target_submit_tmr failed, rc %d\n",
2816                                rc);
2817                        spin_lock_bh(&vscsi->intr_lock);
2818                        list_del(&cmd->list);
2819                        spin_unlock_bh(&vscsi->intr_lock);
2820                        cmd->se_cmd.se_tmr_req->response =
2821                                TMR_FUNCTION_REJECTED;
2822                }
2823        }
2824
2825        if (rc)
2826                transport_send_check_condition_and_sense(&cmd->se_cmd, 0, 0);
2827}
2828
2829static void ibmvscsis_scheduler(struct work_struct *work)
2830{
2831        struct ibmvscsis_cmd *cmd = container_of(work, struct ibmvscsis_cmd,
2832                                                 work);
2833        struct scsi_info *vscsi = cmd->adapter;
2834
2835        spin_lock_bh(&vscsi->intr_lock);
2836
2837        /* Remove from schedule_q */
2838        list_del(&cmd->list);
2839
2840        /* Don't submit cmd if we're disconnecting */
2841        if (vscsi->flags & (SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED)) {
2842                ibmvscsis_free_cmd_resources(vscsi, cmd);
2843
2844                /* ibmvscsis_disconnect might be waiting for us */
2845                if (list_empty(&vscsi->active_q) &&
2846                    list_empty(&vscsi->schedule_q) &&
2847                    (vscsi->flags & WAIT_FOR_IDLE)) {
2848                        vscsi->flags &= ~WAIT_FOR_IDLE;
2849                        complete(&vscsi->wait_idle);
2850                }
2851
2852                spin_unlock_bh(&vscsi->intr_lock);
2853                return;
2854        }
2855
2856        spin_unlock_bh(&vscsi->intr_lock);
2857
2858        switch (cmd->type) {
2859        case SCSI_CDB:
2860                ibmvscsis_parse_cmd(vscsi, cmd);
2861                break;
2862        case TASK_MANAGEMENT:
2863                ibmvscsis_parse_task(vscsi, cmd);
2864                break;
2865        default:
2866                dev_err(&vscsi->dev, "scheduler, invalid cmd type %d\n",
2867                        cmd->type);
2868                spin_lock_bh(&vscsi->intr_lock);
2869                ibmvscsis_free_cmd_resources(vscsi, cmd);
2870                spin_unlock_bh(&vscsi->intr_lock);
2871                break;
2872        }
2873}
2874
2875static int ibmvscsis_alloc_cmds(struct scsi_info *vscsi, int num)
2876{
2877        struct ibmvscsis_cmd *cmd;
2878        int i;
2879
2880        INIT_LIST_HEAD(&vscsi->free_cmd);
2881        vscsi->cmd_pool = kcalloc(num, sizeof(struct ibmvscsis_cmd),
2882                                  GFP_KERNEL);
2883        if (!vscsi->cmd_pool)
2884                return -ENOMEM;
2885
2886        for (i = 0, cmd = (struct ibmvscsis_cmd *)vscsi->cmd_pool; i < num;
2887             i++, cmd++) {
2888                cmd->abort_cmd = NULL;
2889                cmd->adapter = vscsi;
2890                INIT_WORK(&cmd->work, ibmvscsis_scheduler);
2891                list_add_tail(&cmd->list, &vscsi->free_cmd);
2892        }
2893
2894        return 0;
2895}
2896
2897static void ibmvscsis_free_cmds(struct scsi_info *vscsi)
2898{
2899        kfree(vscsi->cmd_pool);
2900        vscsi->cmd_pool = NULL;
2901        INIT_LIST_HEAD(&vscsi->free_cmd);
2902}
2903
2904/**
2905 * ibmvscsis_service_wait_q() - Service Waiting Queue
2906 * @timer:      Pointer to timer which has expired
2907 *
2908 * This routine is called when the timer pops to service the waiting
2909 * queue. Elements on the queue have completed, their responses have been
2910 * copied to the client, but the client's response queue was full so
2911 * the queue message could not be sent. The routine takes the interrupt
2912 * lock and calls ibmvscsis_send_messages() to retry.
2913 *
2914 * EXECUTION ENVIRONMENT:
2915 *      called at interrupt level
2916 */
2917static enum hrtimer_restart ibmvscsis_service_wait_q(struct hrtimer *timer)
2918{
2919        struct timer_cb *p_timer = container_of(timer, struct timer_cb, timer);
2920        struct scsi_info *vscsi = container_of(p_timer, struct scsi_info,
2921                                               rsp_q_timer);
2922
2923        spin_lock_bh(&vscsi->intr_lock);
2924        p_timer->timer_pops += 1;
2925        p_timer->started = false;
2926        ibmvscsis_send_messages(vscsi);
2927        spin_unlock_bh(&vscsi->intr_lock);
2928
2929        return HRTIMER_NORESTART;
2930}
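/*
 * Arming sketch (not code from this section; a generic hrtimer idiom shown
 * for orientation, with WAIT_NANO_SECONDS as a placeholder delay):
 *
 *        if (!p_timer->started) {
 *                p_timer->started = true;
 *                hrtimer_start(&p_timer->timer,
 *                              ktime_set(0, WAIT_NANO_SECONDS),
 *                              HRTIMER_MODE_REL);
 *        }
 *
 * The send path would arm the timer along these lines when the client's
 * response queue is full, so ibmvscsis_service_wait_q() retries shortly.
 */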
2931
2932static long ibmvscsis_alloctimer(struct scsi_info *vscsi)
2933{
2934        struct timer_cb *p_timer;
2935
2936        p_timer = &vscsi->rsp_q_timer;
2937        hrtimer_init(&p_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2938
2939        p_timer->timer.function = ibmvscsis_service_wait_q;
2940        p_timer->started = false;
2941        p_timer->timer_pops = 0;
2942
2943        return ADAPT_SUCCESS;
2944}
2945
2946static void ibmvscsis_freetimer(struct scsi_info *vscsi)
2947{
2948        struct timer_cb *p_timer;
2949
2950        p_timer = &vscsi->rsp_q_timer;
2951
2952        (void)hrtimer_cancel(&p_timer->timer);
2953
2954        p_timer->started = false;
2955        p_timer->timer_pops = 0;
2956}
2957
2958static irqreturn_t ibmvscsis_interrupt(int dummy, void *data)
2959{
2960        struct scsi_info *vscsi = data;
2961
2962        vio_disable_interrupts(vscsi->dma_dev);
2963        tasklet_schedule(&vscsi->work_task);
2964
2965        return IRQ_HANDLED;
2966}
2967
2968/**
2969 * ibmvscsis_enable_change_state() - Set new state based on enabled status
2970 * @vscsi:      Pointer to our adapter structure
2971 *
2972 * This function determines our new state now that we are enabled.  This
2973 * may involve sending an Init Complete message to the client.
2974 *
2975 * Must be called with interrupt lock held.
2976 */
2977static long ibmvscsis_enable_change_state(struct scsi_info *vscsi)
2978{
2979        int bytes;
2980        long rc = ADAPT_SUCCESS;
2981
2982        bytes = vscsi->cmd_q.size * PAGE_SIZE;
2983        rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, bytes);
2984        if (rc == H_CLOSED || rc == H_SUCCESS) {
2985                vscsi->state = WAIT_CONNECTION;
2986                rc = ibmvscsis_establish_new_q(vscsi);
2987        }
2988
2989        if (rc != ADAPT_SUCCESS) {
2990                vscsi->state = ERR_DISCONNECTED;
2991                vscsi->flags |= RESPONSE_Q_DOWN;
2992        }
2993
2994        return rc;
2995}
2996
2997/**
2998 * ibmvscsis_create_command_q() - Create Command Queue
2999 * @vscsi:      Pointer to our adapter structure
3000 * @num_cmds:   Currently unused.  In the future, may be used to determine
3001 *              the size of the CRQ.
3002 *
3003 * Allocates memory for the command queue, maps it into an ioba, and
3004 * initializes the command/response queue.
3005 *
3006 * EXECUTION ENVIRONMENT:
3007 *      Process level only
3008 */
3009static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds)
3010{
3011        int pages;
3012        struct vio_dev *vdev = vscsi->dma_dev;
3013
3014        /* We might support multiple pages in the future, but just 1 for now */
3015        pages = 1;
3016
3017        vscsi->cmd_q.size = pages;
3018
3019        vscsi->cmd_q.base_addr =
3020                (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
3021        if (!vscsi->cmd_q.base_addr)
3022                return -ENOMEM;
3023
3024        vscsi->cmd_q.mask = ((uint)pages * CRQ_PER_PAGE) - 1;
3025
3026        vscsi->cmd_q.crq_token = dma_map_single(&vdev->dev,
3027                                                vscsi->cmd_q.base_addr,
3028                                                PAGE_SIZE, DMA_BIDIRECTIONAL);
3029        if (dma_mapping_error(&vdev->dev, vscsi->cmd_q.crq_token)) {
3030                free_page((unsigned long)vscsi->cmd_q.base_addr);
3031                return -ENOMEM;
3032        }
3033
3034        return 0;
3035}
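/*
 * Sizing sketch (the element and page sizes below are assumptions used only
 * for the arithmetic, not values defined in this file): with 16-byte CRQ
 * elements, CRQ_PER_PAGE works out to PAGE_SIZE / 16, so a 4 KiB page gives
 * 256 elements and a mask of 255, while the 64 KiB pages common on ppc64
 * give 4096 elements and a mask of 4095; either way the count is a power of
 * two, so the index wraps with a simple "index & mask".
 */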
3036
3037/**
3038 * ibmvscsis_destroy_command_q() - Destroy Command Queue
3039 * @vscsi:      Pointer to our adapter structure
3040 *
3041 * Releases the command queue page and tears down its DMA mapping.
3042 *
3043 * EXECUTION ENVIRONMENT:
3044 *      Process level only
3045 */
3046static void ibmvscsis_destroy_command_q(struct scsi_info *vscsi)
3047{
3048        dma_unmap_single(&vscsi->dma_dev->dev, vscsi->cmd_q.crq_token,
3049                         PAGE_SIZE, DMA_BIDIRECTIONAL);
3050        free_page((unsigned long)vscsi->cmd_q.base_addr);
3051        vscsi->cmd_q.base_addr = NULL;
3052        vscsi->state = NO_QUEUE;
3053}
3054
3055static u8 ibmvscsis_fast_fail(struct scsi_info *vscsi,
3056                              struct ibmvscsis_cmd *cmd)
3057{
3058        struct iu_entry *iue = cmd->iue;
3059        struct se_cmd *se_cmd = &cmd->se_cmd;
3060        struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf;
3061        struct scsi_sense_hdr sshdr;
3062        u8 rc = se_cmd->scsi_status;
3063
3064        if (vscsi->fast_fail && (READ_CMD(srp->cdb) || WRITE_CMD(srp->cdb)))
3065                if (scsi_normalize_sense(se_cmd->sense_buffer,
3066                                         se_cmd->scsi_sense_length, &sshdr))
3067                        if (sshdr.sense_key == HARDWARE_ERROR &&
3068                            (se_cmd->residual_count == 0 ||
3069                             se_cmd->residual_count == se_cmd->data_length)) {
3070                                rc = NO_SENSE;
3071                                cmd->flags |= CMD_FAST_FAIL;
3072                        }
3073
3074        return rc;
3075}
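/*
 * Worked example for the check above (numbers are illustrative): with
 * fast_fail enabled, a 4 KiB READ that ends with HARDWARE ERROR sense and
 * either residual_count == 0 (everything transferred) or residual_count ==
 * 4096 == data_length (nothing transferred) is reported to the client as
 * NO SENSE, and CMD_FAST_FAIL later steers srp_build_response() to the
 * unsolicited (UCSOLNT) notification bits.
 */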
3076
3077/**
3078 * srp_build_response() - Build an SRP response buffer
3079 * @vscsi:      Pointer to our adapter structure
3080 * @cmd:        Pointer to command for which to send the response
3081 * @len_p:      Where to return the length of the IU response sent.  This
3082 *              is needed to construct the CRQ response.
3083 *
3084 * Build the SRP response buffer and copy it to the client's memory space.
3085 */
3086static long srp_build_response(struct scsi_info *vscsi,
3087                               struct ibmvscsis_cmd *cmd, uint *len_p)
3088{
3089        struct iu_entry *iue = cmd->iue;
3090        struct se_cmd *se_cmd = &cmd->se_cmd;
3091        struct srp_rsp *rsp;
3092        uint len;
3093        u32 rsp_code;
3094        char *data;
3095        u32 *tsk_status;
3096        long rc = ADAPT_SUCCESS;
3097
3098        spin_lock_bh(&vscsi->intr_lock);
3099
3100        rsp = &vio_iu(iue)->srp.rsp;
3101        len = sizeof(*rsp);
3102        memset(rsp, 0, len);
3103        data = rsp->data;
3104
3105        rsp->opcode = SRP_RSP;
3106
3107        rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
3108        rsp->tag = cmd->rsp.tag;
3109        rsp->flags = 0;
3110
3111        if (cmd->type == SCSI_CDB) {
3112                rsp->status = ibmvscsis_fast_fail(vscsi, cmd);
3113                if (rsp->status) {
3114                        dev_dbg(&vscsi->dev, "build_resp: cmd %p, scsi status %d\n",
3115                                cmd, (int)rsp->status);
3116                        ibmvscsis_determine_resid(se_cmd, rsp);
3117                        if (se_cmd->scsi_sense_length && se_cmd->sense_buffer) {
3118                                rsp->sense_data_len =
3119                                        cpu_to_be32(se_cmd->scsi_sense_length);
3120                                rsp->flags |= SRP_RSP_FLAG_SNSVALID;
3121                                len += se_cmd->scsi_sense_length;
3122                                memcpy(data, se_cmd->sense_buffer,
3123                                       se_cmd->scsi_sense_length);
3124                        }
3125                        rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
3126                                UCSOLNT_RESP_SHIFT;
3127                } else if (cmd->flags & CMD_FAST_FAIL) {
3128                        dev_dbg(&vscsi->dev, "build_resp: cmd %p, fast fail\n",
3129                                cmd);
3130                        rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
3131                                UCSOLNT_RESP_SHIFT;
3132                } else {
3133                        rsp->sol_not = (cmd->rsp.sol_not & SCSOLNT) >>
3134                                SCSOLNT_RESP_SHIFT;
3135                }
3136        } else {
3137                /* this is task management */
3138                rsp->status = 0;
3139                rsp->resp_data_len = cpu_to_be32(4);
3140                rsp->flags |= SRP_RSP_FLAG_RSPVALID;
3141
3142                switch (se_cmd->se_tmr_req->response) {
3143                case TMR_FUNCTION_COMPLETE:
3144                case TMR_TASK_DOES_NOT_EXIST:
3145                        rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_COMPLETE;
3146                        rsp->sol_not = (cmd->rsp.sol_not & SCSOLNT) >>
3147                                SCSOLNT_RESP_SHIFT;
3148                        break;
3149                case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
3150                case TMR_LUN_DOES_NOT_EXIST:
3151                        rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_NOT_SUPPORTED;
3152                        rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
3153                                UCSOLNT_RESP_SHIFT;
3154                        break;
3155                case TMR_FUNCTION_FAILED:
3156                case TMR_FUNCTION_REJECTED:
3157                default:
3158                        rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_FAILED;
3159                        rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
3160                                UCSOLNT_RESP_SHIFT;
3161                        break;
3162                }
3163
3164                tsk_status = (u32 *)data;
3165                *tsk_status = cpu_to_be32(rsp_code);
3166                data = (char *)(tsk_status + 1);
3167                len += 4;
3168        }
3169
3170        dma_wmb();
3171        rc = h_copy_rdma(len, vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma,
3172                         vscsi->dds.window[REMOTE].liobn,
3173                         be64_to_cpu(iue->remote_token));
3174
3175        switch (rc) {
3176        case H_SUCCESS:
3177                vscsi->credit = 0;
3178                *len_p = len;
3179                break;
3180        case H_PERMISSION:
3181                if (connection_broken(vscsi))
3182                        vscsi->flags |= RESPONSE_Q_DOWN | CLIENT_FAILED;
3183
3184                dev_err(&vscsi->dev, "build_response: error copying to client, rc %ld, flags 0x%x, state 0x%hx\n",
3185                        rc, vscsi->flags, vscsi->state);
3186                break;
3187        case H_SOURCE_PARM:
3188        case H_DEST_PARM:
3189        default:
3190                dev_err(&vscsi->dev, "build_response: error copying to client, rc %ld\n",
3191                        rc);
3192                break;
3193        }
3194
3195        spin_unlock_bh(&vscsi->intr_lock);
3196
3197        return rc;
3198}
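/*
 * Length bookkeeping for the routine above (descriptive summary, not extra
 * logic): a task-management response starts at len = sizeof(*rsp), appends
 * a 4-byte response code to rsp->data with SRP_RSP_FLAG_RSPVALID set, and
 * ends at len + 4; a SCSI response with sense data instead grows len by
 * scsi_sense_length and sets SRP_RSP_FLAG_SNSVALID.  That final len is what
 * h_copy_rdma() pushes to the client and what comes back through *len_p.
 */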
3199
3200static int ibmvscsis_rdma(struct ibmvscsis_cmd *cmd, struct scatterlist *sg,
3201                          int nsg, struct srp_direct_buf *md, int nmd,
3202                          enum dma_data_direction dir, unsigned int bytes)
3203{
3204        struct iu_entry *iue = cmd->iue;
3205        struct srp_target *target = iue->target;
3206        struct scsi_info *vscsi = target->ldata;
3207        struct scatterlist *sgp;
3208        dma_addr_t client_ioba, server_ioba;
3209        ulong buf_len;
3210        ulong client_len, server_len;
3211        int md_idx;
3212        long tx_len;
3213        long rc = 0;
3214
3215        if (bytes == 0)
3216                return 0;
3217
3218        sgp = sg;
3219        client_len = 0;
3220        server_len = 0;
3221        md_idx = 0;
3222        tx_len = bytes;
3223
3224        do {
3225                if (client_len == 0) {
3226                        if (md_idx >= nmd) {
3227                                dev_err(&vscsi->dev, "rdma: ran out of client memory descriptors\n");
3228                                rc = -EIO;
3229                                break;
3230                        }
3231                        client_ioba = be64_to_cpu(md[md_idx].va);
3232                        client_len = be32_to_cpu(md[md_idx].len);
3233                }
3234                if (server_len == 0) {
3235                        if (!sgp) {
3236                                dev_err(&vscsi->dev, "rdma: ran out of scatter/gather list\n");
3237                                rc = -EIO;
3238                                break;
3239                        }
3240                        server_ioba = sg_dma_address(sgp);
3241                        server_len = sg_dma_len(sgp);
3242                }
3243
3244                buf_len = tx_len;
3245
3246                if (buf_len > client_len)
3247                        buf_len = client_len;
3248
3249                if (buf_len > server_len)
3250                        buf_len = server_len;
3251
3252                if (buf_len > max_vdma_size)
3253                        buf_len = max_vdma_size;
3254
3255                if (dir == DMA_TO_DEVICE) {
3256                        /* read from client */
3257                        rc = h_copy_rdma(buf_len,
3258                                         vscsi->dds.window[REMOTE].liobn,
3259                                         client_ioba,
3260                                         vscsi->dds.window[LOCAL].liobn,
3261                                         server_ioba);
3262                } else {
3263                        /* The h_copy_rdma will cause phyp, running in another
3264                         * partition, to read memory, so we need to make sure
3265                         * the data has been written out, hence these syncs.
3266                         */
3267                        /* ensure that everything is in memory */
3268                        isync();
3269                        /* ensure that memory has been made visible */
3270                        dma_wmb();
3271                        rc = h_copy_rdma(buf_len,
3272                                         vscsi->dds.window[LOCAL].liobn,
3273                                         server_ioba,
3274                                         vscsi->dds.window[REMOTE].liobn,
3275                                         client_ioba);
3276                }
3277                switch (rc) {
3278                case H_SUCCESS:
3279                        break;
3280                case H_PERMISSION:
3281                case H_SOURCE_PARM:
3282                case H_DEST_PARM:
3283                        if (connection_broken(vscsi)) {
3284                                spin_lock_bh(&vscsi->intr_lock);
3285                                vscsi->flags |=
3286                                        (RESPONSE_Q_DOWN | CLIENT_FAILED);
3287                                spin_unlock_bh(&vscsi->intr_lock);
3288                        }
3289                        dev_err(&vscsi->dev, "rdma: h_copy_rdma failed, rc %ld\n",
3290                                rc);
3291                        break;
3292
3293                default:
3294                        dev_err(&vscsi->dev, "rdma: unknown error %ld from h_copy_rdma\n",
3295                                rc);
3296                        break;
3297                }
3298
3299                if (!rc) {
3300                        tx_len -= buf_len;
3301                        if (tx_len) {
3302                                client_len -= buf_len;
3303                                if (client_len == 0)
3304                                        md_idx++;
3305                                else
3306                                        client_ioba += buf_len;
3307
3308                                server_len -= buf_len;
3309                                if (server_len == 0)
3310                                        sgp = sg_next(sgp);
3311                                else
3312                                        server_ioba += buf_len;
3313                        } else {
3314                                break;
3315                        }
3316                }
3317        } while (!rc);
3318
3319        return rc;
3320}
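/*
 * Worked example of the copy loop above (all numbers invented): with
 * bytes = 96 KiB, one client descriptor of 96 KiB, server sg entries of
 * 64 KiB and 32 KiB, and max_vdma_size = 128 KiB, the first pass copies
 * min(96K, 96K, 64K, 128K) = 64 KiB and advances to the next sg entry;
 * the second pass copies the remaining 32 KiB, tx_len reaches zero, and
 * the loop ends with rc = 0.
 */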
3321
3322/**
3323 * ibmvscsis_handle_crq() - Handle CRQ
3324 * @data:       Pointer to our adapter structure
3325 *
3326 * Read the command elements from the command queue and copy the payloads
3327 * associated with the command elements to local memory and execute the
3328 * SRP requests.
3329 *
3330 * Note: this is an edge triggered interrupt. It can not be shared.
3331 */
3332static void ibmvscsis_handle_crq(unsigned long data)
3333{
3334        struct scsi_info *vscsi = (struct scsi_info *)data;
3335        struct viosrp_crq *crq;
3336        long rc;
3337        bool ack = true;
3338        volatile u8 valid;
3339
3340        spin_lock_bh(&vscsi->intr_lock);
3341
3342        dev_dbg(&vscsi->dev, "got interrupt\n");
3343
3344        /*
3345         * if we are waiting for all pending commands to complete because we
3346         * received a transport event, anything left in the command queue is
3347         * for a new connection, so do nothing here
3348         */
3349        if (TARGET_STOP(vscsi)) {
3350                vio_enable_interrupts(vscsi->dma_dev);
3351
3352                dev_dbg(&vscsi->dev, "handle_crq, don't process: flags 0x%x, state 0x%hx\n",
3353                        vscsi->flags, vscsi->state);
3354                spin_unlock_bh(&vscsi->intr_lock);
3355                return;
3356        }
3357
3358        rc = vscsi->flags & SCHEDULE_DISCONNECT;
3359        crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
3360        valid = crq->valid;
3361        dma_rmb();
3362
3363        while (valid) {
3364                /*
3365                 * These are edge-triggered interrupts. After dropping out of
3366                 * the while loop, the code must check for work since an
3367                 * interrupt could be lost and an element left on the queue,
3368                 * hence the label.
3369                 */
3370cmd_work:
3371                vscsi->cmd_q.index =
3372                        (vscsi->cmd_q.index + 1) & vscsi->cmd_q.mask;
3373
3374                if (!rc) {
3375                        rc = ibmvscsis_parse_command(vscsi, crq);
3376                } else {
3377                        if ((uint)crq->valid == VALID_TRANS_EVENT) {
3378                                /*
3379                                 * must service the transport layer events even
3380                                 * in an error state; don't break out until all
3381                                 * the consecutive transport events have been
3382                                 * processed
3383                                 */
3384                                rc = ibmvscsis_trans_event(vscsi, crq);
3385                        } else if (vscsi->flags & TRANS_EVENT) {
3386                                /*
3387                                 * if a transport event has occurred leave
3388                                 * everything but transport events on the queue
3389                                 *
3390                                 * need to decrement the queue index so we can
3391                                 * look at the element again
3392                                 */
3393                                if (vscsi->cmd_q.index)
3394                                        vscsi->cmd_q.index -= 1;
3395                                else
3396                                        /*
3397                                         * index is at 0, so it just wrapped;
3398                                         * point it at the last element in the queue
3399                                         */
3400                                        vscsi->cmd_q.index = vscsi->cmd_q.mask;
3401                                break;
3402                        }
3403                }
3404
3405                crq->valid = INVALIDATE_CMD_RESP_EL;
3406
3407                crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
3408                valid = crq->valid;
3409                dma_rmb();
3410        }
3411
3412        if (!rc) {
3413                if (ack) {
3414                        vio_enable_interrupts(vscsi->dma_dev);
3415                        ack = false;
3416                        dev_dbg(&vscsi->dev, "handle_crq, reenabling interrupts\n");
3417                }
3418                valid = crq->valid;
3419                dma_rmb();
3420                if (valid)
3421                        goto cmd_work;
3422        } else {
3423                dev_dbg(&vscsi->dev, "handle_crq, error: flags 0x%x, state 0x%hx, crq index 0x%x\n",
3424                        vscsi->flags, vscsi->state, vscsi->cmd_q.index);
3425        }
3426
3427        dev_dbg(&vscsi->dev, "Leaving handle_crq: schedule_q empty %d, flags 0x%x, state 0x%hx\n",
3428                (int)list_empty(&vscsi->schedule_q), vscsi->flags,
3429                vscsi->state);
3430
3431        spin_unlock_bh(&vscsi->intr_lock);
3432}
3433
3434static int ibmvscsis_probe(struct vio_dev *vdev,
3435                           const struct vio_device_id *id)
3436{
3437        struct scsi_info *vscsi;
3438        int rc = 0;
3439        long hrc = 0;
3440        char wq_name[24];
3441
3442        vscsi = kzalloc(sizeof(*vscsi), GFP_KERNEL);
3443        if (!vscsi) {
3444                rc = -ENOMEM;
3445                dev_err(&vdev->dev, "probe: allocation of adapter failed\n");
3446                return rc;
3447        }
3448
3449        vscsi->dma_dev = vdev;
3450        vscsi->dev = vdev->dev;
3451        INIT_LIST_HEAD(&vscsi->schedule_q);
3452        INIT_LIST_HEAD(&vscsi->waiting_rsp);
3453        INIT_LIST_HEAD(&vscsi->active_q);
3454
3455        snprintf(vscsi->tport.tport_name, IBMVSCSIS_NAMELEN, "%s",
3456                 dev_name(&vdev->dev));
3457
3458        dev_dbg(&vscsi->dev, "probe tport_name: %s\n", vscsi->tport.tport_name);
3459
3460        rc = read_dma_window(vscsi);
3461        if (rc)
3462                goto free_adapter;
3463        dev_dbg(&vscsi->dev, "Probe: liobn 0x%x, riobn 0x%x\n",
3464                vscsi->dds.window[LOCAL].liobn,
3465                vscsi->dds.window[REMOTE].liobn);
3466
3467        snprintf(vscsi->eye, sizeof(vscsi->eye), "VSCSI %s", vdev->name);
3468
3469        vscsi->dds.unit_id = vdev->unit_address;
3470        strscpy(vscsi->dds.partition_name, partition_name,
3471                sizeof(vscsi->dds.partition_name));
3472        vscsi->dds.partition_num = partition_number;
3473
3474        spin_lock_bh(&ibmvscsis_dev_lock);
3475        list_add_tail(&vscsi->list, &ibmvscsis_dev_list);
3476        spin_unlock_bh(&ibmvscsis_dev_lock);
3477
3478        /*
3479         * TBD: How do we determine # of cmds to request?  Do we know how
3480         * many "children" we have?
3481         */
3482        vscsi->request_limit = INITIAL_SRP_LIMIT;
3483        rc = srp_target_alloc(&vscsi->target, &vdev->dev, vscsi->request_limit,
3484                              SRP_MAX_IU_LEN);
3485        if (rc)
3486                goto rem_list;
3487
3488        vscsi->target.ldata = vscsi;
3489
3490        rc = ibmvscsis_alloc_cmds(vscsi, vscsi->request_limit);
3491        if (rc) {
3492                dev_err(&vscsi->dev, "alloc_cmds failed, rc %d, num %d\n",
3493                        rc, vscsi->request_limit);
3494                goto free_target;
3495        }
3496
3497        /*
3498         * Note: the lock is used in freeing timers, so must initialize
3499         * first so that ordering in case of error is correct.
3500         */
3501        spin_lock_init(&vscsi->intr_lock);
3502
3503        rc = ibmvscsis_alloctimer(vscsi);
3504        if (rc) {
3505                dev_err(&vscsi->dev, "probe: alloctimer failed, rc %d\n", rc);
3506                goto free_cmds;
3507        }
3508
3509        rc = ibmvscsis_create_command_q(vscsi, 256);
3510        if (rc) {
3511                dev_err(&vscsi->dev, "probe: create_command_q failed, rc %d\n",
3512                        rc);
3513                goto free_timer;
3514        }
3515
3516        vscsi->map_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
3517        if (!vscsi->map_buf) {
3518                rc = -ENOMEM;
3519                dev_err(&vscsi->dev, "probe: allocating cmd buffer failed\n");
3520                goto destroy_queue;
3521        }
3522
3523        vscsi->map_ioba = dma_map_single(&vdev->dev, vscsi->map_buf, PAGE_SIZE,
3524                                         DMA_BIDIRECTIONAL);
3525        if (dma_mapping_error(&vdev->dev, vscsi->map_ioba)) {
3526                rc = -ENOMEM;
3527                dev_err(&vscsi->dev, "probe: error mapping command buffer\n");
3528                goto free_buf;
3529        }
3530
3531        hrc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
3532                       (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
3533                       0);
3534        if (hrc == H_SUCCESS)
3535                vscsi->client_data.partition_number =
3536                        be64_to_cpu(*(u64 *)vscsi->map_buf);
3537        /*
3538         * We expect the VIOCTL to fail if we're configured as "any
3539         * client can connect" and the client isn't activated yet.
3540         * We'll make the call again when the client sends an init message.
3541         */
3542        dev_dbg(&vscsi->dev, "probe hrc %ld, client partition num %d\n",
3543                hrc, vscsi->client_data.partition_number);
3544
3545        tasklet_init(&vscsi->work_task, ibmvscsis_handle_crq,
3546                     (unsigned long)vscsi);
3547
3548        init_completion(&vscsi->wait_idle);
3549        init_completion(&vscsi->unconfig);
3550
3551        snprintf(wq_name, 24, "ibmvscsis%s", dev_name(&vdev->dev));
3552        vscsi->work_q = create_workqueue(wq_name);
3553        if (!vscsi->work_q) {
3554                rc = -ENOMEM;
3555                dev_err(&vscsi->dev, "create_workqueue failed\n");
3556                goto unmap_buf;
3557        }
3558
3559        rc = request_irq(vdev->irq, ibmvscsis_interrupt, 0, "ibmvscsis", vscsi);
3560        if (rc) {
3561                dev_err(&vscsi->dev, "probe: request_irq failed, rc %d\n", rc);
3562                rc = -EPERM;
3563                goto destroy_WQ;
3564        }
3565
3566        vscsi->state = WAIT_ENABLED;
3567
3568        dev_set_drvdata(&vdev->dev, vscsi);
3569
3570        return 0;
3571
3572destroy_WQ:
3573        destroy_workqueue(vscsi->work_q);
3574unmap_buf:
3575        dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE,
3576                         DMA_BIDIRECTIONAL);
3577free_buf:
3578        kfree(vscsi->map_buf);
3579destroy_queue:
3580        tasklet_kill(&vscsi->work_task);
3581        ibmvscsis_unregister_command_q(vscsi);
3582        ibmvscsis_destroy_command_q(vscsi);
3583free_timer:
3584        ibmvscsis_freetimer(vscsi);
3585free_cmds:
3586        ibmvscsis_free_cmds(vscsi);
3587free_target:
3588        srp_target_free(&vscsi->target);
3589rem_list:
3590        spin_lock_bh(&ibmvscsis_dev_lock);
3591        list_del(&vscsi->list);
3592        spin_unlock_bh(&ibmvscsis_dev_lock);
3593free_adapter:
3594        kfree(vscsi);
3595
3596        return rc;
3597}
3598
3599static int ibmvscsis_remove(struct vio_dev *vdev)
3600{
3601        struct scsi_info *vscsi = dev_get_drvdata(&vdev->dev);
3602
3603        dev_dbg(&vscsi->dev, "remove (%s)\n", dev_name(&vscsi->dma_dev->dev));
3604
3605        spin_lock_bh(&vscsi->intr_lock);
3606        ibmvscsis_post_disconnect(vscsi, UNCONFIGURING, 0);
3607        vscsi->flags |= CFG_SLEEPING;
3608        spin_unlock_bh(&vscsi->intr_lock);
3609        wait_for_completion(&vscsi->unconfig);
3610
3611        vio_disable_interrupts(vdev);
3612        free_irq(vdev->irq, vscsi);
3613        destroy_workqueue(vscsi->work_q);
3614        dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE,
3615                         DMA_BIDIRECTIONAL);
3616        kfree(vscsi->map_buf);
3617        tasklet_kill(&vscsi->work_task);
3618        ibmvscsis_destroy_command_q(vscsi);
3619        ibmvscsis_freetimer(vscsi);
3620        ibmvscsis_free_cmds(vscsi);
3621        srp_target_free(&vscsi->target);
3622        spin_lock_bh(&ibmvscsis_dev_lock);
3623        list_del(&vscsi->list);
3624        spin_unlock_bh(&ibmvscsis_dev_lock);
3625        kfree(vscsi);
3626
3627        return 0;
3628}
3629
3630static ssize_t system_id_show(struct device *dev,
3631                              struct device_attribute *attr, char *buf)
3632{
3633        return snprintf(buf, PAGE_SIZE, "%s\n", system_id);
3634}
3635
3636static ssize_t partition_number_show(struct device *dev,
3637                                     struct device_attribute *attr, char *buf)
3638{
3639        return snprintf(buf, PAGE_SIZE, "%x\n", partition_number);
3640}
3641
3642static ssize_t unit_address_show(struct device *dev,
3643                                 struct device_attribute *attr, char *buf)
3644{
3645        struct scsi_info *vscsi = container_of(dev, struct scsi_info, dev);
3646
3647        return snprintf(buf, PAGE_SIZE, "%x\n", vscsi->dma_dev->unit_address);
3648}
3649
3650static int ibmvscsis_get_system_info(void)
3651{
3652        struct device_node *rootdn, *vdevdn;
3653        const char *id, *model, *name;
3654        const uint *num;
3655
3656        rootdn = of_find_node_by_path("/");
3657        if (!rootdn)
3658                return -ENOENT;
3659
3660        model = of_get_property(rootdn, "model", NULL);
3661        id = of_get_property(rootdn, "system-id", NULL);
3662        if (model && id)
3663                snprintf(system_id, sizeof(system_id), "%s-%s", model, id);
3664
3665        name = of_get_property(rootdn, "ibm,partition-name", NULL);
3666        if (name)
3667                strscpy(partition_name, name, sizeof(partition_name));
3668
3669        num = of_get_property(rootdn, "ibm,partition-no", NULL);
3670        if (num)
3671                partition_number = of_read_number(num, 1);
3672
3673        of_node_put(rootdn);
3674
3675        vdevdn = of_find_node_by_path("/vdevice");
3676        if (vdevdn) {
3677                const uint *mvds;
3678
3679                mvds = of_get_property(vdevdn, "ibm,max-virtual-dma-size",
3680                                       NULL);
3681                if (mvds)
3682                        max_vdma_size = *mvds;
3683                of_node_put(vdevdn);
3684        }
3685
3686        return 0;
3687}
3688
3689static char *ibmvscsis_get_fabric_wwn(struct se_portal_group *se_tpg)
3690{
3691        struct ibmvscsis_tport *tport =
3692                container_of(se_tpg, struct ibmvscsis_tport, se_tpg);
3693
3694        return tport->tport_name;
3695}
3696
3697static u16 ibmvscsis_get_tag(struct se_portal_group *se_tpg)
3698{
3699        struct ibmvscsis_tport *tport =
3700                container_of(se_tpg, struct ibmvscsis_tport, se_tpg);
3701
3702        return tport->tport_tpgt;
3703}
3704
3705static u32 ibmvscsis_get_default_depth(struct se_portal_group *se_tpg)
3706{
3707        return 1;
3708}
3709
3710static int ibmvscsis_check_true(struct se_portal_group *se_tpg)
3711{
3712        return 1;
3713}
3714
3715static int ibmvscsis_check_false(struct se_portal_group *se_tpg)
3716{
3717        return 0;
3718}
3719
3720static u32 ibmvscsis_tpg_get_inst_index(struct se_portal_group *se_tpg)
3721{
3722        return 1;
3723}
3724
3725static int ibmvscsis_check_stop_free(struct se_cmd *se_cmd)
3726{
3727        return target_put_sess_cmd(se_cmd);
3728}
3729
3730static void ibmvscsis_release_cmd(struct se_cmd *se_cmd)
3731{
3732        struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3733                                                 se_cmd);
3734        struct scsi_info *vscsi = cmd->adapter;
3735
3736        spin_lock_bh(&vscsi->intr_lock);
3737        /* Remove from active_q */
3738        list_move_tail(&cmd->list, &vscsi->waiting_rsp);
3739        ibmvscsis_send_messages(vscsi);
3740        spin_unlock_bh(&vscsi->intr_lock);
3741}
3742
3743static u32 ibmvscsis_sess_get_index(struct se_session *se_sess)
3744{
3745        return 0;
3746}
3747
3748static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
3749{
3750        struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3751                                                 se_cmd);
3752        struct scsi_info *vscsi = cmd->adapter;
3753        struct iu_entry *iue = cmd->iue;
3754        int rc;
3755
3756        /*
3757         * If CLIENT_FAILED or RESPONSE_Q_DOWN is set, fail the command here;
3758         * the connection is gone and we don't want to attempt an
3759         * srp_transfer_data.
3760         */
3761        if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) {
3762                dev_err(&vscsi->dev, "write_pending failed, flags 0x%x\n",
3763                        vscsi->flags);
3764                return -EIO;
3765
3766        }
3767
3768        rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma,
3769                               1, 1);
3770        if (rc) {
3771                dev_err(&vscsi->dev, "srp_transfer_data() failed: %d\n", rc);
3772                return -EIO;
3773        }
3774        /*
3775         * We now tell TCM to add this WRITE CDB directly into the TCM storage
3776         * object execution queue.
3777         */
3778        target_execute_cmd(se_cmd);
3779        return 0;
3780}
3781
3782static void ibmvscsis_set_default_node_attrs(struct se_node_acl *nacl)
3783{
3784}
3785
3786static int ibmvscsis_get_cmd_state(struct se_cmd *se_cmd)
3787{
3788        return 0;
3789}
3790
3791static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd)
3792{
3793        struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3794                                                 se_cmd);
3795        struct iu_entry *iue = cmd->iue;
3796        struct scsi_info *vscsi = cmd->adapter;
3797        char *sd;
3798        uint len = 0;
3799        int rc;
3800
3801        rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, 1,
3802                               1);
3803        if (rc) {
3804                dev_err(&vscsi->dev, "srp_transfer_data failed: %d\n", rc);
3805                sd = se_cmd->sense_buffer;
3806                se_cmd->scsi_sense_length = 18;
3807                memset(se_cmd->sense_buffer, 0, se_cmd->scsi_sense_length);
3808                /* Logical Unit Communication Time-out asc/ascq = 0x0801 */
3809                scsi_build_sense_buffer(0, se_cmd->sense_buffer, MEDIUM_ERROR,
3810                                        0x08, 0x01);
3811        }
3812
3813        srp_build_response(vscsi, cmd, &len);
3814        cmd->rsp.format = SRP_FORMAT;
3815        cmd->rsp.len = len;
3816
3817        return 0;
3818}
3819
3820static int ibmvscsis_queue_status(struct se_cmd *se_cmd)
3821{
3822        struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3823                                                 se_cmd);
3824        struct scsi_info *vscsi = cmd->adapter;
3825        uint len;
3826
3827        dev_dbg(&vscsi->dev, "queue_status %p\n", se_cmd);
3828
3829        srp_build_response(vscsi, cmd, &len);
3830        cmd->rsp.format = SRP_FORMAT;
3831        cmd->rsp.len = len;
3832
3833        return 0;
3834}
3835
3836static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd)
3837{
3838        struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3839                                                 se_cmd);
3840        struct scsi_info *vscsi = cmd->adapter;
3841        struct ibmvscsis_cmd *cmd_itr;
3842        struct iu_entry *iue = cmd->iue;
3843        struct srp_tsk_mgmt *srp_tsk = &vio_iu(iue)->srp.tsk_mgmt;
3844        u64 tag_to_abort = be64_to_cpu(srp_tsk->task_tag);
3845        uint len;
3846
3847        dev_dbg(&vscsi->dev, "queue_tm_rsp %p, status %d\n",
3848                se_cmd, (int)se_cmd->se_tmr_req->response);
3849
3850        if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK &&
3851            cmd->se_cmd.se_tmr_req->response == TMR_TASK_DOES_NOT_EXIST) {
3852                spin_lock_bh(&vscsi->intr_lock);
3853                list_for_each_entry(cmd_itr, &vscsi->active_q, list) {
3854                        if (tag_to_abort == cmd_itr->se_cmd.tag) {
3855                                cmd_itr->abort_cmd = cmd;
3856                                cmd->flags |= DELAY_SEND;
3857                                break;
3858                        }
3859                }
3860                spin_unlock_bh(&vscsi->intr_lock);
3861        }
3862
3863        srp_build_response(vscsi, cmd, &len);
3864        cmd->rsp.format = SRP_FORMAT;
3865        cmd->rsp.len = len;
3866}
3867
3868static void ibmvscsis_aborted_task(struct se_cmd *se_cmd)
3869{
3870        struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3871                                                 se_cmd);
3872        struct scsi_info *vscsi = cmd->adapter;
3873
3874        dev_dbg(&vscsi->dev, "ibmvscsis_aborted_task %p task_tag: %llu\n",
3875                se_cmd, se_cmd->tag);
3876}
3877
3878static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf,
3879                                           struct config_group *group,
3880                                           const char *name)
3881{
3882        struct ibmvscsis_tport *tport;
3883        struct scsi_info *vscsi;
3884
3885        tport = ibmvscsis_lookup_port(name);
3886        if (tport) {
3887                vscsi = container_of(tport, struct scsi_info, tport);
3888                tport->tport_proto_id = SCSI_PROTOCOL_SRP;
3889                dev_dbg(&vscsi->dev, "make_tport(%s), pointer:%p, tport_id:%x\n",
3890                        name, tport, tport->tport_proto_id);
3891                return &tport->tport_wwn;
3892        }
3893
3894        return ERR_PTR(-EINVAL);
3895}
3896
3897static void ibmvscsis_drop_tport(struct se_wwn *wwn)
3898{
3899        struct ibmvscsis_tport *tport = container_of(wwn,
3900                                                     struct ibmvscsis_tport,
3901                                                     tport_wwn);
3902        struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
3903
3904        dev_dbg(&vscsi->dev, "drop_tport(%s)\n",
3905                config_item_name(&tport->tport_wwn.wwn_group.cg_item));
3906}
3907
3908static struct se_portal_group *ibmvscsis_make_tpg(struct se_wwn *wwn,
3909                                                  const char *name)
3910{
3911        struct ibmvscsis_tport *tport =
3912                container_of(wwn, struct ibmvscsis_tport, tport_wwn);
3913        u16 tpgt;
3914        int rc;
3915
3916        if (strstr(name, "tpgt_") != name)
3917                return ERR_PTR(-EINVAL);
3918        rc = kstrtou16(name + 5, 0, &tpgt);
3919        if (rc)
3920                return ERR_PTR(rc);
3921        tport->tport_tpgt = tpgt;
3922
3923        tport->releasing = false;
3924
3925        rc = core_tpg_register(&tport->tport_wwn, &tport->se_tpg,
3926                               tport->tport_proto_id);
3927        if (rc)
3928                return ERR_PTR(rc);
3929
3930        return &tport->se_tpg;
3931}
3932
3933static void ibmvscsis_drop_tpg(struct se_portal_group *se_tpg)
3934{
3935        struct ibmvscsis_tport *tport = container_of(se_tpg,
3936                                                     struct ibmvscsis_tport,
3937                                                     se_tpg);
3938
3939        tport->releasing = true;
3940        tport->enabled = false;
3941
3942        /*
3943         * Release the virtual I_T Nexus for this ibmvscsis TPG
3944         */
3945        ibmvscsis_drop_nexus(tport);
3946        /*
3947         * Deregister the se_tpg from TCM..
3948         */
3949        core_tpg_deregister(se_tpg);
3950}
3951
3952static ssize_t ibmvscsis_wwn_version_show(struct config_item *item,
3953                                          char *page)
3954{
3955        return scnprintf(page, PAGE_SIZE, "%s\n", IBMVSCSIS_VERSION);
3956}
3957CONFIGFS_ATTR_RO(ibmvscsis_wwn_, version);
3958
3959static struct configfs_attribute *ibmvscsis_wwn_attrs[] = {
3960        &ibmvscsis_wwn_attr_version,
3961        NULL,
3962};
3963
3964static ssize_t ibmvscsis_tpg_enable_show(struct config_item *item,
3965                                         char *page)
3966{
3967        struct se_portal_group *se_tpg = to_tpg(item);
3968        struct ibmvscsis_tport *tport = container_of(se_tpg,
3969                                                     struct ibmvscsis_tport,
3970                                                     se_tpg);
3971
3972        return snprintf(page, PAGE_SIZE, "%d\n", (tport->enabled) ? 1 : 0);
3973}
3974
3975static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
3976                                          const char *page, size_t count)
3977{
3978        struct se_portal_group *se_tpg = to_tpg(item);
3979        struct ibmvscsis_tport *tport = container_of(se_tpg,
3980                                                     struct ibmvscsis_tport,
3981                                                     se_tpg);
3982        struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
3983        unsigned long tmp;
3984        int rc;
3985        long lrc;
3986
3987        rc = kstrtoul(page, 0, &tmp);
3988        if (rc < 0) {
3989                dev_err(&vscsi->dev, "Unable to extract srpt_tpg_store_enable\n");
3990                return -EINVAL;
3991        }
3992
3993        if ((tmp != 0) && (tmp != 1)) {
3994                dev_err(&vscsi->dev, "Illegal value for srpt_tpg_store_enable\n");
3995                return -EINVAL;
3996        }
3997
3998        if (tmp) {
3999                spin_lock_bh(&vscsi->intr_lock);
4000                tport->enabled = true;
4001                lrc = ibmvscsis_enable_change_state(vscsi);
4002                if (lrc)
4003                        dev_err(&vscsi->dev, "enable_change_state failed, rc %ld state %d\n",
4004                                lrc, vscsi->state);
4005                spin_unlock_bh(&vscsi->intr_lock);
4006        } else {
4007                spin_lock_bh(&vscsi->intr_lock);
4008                tport->enabled = false;
4009                /* This simulates the server going down */
4010                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
4011                spin_unlock_bh(&vscsi->intr_lock);
4012        }
4013
4014        dev_dbg(&vscsi->dev, "tpg_enable_store, tmp %ld, state %d\n", tmp,
4015                vscsi->state);
4016
4017        return count;
4018}
4019CONFIGFS_ATTR(ibmvscsis_tpg_, enable);
4020
4021static struct configfs_attribute *ibmvscsis_tpg_attrs[] = {
4022        &ibmvscsis_tpg_attr_enable,
4023        NULL,
4024};
4025
4026static const struct target_core_fabric_ops ibmvscsis_ops = {
4027        .module                         = THIS_MODULE,
4028        .fabric_name                    = "ibmvscsis",
4029        .max_data_sg_nents              = MAX_TXU / PAGE_SIZE,
4030        .tpg_get_wwn                    = ibmvscsis_get_fabric_wwn,
4031        .tpg_get_tag                    = ibmvscsis_get_tag,
4032        .tpg_get_default_depth          = ibmvscsis_get_default_depth,
4033        .tpg_check_demo_mode            = ibmvscsis_check_true,
4034        .tpg_check_demo_mode_cache      = ibmvscsis_check_true,
4035        .tpg_check_demo_mode_write_protect = ibmvscsis_check_false,
4036        .tpg_check_prod_mode_write_protect = ibmvscsis_check_false,
4037        .tpg_get_inst_index             = ibmvscsis_tpg_get_inst_index,
4038        .check_stop_free                = ibmvscsis_check_stop_free,
4039        .release_cmd                    = ibmvscsis_release_cmd,
4040        .sess_get_index                 = ibmvscsis_sess_get_index,
4041        .write_pending                  = ibmvscsis_write_pending,
4042        .set_default_node_attributes    = ibmvscsis_set_default_node_attrs,
4043        .get_cmd_state                  = ibmvscsis_get_cmd_state,
4044        .queue_data_in                  = ibmvscsis_queue_data_in,
4045        .queue_status                   = ibmvscsis_queue_status,
4046        .queue_tm_rsp                   = ibmvscsis_queue_tm_rsp,
4047        .aborted_task                   = ibmvscsis_aborted_task,
4048        /*
4049         * Setup function pointers for logic in target_core_fabric_configfs.c
4050         */
4051        .fabric_make_wwn                = ibmvscsis_make_tport,
4052        .fabric_drop_wwn                = ibmvscsis_drop_tport,
4053        .fabric_make_tpg                = ibmvscsis_make_tpg,
4054        .fabric_drop_tpg                = ibmvscsis_drop_tpg,
4055
4056        .tfc_wwn_attrs                  = ibmvscsis_wwn_attrs,
4057        .tfc_tpg_base_attrs             = ibmvscsis_tpg_attrs,
4058};
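/*
 * Usage sketch (paths follow the usual LIO configfs layout; the unit name
 * "30000004" is a made-up example of a vio device name, and the commands
 * are illustrative rather than taken from this file):
 *
 *        mkdir -p /sys/kernel/config/target/ibmvscsis/30000004/tpgt_1
 *        echo 1 > /sys/kernel/config/target/ibmvscsis/30000004/tpgt_1/enable
 *
 * The mkdir path exercises ibmvscsis_make_tport() and ibmvscsis_make_tpg()
 * above, and the enable write lands in ibmvscsis_tpg_enable_store(), which
 * registers the CRQ with the hypervisor and moves the adapter toward
 * WAIT_CONNECTION.
 */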
4059
4060static void ibmvscsis_dev_release(struct device *dev) {}
4061
4062static struct device_attribute dev_attr_system_id =
4063        __ATTR(system_id, S_IRUGO, system_id_show, NULL);
4064
4065static struct device_attribute dev_attr_partition_number =
4066        __ATTR(partition_number, S_IRUGO, partition_number_show, NULL);
4067
4068static struct device_attribute dev_attr_unit_address =
4069        __ATTR(unit_address, S_IRUGO, unit_address_show, NULL);
4070
4071static struct attribute *ibmvscsis_dev_attrs[] = {
4072        &dev_attr_system_id.attr,
4073        &dev_attr_partition_number.attr,
4074        &dev_attr_unit_address.attr,
4075};
4076ATTRIBUTE_GROUPS(ibmvscsis_dev);
4077
4078static struct class ibmvscsis_class = {
4079        .name           = "ibmvscsis",
4080        .dev_release    = ibmvscsis_dev_release,
4081        .dev_groups     = ibmvscsis_dev_groups,
4082};
4083
4084static const struct vio_device_id ibmvscsis_device_table[] = {
4085        { "v-scsi-host", "IBM,v-scsi-host" },
4086        { "", "" }
4087};
4088MODULE_DEVICE_TABLE(vio, ibmvscsis_device_table);
4089
4090static struct vio_driver ibmvscsis_driver = {
4091        .name = "ibmvscsis",
4092        .id_table = ibmvscsis_device_table,
4093        .probe = ibmvscsis_probe,
4094        .remove = ibmvscsis_remove,
4095};
4096
4097/*
4098 * ibmvscsis_init() - Kernel Module initialization
4099 *
4100 * Note: vio_register_driver() registers callback functions, and at least one
4101 * of those callback functions calls TCM - Linux IO Target Subsystem, thus
4102 * the SCSI Target template must be registered before vio_register_driver()
4103 * is called.
4104 */
4105static int __init ibmvscsis_init(void)
4106{
4107        int rc = 0;
4108
4109        rc = ibmvscsis_get_system_info();
4110        if (rc) {
4111                pr_err("rc %d from get_system_info\n", rc);
4112                goto out;
4113        }
4114
4115        rc = class_register(&ibmvscsis_class);
4116        if (rc) {
4117                pr_err("failed class register\n");
4118                goto out;
4119        }
4120
4121        rc = target_register_template(&ibmvscsis_ops);
4122        if (rc) {
4123                pr_err("rc %d from target_register_template\n", rc);
4124                goto unregister_class;
4125        }
4126
4127        rc = vio_register_driver(&ibmvscsis_driver);
4128        if (rc) {
4129                pr_err("rc %d from vio_register_driver\n", rc);
4130                goto unregister_target;
4131        }
4132
4133        return 0;
4134
4135unregister_target:
4136        target_unregister_template(&ibmvscsis_ops);
4137unregister_class:
4138        class_unregister(&ibmvscsis_class);
4139out:
4140        return rc;
4141}
4142
4143static void __exit ibmvscsis_exit(void)
4144{
4145        pr_info("Unregister IBM virtual SCSI host driver\n");
4146        vio_unregister_driver(&ibmvscsis_driver);
4147        target_unregister_template(&ibmvscsis_ops);
4148        class_unregister(&ibmvscsis_class);
4149}
4150
4151MODULE_DESCRIPTION("IBMVSCSIS fabric driver");
4152MODULE_AUTHOR("Bryant G. Ly and Michael Cyr");
4153MODULE_LICENSE("GPL");
4154MODULE_VERSION(IBMVSCSIS_VERSION);
4155module_init(ibmvscsis_init);
4156module_exit(ibmvscsis_exit);
4157