linux/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
/*******************************************************************************
 * IBM Virtual SCSI Target Driver
 * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp.
 *                         Santiago Leon (santil@us.ibm.com) IBM Corp.
 *                         Linda Xie (lxie@us.ibm.com) IBM Corp.
 *
 * Copyright (C) 2005-2011 FUJITA Tomonori <tomof@acm.org>
 * Copyright (C) 2010 Nicholas A. Bellinger <nab@kernel.org>
 *
 * Authors: Bryant G. Ly <bryantly@linux.vnet.ibm.com>
 * Authors: Michael Cyr <mikecyr@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 ****************************************************************************/

#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/delay.h>

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include <asm/hvcall.h>
#include <asm/vio.h>

#include <scsi/viosrp.h>

#include "ibmvscsi_tgt.h"

#define IBMVSCSIS_VERSION       "v0.2"

#define INITIAL_SRP_LIMIT       800
#define DEFAULT_MAX_SECTORS     256
#define MAX_TXU                 (1024 * 1024)

static uint max_vdma_size = MAX_H_COPY_RDMA;

static char system_id[SYS_ID_NAME_LEN] = "";
static char partition_name[PARTITION_NAMELEN] = "UNKNOWN";
static uint partition_number = -1;

/* Adapter list and lock to control it */
static DEFINE_SPINLOCK(ibmvscsis_dev_lock);
static LIST_HEAD(ibmvscsis_dev_list);

static long ibmvscsis_parse_command(struct scsi_info *vscsi,
                                    struct viosrp_crq *crq);

static void ibmvscsis_adapter_idle(struct scsi_info *vscsi);

static void ibmvscsis_determine_resid(struct se_cmd *se_cmd,
                                      struct srp_rsp *rsp)
{
        u32 residual_count = se_cmd->residual_count;

        if (!residual_count)
                return;

        if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
                if (se_cmd->data_direction == DMA_TO_DEVICE) {
                        /* residual data from an underflow write */
                        rsp->flags = SRP_RSP_FLAG_DOUNDER;
                        rsp->data_out_res_cnt = cpu_to_be32(residual_count);
                } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
                        /* residual data from an underflow read */
                        rsp->flags = SRP_RSP_FLAG_DIUNDER;
                        rsp->data_in_res_cnt = cpu_to_be32(residual_count);
                }
        } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
                if (se_cmd->data_direction == DMA_TO_DEVICE) {
                        /* residual data from an overflow write */
                        rsp->flags = SRP_RSP_FLAG_DOOVER;
                        rsp->data_out_res_cnt = cpu_to_be32(residual_count);
                } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
                        /* residual data from an overflow read */
                        rsp->flags = SRP_RSP_FLAG_DIOVER;
                        rsp->data_in_res_cnt = cpu_to_be32(residual_count);
                }
        }
}

/**
 * connection_broken() - Determine if the connection to the client is good
 * @vscsi:      Pointer to our adapter structure
 *
 * This function attempts to send a ping MAD to the client. If the call to
 * queue the request returns H_CLOSED then the connection has been broken
 * and the function returns TRUE.
 *
 * EXECUTION ENVIRONMENT:
 *      Interrupt or Process environment
 */
static bool connection_broken(struct scsi_info *vscsi)
{
        struct viosrp_crq *crq;
        u64 buffer[2] = { 0, 0 };
        long h_return_code;
        bool rc = false;

        /* create a PING crq */
        crq = (struct viosrp_crq *)&buffer;
        crq->valid = VALID_CMD_RESP_EL;
        crq->format = MESSAGE_IN_CRQ;
        crq->status = PING;

        h_return_code = h_send_crq(vscsi->dds.unit_id,
                                   cpu_to_be64(buffer[MSG_HI]),
                                   cpu_to_be64(buffer[MSG_LOW]));

        pr_debug("connection_broken: rc %ld\n", h_return_code);

        if (h_return_code == H_CLOSED)
                rc = true;

        return rc;
}

/**
 * ibmvscsis_unregister_command_q() - Helper Function-Unregister Command Queue
 * @vscsi:      Pointer to our adapter structure
 *
 * This function calls h_free_crq, then frees the interrupt bit, etc.
 * It must release the lock before doing so because of the time it can take
 * for h_free_crq in PHYP.
 * NOTE: the caller must make sure that state and/or flags will prevent the
 *       interrupt handler from scheduling work.
 * NOTE: anyone calling this function may need to set the CRQ_CLOSED flag;
 *       we can't do it here because we don't have the lock.
 *
 * EXECUTION ENVIRONMENT:
 *      Process level
 */
static long ibmvscsis_unregister_command_q(struct scsi_info *vscsi)
{
        long qrc;
        long rc = ADAPT_SUCCESS;
        int ticks = 0;

        do {
                qrc = h_free_crq(vscsi->dds.unit_id);
                switch (qrc) {
                case H_SUCCESS:
                        spin_lock_bh(&vscsi->intr_lock);
                        vscsi->flags &= ~PREP_FOR_SUSPEND_FLAGS;
                        spin_unlock_bh(&vscsi->intr_lock);
                        break;

                case H_HARDWARE:
                case H_PARAMETER:
                        dev_err(&vscsi->dev, "unregister_command_q: error from h_free_crq %ld\n",
                                qrc);
                        rc = ERROR;
                        break;

                case H_BUSY:
                case H_LONG_BUSY_ORDER_1_MSEC:
                        /* msleep not good for small values */
                        usleep_range(1000, 2000);
                        ticks += 1;
                        break;
                case H_LONG_BUSY_ORDER_10_MSEC:
                        usleep_range(10000, 20000);
                        ticks += 10;
                        break;
                case H_LONG_BUSY_ORDER_100_MSEC:
                        msleep(100);
                        ticks += 100;
                        break;
                case H_LONG_BUSY_ORDER_1_SEC:
                        ssleep(1);
                        ticks += 1000;
                        break;
                case H_LONG_BUSY_ORDER_10_SEC:
                        ssleep(10);
                        ticks += 10000;
                        break;
                case H_LONG_BUSY_ORDER_100_SEC:
                        ssleep(100);
                        ticks += 100000;
                        break;
                default:
                        dev_err(&vscsi->dev, "unregister_command_q: unknown error %ld from h_free_crq\n",
                                qrc);
                        rc = ERROR;
                        break;
                }

                /*
                 * don't wait more than 300 seconds;
                 * ticks are roughly in milliseconds
                 */
                if (ticks > 300000 && qrc != H_SUCCESS) {
                        rc = ERROR;
                        dev_err(&vscsi->dev, "Excessive wait for h_free_crq\n");
                }
        } while (qrc != H_SUCCESS && rc == ADAPT_SUCCESS);

        pr_debug("Freeing CRQ: phyp rc %ld, rc %ld\n", qrc, rc);

        return rc;
}

/**
 * ibmvscsis_delete_client_info() - Helper function to Delete Client Info
 * @vscsi:      Pointer to our adapter structure
 * @client_closed:      True if client closed its queue
 *
 * Deletes information specific to the client when the client goes away
 *
 * EXECUTION ENVIRONMENT:
 *      Interrupt or Process
 */
static void ibmvscsis_delete_client_info(struct scsi_info *vscsi,
                                         bool client_closed)
{
        vscsi->client_cap = 0;

        /*
         * Some things we don't want to clear if we're closing the queue,
         * because some clients don't resend the host handshake when they
         * get a transport event.
         */
        if (client_closed)
                vscsi->client_data.os_type = 0;
}

/**
 * ibmvscsis_free_command_q() - Free Command Queue
 * @vscsi:      Pointer to our adapter structure
 *
 * This function calls unregister_command_q, then clears interrupts and
 * any pending interrupt acknowledgments associated with the command q.
 * It also clears memory if there is no error.
 *
 * PHYP does not meet the PAPR architecture, so we must give up the
 * lock. This causes a timing hole regarding state change.  To close the
 * hole this routine does accounting on any change that occurred during
 * the time the lock is not held.
 * NOTE: this routine must give up and then reacquire the interrupt lock;
 *       the caller must make sure that state and/or flags will prevent the
 *       interrupt handler from scheduling work.
 *
 * EXECUTION ENVIRONMENT:
 *      Process level, interrupt lock is held
 */
static long ibmvscsis_free_command_q(struct scsi_info *vscsi)
{
        int bytes;
        u32 flags_under_lock;
        u16 state_under_lock;
        long rc = ADAPT_SUCCESS;

        if (!(vscsi->flags & CRQ_CLOSED)) {
                vio_disable_interrupts(vscsi->dma_dev);

                state_under_lock = vscsi->new_state;
                flags_under_lock = vscsi->flags;
                vscsi->phyp_acr_state = 0;
                vscsi->phyp_acr_flags = 0;

                spin_unlock_bh(&vscsi->intr_lock);
                rc = ibmvscsis_unregister_command_q(vscsi);
                spin_lock_bh(&vscsi->intr_lock);

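                /* note any state or flag changes that occurred while the lock was dropped */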
                if (state_under_lock != vscsi->new_state)
                        vscsi->phyp_acr_state = vscsi->new_state;

                vscsi->phyp_acr_flags = ((~flags_under_lock) & vscsi->flags);

                if (rc == ADAPT_SUCCESS) {
                        bytes = vscsi->cmd_q.size * PAGE_SIZE;
                        memset(vscsi->cmd_q.base_addr, 0, bytes);
                        vscsi->cmd_q.index = 0;
                        vscsi->flags |= CRQ_CLOSED;

                        ibmvscsis_delete_client_info(vscsi, false);
                }

                pr_debug("free_command_q: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
                         vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
                         vscsi->phyp_acr_state);
        }
        return rc;
}

/**
 * ibmvscsis_cmd_q_dequeue() - Get valid Command element
 * @mask:       Mask to use in case index wraps
 * @current_index:      Current index into command queue
 * @base_addr:  Pointer to start of command queue
 *
 * Returns a pointer to a valid command element or NULL if the command
 * queue is empty
 *
 * EXECUTION ENVIRONMENT:
 *      Interrupt environment, interrupt lock held
 */
static struct viosrp_crq *ibmvscsis_cmd_q_dequeue(uint mask,
                                                  uint *current_index,
                                                  struct viosrp_crq *base_addr)
{
        struct viosrp_crq *ptr;

        ptr = base_addr + *current_index;

        if (ptr->valid) {
                *current_index = (*current_index + 1) & mask;
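                /* don't let reads of the element's contents be reordered before the valid check */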
                dma_rmb();
        } else {
                ptr = NULL;
        }

        return ptr;
}

/**
 * ibmvscsis_send_init_message() - send initialize message to the client
 * @vscsi:      Pointer to our adapter structure
 * @format:     Which Init Message format to send
 *
 * EXECUTION ENVIRONMENT:
 *      Interrupt environment, interrupt lock held
 */
static long ibmvscsis_send_init_message(struct scsi_info *vscsi, u8 format)
{
        struct viosrp_crq *crq;
        u64 buffer[2] = { 0, 0 };
        long rc;

        crq = (struct viosrp_crq *)&buffer;
        crq->valid = VALID_INIT_MSG;
        crq->format = format;
        rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]),
                        cpu_to_be64(buffer[MSG_LOW]));

        return rc;
}

/**
 * ibmvscsis_check_init_msg() - Check init message valid
 * @vscsi:      Pointer to our adapter structure
 * @format:     Pointer to return format of Init Message, if any.
 *              Set to UNUSED_FORMAT if no Init Message in queue.
 *
 * Checks if an initialize message was queued by the initiator
 * after the queue was created and before the interrupt was enabled.
 *
 * EXECUTION ENVIRONMENT:
 *      Process level only, interrupt lock held
 */
static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format)
{
        struct viosrp_crq *crq;
        long rc = ADAPT_SUCCESS;

        crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask, &vscsi->cmd_q.index,
                                      vscsi->cmd_q.base_addr);
        if (!crq) {
                *format = (uint)UNUSED_FORMAT;
        } else if (crq->valid == VALID_INIT_MSG && crq->format == INIT_MSG) {
                *format = (uint)INIT_MSG;
                crq->valid = INVALIDATE_CMD_RESP_EL;
                dma_rmb();

                /*
                 * the caller has ensured no initialize message was
                 * sent after the queue was created, so there should
                 * be no other message on the queue.
                 */
                crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask,
                                              &vscsi->cmd_q.index,
                                              vscsi->cmd_q.base_addr);
                if (crq) {
                        *format = (uint)(crq->format);
                        rc = ERROR;
                        crq->valid = INVALIDATE_CMD_RESP_EL;
                        dma_rmb();
                }
        } else {
                *format = (uint)(crq->format);
                rc = ERROR;
                crq->valid = INVALIDATE_CMD_RESP_EL;
                dma_rmb();
        }

        return rc;
}

/**
 * ibmvscsis_disconnect() - Helper function to disconnect
 * @work:       Pointer to work_struct, gives access to our adapter structure
 *
 * An error has occurred or the driver received a Transport event,
 * and the driver is requesting that the command queue be de-registered
 * in a safe manner. If there is no outstanding I/O then we can stop the
 * queue. If we are restarting the queue it will be reflected in the
 * state of the adapter.
 *
 * EXECUTION ENVIRONMENT:
 *      Process environment
 */
static void ibmvscsis_disconnect(struct work_struct *work)
{
        struct scsi_info *vscsi = container_of(work, struct scsi_info,
                                               proc_work);
        u16 new_state;
        bool wait_idle = false;

        spin_lock_bh(&vscsi->intr_lock);
        new_state = vscsi->new_state;
        vscsi->new_state = 0;

        vscsi->flags |= DISCONNECT_SCHEDULED;
        vscsi->flags &= ~SCHEDULE_DISCONNECT;

        pr_debug("disconnect: flags 0x%x, state 0x%hx\n", vscsi->flags,
                 vscsi->state);

        /*
         * check which state we are in and see if we
         * should transition to the new state
         */
        switch (vscsi->state) {
        /* Should never be called while in this state. */
        case NO_QUEUE:
        /*
         * Can never transition from this state;
         * ignore errors and logout.
         */
        case UNCONFIGURING:
                break;

        /* can transition from this state to UNCONFIGURING */
        case ERR_DISCONNECT:
                if (new_state == UNCONFIGURING)
                        vscsi->state = new_state;
                break;

        /*
         * Can transition from this state to unconfiguring
         * or err disconnect.
         */
        case ERR_DISCONNECT_RECONNECT:
                switch (new_state) {
                case UNCONFIGURING:
                case ERR_DISCONNECT:
                        vscsi->state = new_state;
                        break;

                case WAIT_IDLE:
                        break;
                default:
                        break;
                }
                break;

        /* can transition from this state to UNCONFIGURING */
        case ERR_DISCONNECTED:
                if (new_state == UNCONFIGURING)
                        vscsi->state = new_state;
                break;

        case WAIT_ENABLED:
                switch (new_state) {
                case UNCONFIGURING:
                        vscsi->state = new_state;
                        vscsi->flags |= RESPONSE_Q_DOWN;
                        vscsi->flags &= ~(SCHEDULE_DISCONNECT |
                                          DISCONNECT_SCHEDULED);
                        dma_rmb();
                        if (vscsi->flags & CFG_SLEEPING) {
                                vscsi->flags &= ~CFG_SLEEPING;
                                complete(&vscsi->unconfig);
                        }
                        break;

                /* should never happen */
                case ERR_DISCONNECT:
                case ERR_DISCONNECT_RECONNECT:
                case WAIT_IDLE:
                        dev_err(&vscsi->dev, "disconnect: invalid state %d for WAIT_IDLE\n",
                                vscsi->state);
                        break;
                }
                break;

        case WAIT_IDLE:
                switch (new_state) {
                case UNCONFIGURING:
                        vscsi->flags |= RESPONSE_Q_DOWN;
                        vscsi->state = new_state;
                        vscsi->flags &= ~(SCHEDULE_DISCONNECT |
                                          DISCONNECT_SCHEDULED);
                        ibmvscsis_free_command_q(vscsi);
                        break;
                case ERR_DISCONNECT:
                case ERR_DISCONNECT_RECONNECT:
                        vscsi->state = new_state;
                        break;
                }
                break;

        /*
         * The initiator has not done a successful SRP login, or has
         * done a successful SRP logout (the adapter was not busy).
         * In the first case there can be responses queued waiting for
         * space on the initiator's response queue (MAD); in the second
         * case the adapter is idle. Assume the worst case, i.e. the
         * second case.
         */
        case WAIT_CONNECTION:
        case CONNECTED:
        case SRP_PROCESSING:
                wait_idle = true;
                vscsi->state = new_state;
                break;

        /* can transition from this state to UNCONFIGURING */
        case UNDEFINED:
                if (new_state == UNCONFIGURING)
                        vscsi->state = new_state;
                break;
        default:
                break;
        }

        if (wait_idle) {
                pr_debug("disconnect start wait, active %d, sched %d\n",
                         (int)list_empty(&vscsi->active_q),
                         (int)list_empty(&vscsi->schedule_q));
                if (!list_empty(&vscsi->active_q) ||
                    !list_empty(&vscsi->schedule_q)) {
                        vscsi->flags |= WAIT_FOR_IDLE;
                        pr_debug("disconnect flags 0x%x\n", vscsi->flags);
                        /*
                         * This routine cannot be called with the interrupt
                         * lock held.
                         */
                        spin_unlock_bh(&vscsi->intr_lock);
                        wait_for_completion(&vscsi->wait_idle);
                        spin_lock_bh(&vscsi->intr_lock);
                }
                pr_debug("disconnect stop wait\n");

                ibmvscsis_adapter_idle(vscsi);
        }

        spin_unlock_bh(&vscsi->intr_lock);
}

/**
 * ibmvscsis_post_disconnect() - Schedule the disconnect
 * @vscsi:      Pointer to our adapter structure
 * @new_state:  State to move to after disconnecting
 * @flag_bits:  Flags to turn on in adapter structure
 *
 * If it's already been scheduled, then see if we need to "upgrade"
 * the new state (if the one passed in is more "severe" than the
 * previous one).
 *
 * PRECONDITION:
 *      interrupt lock is held
 */
static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
                                      uint flag_bits)
{
        uint state;

        /* check the validity of the new state */
        switch (new_state) {
        case UNCONFIGURING:
        case ERR_DISCONNECT:
        case ERR_DISCONNECT_RECONNECT:
        case WAIT_IDLE:
                break;

        default:
                dev_err(&vscsi->dev, "post_disconnect: Invalid new state %d\n",
                        new_state);
                return;
        }

        vscsi->flags |= flag_bits;

        pr_debug("post_disconnect: new_state 0x%x, flag_bits 0x%x, vscsi->flags 0x%x, state %hx\n",
                 new_state, flag_bits, vscsi->flags, vscsi->state);

        if (!(vscsi->flags & (DISCONNECT_SCHEDULED | SCHEDULE_DISCONNECT))) {
                vscsi->flags |= SCHEDULE_DISCONNECT;
                vscsi->new_state = new_state;

                INIT_WORK(&vscsi->proc_work, ibmvscsis_disconnect);
                (void)queue_work(vscsi->work_q, &vscsi->proc_work);
        } else {
                if (vscsi->new_state)
                        state = vscsi->new_state;
                else
                        state = vscsi->state;

                switch (state) {
                case NO_QUEUE:
                case UNCONFIGURING:
                        break;

                case ERR_DISCONNECTED:
                case ERR_DISCONNECT:
                case UNDEFINED:
                        if (new_state == UNCONFIGURING)
                                vscsi->new_state = new_state;
                        break;

                case ERR_DISCONNECT_RECONNECT:
                        switch (new_state) {
                        case UNCONFIGURING:
                        case ERR_DISCONNECT:
                                vscsi->new_state = new_state;
                                break;
                        default:
                                break;
                        }
                        break;

                case WAIT_ENABLED:
                case WAIT_IDLE:
                case WAIT_CONNECTION:
                case CONNECTED:
                case SRP_PROCESSING:
                        vscsi->new_state = new_state;
                        break;

                default:
                        break;
                }
        }

        pr_debug("Leaving post_disconnect: flags 0x%x, new_state 0x%x\n",
                 vscsi->flags, vscsi->new_state);
}

/**
 * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message
 * @vscsi:      Pointer to our adapter structure
 *
 * Must be called with interrupt lock held.
 */
static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi)
{
        long rc = ADAPT_SUCCESS;

        switch (vscsi->state) {
        case NO_QUEUE:
        case ERR_DISCONNECT:
        case ERR_DISCONNECT_RECONNECT:
        case ERR_DISCONNECTED:
        case UNCONFIGURING:
        case UNDEFINED:
                rc = ERROR;
                break;

        case WAIT_CONNECTION:
                vscsi->state = CONNECTED;
                break;

        case WAIT_IDLE:
        case SRP_PROCESSING:
        case CONNECTED:
        case WAIT_ENABLED:
        default:
                rc = ERROR;
                dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n",
                        vscsi->state);
                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
                break;
        }

        return rc;
}

/**
 * ibmvscsis_handle_init_msg() - Respond to an Init Message
 * @vscsi:      Pointer to our adapter structure
 *
 * Must be called with interrupt lock held.
 */
static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
{
        long rc = ADAPT_SUCCESS;

        switch (vscsi->state) {
        case WAIT_CONNECTION:
                rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
                switch (rc) {
                case H_SUCCESS:
                        vscsi->state = CONNECTED;
                        break;

                case H_PARAMETER:
                        dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
                                rc);
                        ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
                        break;

                case H_DROPPED:
                        dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
                                rc);
                        rc = ERROR;
                        ibmvscsis_post_disconnect(vscsi,
                                                  ERR_DISCONNECT_RECONNECT, 0);
                        break;

                case H_CLOSED:
                        pr_warn("init_msg: failed to send, rc %ld\n", rc);
                        rc = 0;
                        break;
                }
                break;

        case UNDEFINED:
                rc = ERROR;
                break;

        case UNCONFIGURING:
                break;

        case WAIT_ENABLED:
        case CONNECTED:
        case SRP_PROCESSING:
        case WAIT_IDLE:
        case NO_QUEUE:
        case ERR_DISCONNECT:
        case ERR_DISCONNECT_RECONNECT:
        case ERR_DISCONNECTED:
        default:
                rc = ERROR;
                dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n",
                        vscsi->state);
                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
                break;
        }

        return rc;
}

/**
 * ibmvscsis_init_msg() - Respond to an init message
 * @vscsi:      Pointer to our adapter structure
 * @crq:        Pointer to CRQ element containing the Init Message
 *
 * EXECUTION ENVIRONMENT:
 *      Interrupt, interrupt lock held
 */
static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
{
        long rc = ADAPT_SUCCESS;

        pr_debug("init_msg: state 0x%hx\n", vscsi->state);

        rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
                      (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
                      0);
        if (rc == H_SUCCESS) {
                vscsi->client_data.partition_number =
                        be64_to_cpu(*(u64 *)vscsi->map_buf);
                pr_debug("init_msg, part num %d\n",
                         vscsi->client_data.partition_number);
        } else {
                pr_debug("init_msg h_vioctl rc %ld\n", rc);
                rc = ADAPT_SUCCESS;
        }

        if (crq->format == INIT_MSG) {
                rc = ibmvscsis_handle_init_msg(vscsi);
        } else if (crq->format == INIT_COMPLETE_MSG) {
                rc = ibmvscsis_handle_init_compl_msg(vscsi);
        } else {
                rc = ERROR;
                dev_err(&vscsi->dev, "init_msg: invalid format %d\n",
                        (uint)crq->format);
                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
        }

        return rc;
}

/**
 * ibmvscsis_establish_new_q() - Establish new CRQ queue
 * @vscsi:      Pointer to our adapter structure
 *
 * Must be called with interrupt lock held.
 */
static long ibmvscsis_establish_new_q(struct scsi_info *vscsi)
{
        long rc = ADAPT_SUCCESS;
        uint format;

        rc = h_vioctl(vscsi->dds.unit_id, H_ENABLE_PREPARE_FOR_SUSPEND, 30000,
                      0, 0, 0, 0);
        if (rc == H_SUCCESS)
                vscsi->flags |= PREP_FOR_SUSPEND_ENABLED;
        else if (rc != H_NOT_FOUND)
                pr_err("Error from Enable Prepare for Suspend: %ld\n", rc);

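        /* keep only the flag bits that survive a queue reset and clear the response queue bookkeeping */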
        vscsi->flags &= PRESERVE_FLAG_FIELDS;
        vscsi->rsp_q_timer.timer_pops = 0;
        vscsi->debit = 0;
        vscsi->credit = 0;

        rc = vio_enable_interrupts(vscsi->dma_dev);
        if (rc) {
                pr_warn("establish_new_q: failed to enable interrupts, rc %ld\n",
                        rc);
                return rc;
        }

        rc = ibmvscsis_check_init_msg(vscsi, &format);
        if (rc) {
                dev_err(&vscsi->dev, "establish_new_q: check_init_msg failed, rc %ld\n",
                        rc);
                return rc;
        }

        if (format == UNUSED_FORMAT) {
                rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
                switch (rc) {
                case H_SUCCESS:
                case H_DROPPED:
                case H_CLOSED:
                        rc = ADAPT_SUCCESS;
                        break;

                case H_PARAMETER:
                case H_HARDWARE:
                        break;

                default:
                        vscsi->state = UNDEFINED;
                        rc = H_HARDWARE;
                        break;
                }
        } else if (format == INIT_MSG) {
                rc = ibmvscsis_handle_init_msg(vscsi);
        }

        return rc;
}

/**
 * ibmvscsis_reset_queue() - Reset CRQ Queue
 * @vscsi:      Pointer to our adapter structure
 *
 * This function calls h_free_q and then calls h_reg_q and does all
 * of the bookkeeping to get us back to where we can communicate.
 *
 * Actually, we don't always call h_free_crq.  A problem was discovered
 * where one partition would close and reopen its queue, which would
 * cause its partner to get a transport event, which would cause it to
 * close and reopen its queue, which would cause the original partition
 * to get a transport event, etc., etc.  To prevent this, we don't
 * actually close our queue if the client initiated the reset (i.e.
 * either we got a transport event or we have detected that the client's
 * queue is gone).
 *
 * EXECUTION ENVIRONMENT:
 *      Process environment, called with interrupt lock held
 */
static void ibmvscsis_reset_queue(struct scsi_info *vscsi)
{
        int bytes;
        long rc = ADAPT_SUCCESS;

        pr_debug("reset_queue: flags 0x%x\n", vscsi->flags);

        /* don't reset, the client did it for us */
        if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
                vscsi->flags &= PRESERVE_FLAG_FIELDS;
                vscsi->rsp_q_timer.timer_pops = 0;
                vscsi->debit = 0;
                vscsi->credit = 0;
                vscsi->state = WAIT_CONNECTION;
                vio_enable_interrupts(vscsi->dma_dev);
        } else {
                rc = ibmvscsis_free_command_q(vscsi);
                if (rc == ADAPT_SUCCESS) {
                        vscsi->state = WAIT_CONNECTION;

                        bytes = vscsi->cmd_q.size * PAGE_SIZE;
                        rc = h_reg_crq(vscsi->dds.unit_id,
                                       vscsi->cmd_q.crq_token, bytes);
                        if (rc == H_CLOSED || rc == H_SUCCESS)
                                rc = ibmvscsis_establish_new_q(vscsi);

                        if (rc != ADAPT_SUCCESS) {
                                pr_debug("reset_queue: reg_crq rc %ld\n", rc);

                                vscsi->state = ERR_DISCONNECTED;
                                vscsi->flags |= RESPONSE_Q_DOWN;
                                ibmvscsis_free_command_q(vscsi);
                        }
                } else {
                        vscsi->state = ERR_DISCONNECTED;
                        vscsi->flags |= RESPONSE_Q_DOWN;
                }
        }
}

/**
 * ibmvscsis_free_cmd_resources() - Free command resources
 * @vscsi:      Pointer to our adapter structure
 * @cmd:        Command which is no longer in use
 *
 * Must be called with interrupt lock held.
 */
static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi,
                                         struct ibmvscsis_cmd *cmd)
{
        struct iu_entry *iue = cmd->iue;

        switch (cmd->type) {
        case TASK_MANAGEMENT:
        case SCSI_CDB:
                /*
                 * When the queue goes down this value is cleared, so it
                 * cannot be cleared in this general purpose function.
                 */
                if (vscsi->debit)
                        vscsi->debit -= 1;
                break;
        case ADAPTER_MAD:
                vscsi->flags &= ~PROCESSING_MAD;
                break;
        case UNSET_TYPE:
                break;
        default:
                dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n",
                        cmd->type);
                break;
        }

        cmd->iue = NULL;
        list_add_tail(&cmd->list, &vscsi->free_cmd);
        srp_iu_put(iue);

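        /* if this was the last outstanding command, wake up anyone waiting for the adapter to go idle */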
        if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) &&
            list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) {
                vscsi->flags &= ~WAIT_FOR_IDLE;
                complete(&vscsi->wait_idle);
        }
}

/**
 * ibmvscsis_ready_for_suspend() - Helper function to call VIOCTL
 * @vscsi:      Pointer to our adapter structure
 * @idle:       Indicates whether we were called from adapter_idle.  This
 *              is important to know if we need to do a disconnect, since if
 *              we're called from adapter_idle, we're still processing the
 *              current disconnect, so we can't just call post_disconnect.
 *
 * This function is called once the adapter is idle after PHYP has sent
 * us a Prepare for Suspend Transport Event.
 *
 * EXECUTION ENVIRONMENT:
 *      Process or interrupt environment called with interrupt lock held
 */
static long ibmvscsis_ready_for_suspend(struct scsi_info *vscsi, bool idle)
{
        long rc = 0;
        struct viosrp_crq *crq;

        /* See if there is a Resume event in the queue */
        crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;

        pr_debug("ready_suspend: flags 0x%x, state 0x%hx crq_valid:%x\n",
                 vscsi->flags, vscsi->state, (int)crq->valid);

        if (!(vscsi->flags & PREP_FOR_SUSPEND_ABORTED) && !(crq->valid)) {
                rc = h_vioctl(vscsi->dds.unit_id, H_READY_FOR_SUSPEND, 0, 0, 0,
                              0, 0);
                if (rc) {
                        pr_err("Ready for Suspend Vioctl failed: %ld\n", rc);
                        rc = 0;
                }
        } else if (((vscsi->flags & PREP_FOR_SUSPEND_OVERWRITE) &&
                    (vscsi->flags & PREP_FOR_SUSPEND_ABORTED)) ||
                   ((crq->valid) && ((crq->valid != VALID_TRANS_EVENT) ||
                                     (crq->format != RESUME_FROM_SUSP)))) {
                if (idle) {
                        vscsi->state = ERR_DISCONNECT_RECONNECT;
                        ibmvscsis_reset_queue(vscsi);
                        rc = -1;
                } else if (vscsi->state == CONNECTED) {
                        ibmvscsis_post_disconnect(vscsi,
                                                  ERR_DISCONNECT_RECONNECT, 0);
                }

                vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE;

                if ((crq->valid) && ((crq->valid != VALID_TRANS_EVENT) ||
                                     (crq->format != RESUME_FROM_SUSP)))
                        pr_err("Invalid element in CRQ after Prepare for Suspend\n");
        }

        vscsi->flags &= ~(PREP_FOR_SUSPEND_PENDING | PREP_FOR_SUSPEND_ABORTED);

        return rc;
}

/**
 * ibmvscsis_trans_event() - Handle a Transport Event
 * @vscsi:      Pointer to our adapter structure
 * @crq:        Pointer to CRQ entry containing the Transport Event
 *
 * Do the logic to close the I_T nexus.  This function may not
 * behave to specification.
 *
 * EXECUTION ENVIRONMENT:
 *      Interrupt, interrupt lock held
 */
static long ibmvscsis_trans_event(struct scsi_info *vscsi,
                                  struct viosrp_crq *crq)
{
        long rc = ADAPT_SUCCESS;

        pr_debug("trans_event: format %d, flags 0x%x, state 0x%hx\n",
                 (int)crq->format, vscsi->flags, vscsi->state);

        switch (crq->format) {
        case MIGRATED:
        case PARTNER_FAILED:
        case PARTNER_DEREGISTER:
                ibmvscsis_delete_client_info(vscsi, true);
                if (crq->format == MIGRATED)
                        vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE;
                switch (vscsi->state) {
                case NO_QUEUE:
                case ERR_DISCONNECTED:
                case UNDEFINED:
                        break;

                case UNCONFIGURING:
                        vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT);
                        break;

                case WAIT_ENABLED:
                        break;

                case WAIT_CONNECTION:
                        break;

                case CONNECTED:
                        ibmvscsis_post_disconnect(vscsi, WAIT_IDLE,
                                                  (RESPONSE_Q_DOWN |
                                                   TRANS_EVENT));
                        break;

                case SRP_PROCESSING:
                        if ((vscsi->debit > 0) ||
                            !list_empty(&vscsi->schedule_q) ||
                            !list_empty(&vscsi->waiting_rsp) ||
                            !list_empty(&vscsi->active_q)) {
                                pr_debug("debit %d, sched %d, wait %d, active %d\n",
                                         vscsi->debit,
                                         (int)list_empty(&vscsi->schedule_q),
                                         (int)list_empty(&vscsi->waiting_rsp),
                                         (int)list_empty(&vscsi->active_q));
                                pr_warn("connection lost with outstanding work\n");
                        } else {
                                pr_debug("trans_event: SRP Processing, but no outstanding work\n");
                        }

                        ibmvscsis_post_disconnect(vscsi, WAIT_IDLE,
                                                  (RESPONSE_Q_DOWN |
                                                   TRANS_EVENT));
                        break;

                case ERR_DISCONNECT:
                case ERR_DISCONNECT_RECONNECT:
                case WAIT_IDLE:
                        vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT);
                        break;
                }
                break;

        case PREPARE_FOR_SUSPEND:
                pr_debug("Prep for Suspend, crq status = 0x%x\n",
                         (int)crq->status);
                switch (vscsi->state) {
                case ERR_DISCONNECTED:
                case WAIT_CONNECTION:
                case CONNECTED:
                        ibmvscsis_ready_for_suspend(vscsi, false);
                        break;
                case SRP_PROCESSING:
                        vscsi->resume_state = vscsi->state;
                        vscsi->flags |= PREP_FOR_SUSPEND_PENDING;
                        if (crq->status == CRQ_ENTRY_OVERWRITTEN)
                                vscsi->flags |= PREP_FOR_SUSPEND_OVERWRITE;
                        ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, 0);
                        break;
                case NO_QUEUE:
                case UNDEFINED:
                case UNCONFIGURING:
                case WAIT_ENABLED:
                case ERR_DISCONNECT:
                case ERR_DISCONNECT_RECONNECT:
                case WAIT_IDLE:
                        pr_err("Invalid state for Prepare for Suspend Trans Event: 0x%x\n",
                               vscsi->state);
                        break;
                }
                break;

        case RESUME_FROM_SUSP:
                pr_debug("Resume from Suspend, crq status = 0x%x\n",
                         (int)crq->status);
                if (vscsi->flags & PREP_FOR_SUSPEND_PENDING) {
                        vscsi->flags |= PREP_FOR_SUSPEND_ABORTED;
                } else {
                        if ((crq->status == CRQ_ENTRY_OVERWRITTEN) ||
                            (vscsi->flags & PREP_FOR_SUSPEND_OVERWRITE)) {
                                ibmvscsis_post_disconnect(vscsi,
                                                          ERR_DISCONNECT_RECONNECT,
                                                          0);
                                vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE;
                        }
                }
                break;

        default:
                rc = ERROR;
                dev_err(&vscsi->dev, "trans_event: invalid format %d\n",
                        (uint)crq->format);
                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT,
                                          RESPONSE_Q_DOWN);
                break;
        }

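        /* a non-zero return tells the caller that a disconnect has been scheduled */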
        rc = vscsi->flags & SCHEDULE_DISCONNECT;

        pr_debug("Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n",
                 vscsi->flags, vscsi->state, rc);

        return rc;
}

/**
 * ibmvscsis_poll_cmd_q() - Poll Command Queue
 * @vscsi:      Pointer to our adapter structure
 *
 * Called to handle command elements that may have arrived while
 * interrupts were disabled.
 *
 * EXECUTION ENVIRONMENT:
 *      intr_lock must be held
 */
static void ibmvscsis_poll_cmd_q(struct scsi_info *vscsi)
{
        struct viosrp_crq *crq;
        long rc;
        bool ack = true;
        volatile u8 valid;

        pr_debug("poll_cmd_q: flags 0x%x, state 0x%hx, q index %u\n",
                 vscsi->flags, vscsi->state, vscsi->cmd_q.index);

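        /* if a disconnect is already scheduled, only transport events are handled below */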
        rc = vscsi->flags & SCHEDULE_DISCONNECT;
        crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
        valid = crq->valid;
        dma_rmb();

        while (valid) {
poll_work:
                vscsi->cmd_q.index =
                        (vscsi->cmd_q.index + 1) & vscsi->cmd_q.mask;

                if (!rc) {
                        rc = ibmvscsis_parse_command(vscsi, crq);
                } else {
                        if ((uint)crq->valid == VALID_TRANS_EVENT) {
                                /*
                                 * must service the transport layer events even
                                 * in an error state, don't break out until all
                                 * the consecutive transport events have been
                                 * processed
                                 */
                                rc = ibmvscsis_trans_event(vscsi, crq);
                        } else if (vscsi->flags & TRANS_EVENT) {
                                /*
                                 * if a transport event has occurred leave
                                 * everything but transport events on the queue
                                 */
                                pr_debug("poll_cmd_q, ignoring\n");

                                /*
                                 * need to decrement the queue index so we can
                                 * look at the element again
                                 */
                                if (vscsi->cmd_q.index)
                                        vscsi->cmd_q.index -= 1;
                                else
                                        /*
                                         * the index is at 0, so it just
                                         * wrapped; have it index the last
                                         * element in the queue
                                         */
                                        vscsi->cmd_q.index = vscsi->cmd_q.mask;
                                break;
                        }
                }

                crq->valid = INVALIDATE_CMD_RESP_EL;

                crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
                valid = crq->valid;
                dma_rmb();
        }

        if (!rc) {
                if (ack) {
                        vio_enable_interrupts(vscsi->dma_dev);
                        ack = false;
                        pr_debug("poll_cmd_q, reenabling interrupts\n");
                }
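                /* catch any elements that arrived while interrupts were being re-enabled */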
                valid = crq->valid;
                dma_rmb();
                if (valid)
                        goto poll_work;
        }

        pr_debug("Leaving poll_cmd_q: rc %ld\n", rc);
}

/**
 * ibmvscsis_free_cmd_qs() - Free elements in queue
 * @vscsi:      Pointer to our adapter structure
 *
 * Free all of the elements on all queues that are waiting for
 * whatever reason.
 *
 * PRECONDITION:
 *      Called with interrupt lock held
 */
static void ibmvscsis_free_cmd_qs(struct scsi_info *vscsi)
{
        struct ibmvscsis_cmd *cmd, *nxt;

        pr_debug("free_cmd_qs: waiting_rsp empty %d, timer starter %d\n",
                 (int)list_empty(&vscsi->waiting_rsp),
                 vscsi->rsp_q_timer.started);

        list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
                list_del(&cmd->list);
                ibmvscsis_free_cmd_resources(vscsi, cmd);
        }
}

/**
 * ibmvscsis_get_free_cmd() - Get free command from list
 * @vscsi:      Pointer to our adapter structure
 *
 * Must be called with interrupt lock held.
 */
static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi)
{
        struct ibmvscsis_cmd *cmd = NULL;
        struct iu_entry *iue;

        iue = srp_iu_get(&vscsi->target);
        if (iue) {
                cmd = list_first_entry_or_null(&vscsi->free_cmd,
                                               struct ibmvscsis_cmd, list);
                if (cmd) {
                        if (cmd->abort_cmd)
                                cmd->abort_cmd = NULL;
                        cmd->flags &= ~(DELAY_SEND);
                        list_del(&cmd->list);
                        cmd->iue = iue;
                        cmd->type = UNSET_TYPE;
                        memset(&cmd->se_cmd, 0, sizeof(cmd->se_cmd));
                } else {
                        srp_iu_put(iue);
                }
        }

        return cmd;
}

/**
 * ibmvscsis_adapter_idle() - Helper function to handle idle adapter
 * @vscsi:      Pointer to our adapter structure
 *
 * This function is called when the adapter is idle while the driver
 * is attempting to clear an error condition.
 * The adapter is considered busy if any of its cmd queues
 * are non-empty. This function can be invoked
 * from the off level disconnect function.
 *
 * EXECUTION ENVIRONMENT:
 *      Process environment called with interrupt lock held
 */
static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
{
        int free_qs = false;
        long rc = 0;

        pr_debug("adapter_idle: flags 0x%x, state 0x%hx\n", vscsi->flags,
                 vscsi->state);

        /* Only need to free qs if we're disconnecting from client */
        if (vscsi->state != WAIT_CONNECTION || vscsi->flags & TRANS_EVENT)
                free_qs = true;

        switch (vscsi->state) {
        case UNCONFIGURING:
                ibmvscsis_free_command_q(vscsi);
                dma_rmb();
                isync();
                if (vscsi->flags & CFG_SLEEPING) {
                        vscsi->flags &= ~CFG_SLEEPING;
                        complete(&vscsi->unconfig);
                }
                break;
        case ERR_DISCONNECT_RECONNECT:
                ibmvscsis_reset_queue(vscsi);
                pr_debug("adapter_idle, disc_rec: flags 0x%x\n", vscsi->flags);
                break;

        case ERR_DISCONNECT:
                ibmvscsis_free_command_q(vscsi);
                vscsi->flags &= ~(SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED);
                vscsi->flags |= RESPONSE_Q_DOWN;
                if (vscsi->tport.enabled)
                        vscsi->state = ERR_DISCONNECTED;
                else
                        vscsi->state = WAIT_ENABLED;
                pr_debug("adapter_idle, disc: flags 0x%x, state 0x%hx\n",
                         vscsi->flags, vscsi->state);
                break;

        case WAIT_IDLE:
                vscsi->rsp_q_timer.timer_pops = 0;
                vscsi->debit = 0;
                vscsi->credit = 0;
                if (vscsi->flags & PREP_FOR_SUSPEND_PENDING) {
                        vscsi->state = vscsi->resume_state;
                        vscsi->resume_state = 0;
                        rc = ibmvscsis_ready_for_suspend(vscsi, true);
                        vscsi->flags &= ~DISCONNECT_SCHEDULED;
                        if (rc)
                                break;
                } else if (vscsi->flags & TRANS_EVENT) {
                        vscsi->state = WAIT_CONNECTION;
                        vscsi->flags &= PRESERVE_FLAG_FIELDS;
                } else {
                        vscsi->state = CONNECTED;
                        vscsi->flags &= ~DISCONNECT_SCHEDULED;
                }

                pr_debug("adapter_idle, wait: flags 0x%x, state 0x%hx\n",
                         vscsi->flags, vscsi->state);
                ibmvscsis_poll_cmd_q(vscsi);
                break;

        case ERR_DISCONNECTED:
                vscsi->flags &= ~DISCONNECT_SCHEDULED;
                pr_debug("adapter_idle, disconnected: flags 0x%x, state 0x%hx\n",
                         vscsi->flags, vscsi->state);
                break;

        default:
                dev_err(&vscsi->dev, "adapter_idle: in invalid state %d\n",
                        vscsi->state);
                break;
        }

        if (free_qs)
                ibmvscsis_free_cmd_qs(vscsi);

        /*
         * There is a timing window where we could lose a disconnect request.
         * The known path to this window occurs during the DISCONNECT_RECONNECT
         * case above: reset_queue calls free_command_q, which will release the
         * interrupt lock.  During that time, a new post_disconnect call can be
         * made with a "more severe" state (DISCONNECT or UNCONFIGURING).
         * Because the DISCONNECT_SCHEDULED flag is already set, post_disconnect
         * will only set the new_state.  Now free_command_q reacquires the intr
         * lock and clears the DISCONNECT_SCHEDULED flag (using PRESERVE_FLAG_
         * FIELDS), and the disconnect is lost.  This is particularly bad when
         * the new disconnect was for UNCONFIGURING, since the unconfigure hangs
         * forever.
         * The fix is that free_command_q sets the acr state and acr flags if
         * there was a change while the lock was dropped.  Note that
         * free_command_q clears that state before releasing the lock, and
         * different paths call free_command_q at different times, so don't
         * initialize the acr fields above.
         */
1411        if (vscsi->phyp_acr_state != 0) {
1412                /*
1413                 * set any bits in flags that may have been cleared by
1414                 * a call to free command queue in switch statement
1415                 * or reset queue
1416                 */
1417                vscsi->flags |= vscsi->phyp_acr_flags;
1418                ibmvscsis_post_disconnect(vscsi, vscsi->phyp_acr_state, 0);
1419                vscsi->phyp_acr_state = 0;
1420                vscsi->phyp_acr_flags = 0;
1421
1422                pr_debug("adapter_idle: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
1423                         vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
1424                         vscsi->phyp_acr_state);
1425        }
1426
1427        pr_debug("Leaving adapter_idle: flags 0x%x, state 0x%hx, new_state 0x%x\n",
1428                 vscsi->flags, vscsi->state, vscsi->new_state);
1429}
1430
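/*
 * Editor's sketch (not part of the upstream driver): the capture side of
 * the phyp_acr_* mechanism described in the comment above.  If a "more
 * severe" disconnect request arrives while free_command_q has dropped and
 * reacquired the interrupt lock, the new state and flags are parked in
 * phyp_acr_state/phyp_acr_flags so adapter_idle can replay them as shown
 * above.  The helper name is hypothetical; only the fields come from this
 * file.
 */
static void ibmvscsis_example_capture_acr(struct scsi_info *vscsi,
                                          uint new_state, uint new_flags)
{
        /* caller holds vscsi->intr_lock */
        if (vscsi->phyp_acr_state == 0) {
                vscsi->phyp_acr_state = new_state;
                vscsi->phyp_acr_flags = new_flags;
        }
}
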
1431/**
1432 * ibmvscsis_copy_crq_packet() - Copy CRQ Packet
1433 * @vscsi:      Pointer to our adapter structure
1434 * @cmd:        Pointer to command element to use to process the request
1435 * @crq:        Pointer to CRQ entry containing the request
1436 *
1437 * Copy the SRP information unit from the hosted
1438 * partition using remote DMA.
1439 *
1440 * EXECUTION ENVIRONMENT:
1441 *      Interrupt, interrupt lock held
1442 */
1443static long ibmvscsis_copy_crq_packet(struct scsi_info *vscsi,
1444                                      struct ibmvscsis_cmd *cmd,
1445                                      struct viosrp_crq *crq)
1446{
1447        struct iu_entry *iue = cmd->iue;
1448        long rc = 0;
1449        u16 len;
1450
1451        len = be16_to_cpu(crq->IU_length);
1452        if ((len > SRP_MAX_IU_LEN) || (len == 0)) {
1453                dev_err(&vscsi->dev, "copy_crq: Invalid len %d passed\n", len);
1454                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
1455                return SRP_VIOLATION;
1456        }
1457
1458        rc = h_copy_rdma(len, vscsi->dds.window[REMOTE].liobn,
1459                         be64_to_cpu(crq->IU_data_ptr),
1460                         vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma);
1461
1462        switch (rc) {
1463        case H_SUCCESS:
1464                cmd->init_time = mftb();
1465                iue->remote_token = crq->IU_data_ptr;
1466                iue->iu_len = len;
1467                pr_debug("copy_crq: ioba 0x%llx, init_time 0x%llx\n",
1468                         be64_to_cpu(crq->IU_data_ptr), cmd->init_time);
1469                break;
1470        case H_PERMISSION:
1471                if (connection_broken(vscsi))
1472                        ibmvscsis_post_disconnect(vscsi,
1473                                                  ERR_DISCONNECT_RECONNECT,
1474                                                  (RESPONSE_Q_DOWN |
1475                                                   CLIENT_FAILED));
1476                else
1477                        ibmvscsis_post_disconnect(vscsi,
1478                                                  ERR_DISCONNECT_RECONNECT, 0);
1479
1480                dev_err(&vscsi->dev, "copy_crq: h_copy_rdma failed, rc %ld\n",
1481                        rc);
1482                break;
1483        case H_DEST_PARM:
1484        case H_SOURCE_PARM:
1485        default:
1486                dev_err(&vscsi->dev, "copy_crq: h_copy_rdma failed, rc %ld\n",
1487                        rc);
1488                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
1489                break;
1490        }
1491
1492        return rc;
1493}
1494
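/*
 * Editor's sketch (hypothetical helper, not in the upstream driver): the
 * h_copy_rdma() argument order used throughout this file.  For an inbound
 * copy (client -> server, as in copy_crq_packet above) the source is the
 * remote window's liobn plus the client-supplied ioba, and the destination
 * is the local window's liobn plus one of our DMA addresses.  Outbound
 * copies (e.g. adapter_info below) simply swap the two liobn/address pairs.
 */
static long ibmvscsis_example_rdma_from_client(struct scsi_info *vscsi,
                                               u64 len, u64 client_ioba,
                                               dma_addr_t local_dma)
{
        return h_copy_rdma(len, vscsi->dds.window[REMOTE].liobn, client_ioba,
                           vscsi->dds.window[LOCAL].liobn, local_dma);
}
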
1495/**
1496 * ibmvscsis_adapter_info() - Service an Adapter Info MAnagement Datagram (MAD)
1497 * @vscsi:      Pointer to our adapter structure
1498 * @iue:        Information Unit containing the Adapter Info MAD request
1499 *
1500 * EXECUTION ENVIRONMENT:
1501 *      Interrupt, adapter lock held
1502 */
1503static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
1504                                   struct iu_entry *iue)
1505{
1506        struct viosrp_adapter_info *mad = &vio_iu(iue)->mad.adapter_info;
1507        struct mad_adapter_info_data *info;
1508        uint flag_bits = 0;
1509        dma_addr_t token;
1510        long rc;
1511
1512        mad->common.status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
1513
1514        if (be16_to_cpu(mad->common.length) > sizeof(*info)) {
1515                mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
1516                return 0;
1517        }
1518
1519        info = dma_alloc_coherent(&vscsi->dma_dev->dev, sizeof(*info), &token,
1520                                  GFP_ATOMIC);
1521        if (!info) {
1522                dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
1523                        iue->target);
1524                mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
1525                return 0;
1526        }
1527
1528        /* Get remote info */
1529        rc = h_copy_rdma(be16_to_cpu(mad->common.length),
1530                         vscsi->dds.window[REMOTE].liobn,
1531                         be64_to_cpu(mad->buffer),
1532                         vscsi->dds.window[LOCAL].liobn, token);
1533
1534        if (rc != H_SUCCESS) {
1535                if (rc == H_PERMISSION) {
1536                        if (connection_broken(vscsi))
1537                                flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
1538                }
1539                pr_warn("adapter_info: h_copy_rdma from client failed, rc %ld\n",
1540                        rc);
1541                pr_debug("adapter_info: ioba 0x%llx, flags 0x%x, flag_bits 0x%x\n",
1542                         be64_to_cpu(mad->buffer), vscsi->flags, flag_bits);
1543                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
1544                                          flag_bits);
1545                goto free_dma;
1546        }
1547
1548        /*
1549         * Copy client info, but ignore partition number, which we
1550         * already got from phyp - unless we failed to get it from
1551         * phyp (e.g. if we're running on a p5 system).
1552         */
1553        if (vscsi->client_data.partition_number == 0)
1554                vscsi->client_data.partition_number =
1555                        be32_to_cpu(info->partition_number);
1556        strncpy(vscsi->client_data.srp_version, info->srp_version,
1557                sizeof(vscsi->client_data.srp_version));
1558        strncpy(vscsi->client_data.partition_name, info->partition_name,
1559                sizeof(vscsi->client_data.partition_name));
1560        vscsi->client_data.mad_version = be32_to_cpu(info->mad_version);
1561        vscsi->client_data.os_type = be32_to_cpu(info->os_type);
1562
1563        /* Copy our info */
1564        strncpy(info->srp_version, SRP_VERSION,
1565                sizeof(info->srp_version));
1566        strncpy(info->partition_name, vscsi->dds.partition_name,
1567                sizeof(info->partition_name));
1568        info->partition_number = cpu_to_be32(vscsi->dds.partition_num);
1569        info->mad_version = cpu_to_be32(MAD_VERSION_1);
1570        info->os_type = cpu_to_be32(LINUX);
1571        memset(&info->port_max_txu[0], 0, sizeof(info->port_max_txu));
1572        info->port_max_txu[0] = cpu_to_be32(MAX_TXU);
1573
1574        dma_wmb();
1575        rc = h_copy_rdma(sizeof(*info), vscsi->dds.window[LOCAL].liobn,
1576                         token, vscsi->dds.window[REMOTE].liobn,
1577                         be64_to_cpu(mad->buffer));
1578        switch (rc) {
1579        case H_SUCCESS:
1580                break;
1581
1582        case H_SOURCE_PARM:
1583        case H_DEST_PARM:
1584        case H_PERMISSION:
1585                if (connection_broken(vscsi))
1586                        flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
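                    /* Fall through */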
1587        default:
1588                dev_err(&vscsi->dev, "adapter_info: h_copy_rdma to client failed, rc %ld\n",
1589                        rc);
1590                ibmvscsis_post_disconnect(vscsi,
1591                                          ERR_DISCONNECT_RECONNECT,
1592                                          flag_bits);
1593                break;
1594        }
1595
1596free_dma:
1597        dma_free_coherent(&vscsi->dma_dev->dev, sizeof(*info), info, token);
1598        pr_debug("Leaving adapter_info, rc %ld\n", rc);
1599
1600        return rc;
1601}
1602
1603/**
1604 * ibmvscsis_cap_mad() - Service a Capabilities MAnagement Datagram (MAD)
1605 * @vscsi:      Pointer to our adapter structure
1606 * @iue:        Information Unit containing the Capabilities MAD request
1607 *
1608 * NOTE: if you return an error from this routine you must be
1609 * disconnecting or you will cause a hang
1610 *
1611 * EXECUTION ENVIRONMENT:
1612 *      Interrupt, called with adapter lock held
1613 */
1614static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
1615{
1616        struct viosrp_capabilities *mad = &vio_iu(iue)->mad.capabilities;
1617        struct capabilities *cap;
1618        struct mad_capability_common *common;
1619        dma_addr_t token;
1620        u16 olen, len, status, min_len, cap_len;
1621        u32 flag;
1622        uint flag_bits = 0;
1623        long rc = 0;
1624
1625        olen = be16_to_cpu(mad->common.length);
1626        /*
1627         * struct capabilities hardcodes a couple of capabilities after the
1628         * header, but the capabilities can actually be in any order.
1629         */
1630        min_len = offsetof(struct capabilities, migration);
1631        if ((olen < min_len) || (olen > PAGE_SIZE)) {
1632                pr_warn("cap_mad: invalid len %d\n", olen);
1633                mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
1634                return 0;
1635        }
1636
1637        cap = dma_alloc_coherent(&vscsi->dma_dev->dev, olen, &token,
1638                                 GFP_ATOMIC);
1639        if (!cap) {
1640                dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
1641                        iue->target);
1642                mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
1643                return 0;
1644        }
1645        rc = h_copy_rdma(olen, vscsi->dds.window[REMOTE].liobn,
1646                         be64_to_cpu(mad->buffer),
1647                         vscsi->dds.window[LOCAL].liobn, token);
1648        if (rc == H_SUCCESS) {
1649                strncpy(cap->name, dev_name(&vscsi->dma_dev->dev),
1650                        SRP_MAX_LOC_LEN);
1651
1652                len = olen - min_len;
1653                status = VIOSRP_MAD_SUCCESS;
1654                common = (struct mad_capability_common *)&cap->migration;
1655
1656                while ((len > 0) && (status == VIOSRP_MAD_SUCCESS) && !rc) {
1657                        pr_debug("cap_mad: len left %hd, cap type %d, cap len %hd\n",
1658                                 len, be32_to_cpu(common->cap_type),
1659                                 be16_to_cpu(common->length));
1660
1661                        cap_len = be16_to_cpu(common->length);
1662                        if (cap_len > len) {
1663                                dev_err(&vscsi->dev, "cap_mad: cap len mismatch with total len\n");
1664                                status = VIOSRP_MAD_FAILED;
1665                                break;
1666                        }
1667
1668                        if (cap_len == 0) {
1669                                dev_err(&vscsi->dev, "cap_mad: cap len is 0\n");
1670                                status = VIOSRP_MAD_FAILED;
1671                                break;
1672                        }
1673
1674                        switch (common->cap_type) {
1675                        default:
1676                                pr_debug("cap_mad: unsupported capability\n");
1677                                common->server_support = 0;
1678                                flag = cpu_to_be32((u32)CAP_LIST_SUPPORTED);
1679                                cap->flags &= ~flag;
1680                                break;
1681                        }
1682
1683                        len = len - cap_len;
1684                        common = (struct mad_capability_common *)
1685                                ((char *)common + cap_len);
1686                }
1687
1688                mad->common.status = cpu_to_be16(status);
1689
1690                dma_wmb();
1691                rc = h_copy_rdma(olen, vscsi->dds.window[LOCAL].liobn, token,
1692                                 vscsi->dds.window[REMOTE].liobn,
1693                                 be64_to_cpu(mad->buffer));
1694
1695                if (rc != H_SUCCESS) {
1696                        pr_debug("cap_mad: failed to copy to client, rc %ld\n",
1697                                 rc);
1698
1699                        if (rc == H_PERMISSION) {
1700                                if (connection_broken(vscsi))
1701                                        flag_bits = (RESPONSE_Q_DOWN |
1702                                                     CLIENT_FAILED);
1703                        }
1704
1705                        pr_warn("cap_mad: error copying data to client, rc %ld\n",
1706                                rc);
1707                        ibmvscsis_post_disconnect(vscsi,
1708                                                  ERR_DISCONNECT_RECONNECT,
1709                                                  flag_bits);
1710                }
1711        }
1712
1713        dma_free_coherent(&vscsi->dma_dev->dev, olen, cap, token);
1714
1715        pr_debug("Leaving cap_mad, rc %ld, client_cap 0x%x\n",
1716                 rc, vscsi->client_cap);
1717
1718        return rc;
1719}
1720
1721/**
1722 * ibmvscsis_process_mad() - Service a MAnagement Datagram (MAD)
1723 * @vscsi:      Pointer to our adapter structure
1724 * @iue:        Information Unit containing the MAD request
1725 *
1726 * Must be called with interrupt lock held.
1727 */
1728static long ibmvscsis_process_mad(struct scsi_info *vscsi, struct iu_entry *iue)
1729{
1730        struct mad_common *mad = (struct mad_common *)&vio_iu(iue)->mad;
1731        struct viosrp_empty_iu *empty;
1732        long rc = ADAPT_SUCCESS;
1733
1734        switch (be32_to_cpu(mad->type)) {
1735        case VIOSRP_EMPTY_IU_TYPE:
1736                empty = &vio_iu(iue)->mad.empty_iu;
1737                vscsi->empty_iu_id = be64_to_cpu(empty->buffer);
1738                vscsi->empty_iu_tag = be64_to_cpu(empty->common.tag);
1739                mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
1740                break;
1741        case VIOSRP_ADAPTER_INFO_TYPE:
1742                rc = ibmvscsis_adapter_info(vscsi, iue);
1743                break;
1744        case VIOSRP_CAPABILITIES_TYPE:
1745                rc = ibmvscsis_cap_mad(vscsi, iue);
1746                break;
1747        case VIOSRP_ENABLE_FAST_FAIL:
1748                if (vscsi->state == CONNECTED) {
1749                        vscsi->fast_fail = true;
1750                        mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
1751                } else {
1752                        pr_warn("fast fail mad sent after login\n");
1753                        mad->status = cpu_to_be16(VIOSRP_MAD_FAILED);
1754                }
1755                break;
1756        default:
1757                mad->status = cpu_to_be16(VIOSRP_MAD_NOT_SUPPORTED);
1758                break;
1759        }
1760
1761        return rc;
1762}
1763
1764/**
1765 * srp_snd_msg_failed() - Handle an error when sending a response
1766 * @vscsi:      Pointer to our adapter structure
1767 * @rc:         The return code from the h_send_crq command
1768 *
1769 * Must be called with interrupt lock held.
1770 */
1771static void srp_snd_msg_failed(struct scsi_info *vscsi, long rc)
1772{
1773        ktime_t kt;
1774
1775        if (rc != H_DROPPED) {
1776                ibmvscsis_free_cmd_qs(vscsi);
1777
1778                if (rc == H_CLOSED)
1779                        vscsi->flags |= CLIENT_FAILED;
1780
1781                /* don't flag the same problem multiple times */
1782                if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
1783                        vscsi->flags |= RESPONSE_Q_DOWN;
1784                        if (!(vscsi->state & (ERR_DISCONNECT |
1785                                              ERR_DISCONNECT_RECONNECT |
1786                                              ERR_DISCONNECTED | UNDEFINED))) {
1787                                dev_err(&vscsi->dev, "snd_msg_failed: setting RESPONSE_Q_DOWN, state 0x%hx, flags 0x%x, rc %ld\n",
1788                                        vscsi->state, vscsi->flags, rc);
1789                        }
1790                        ibmvscsis_post_disconnect(vscsi,
1791                                                  ERR_DISCONNECT_RECONNECT, 0);
1792                }
1793                return;
1794        }
1795
1796        /*
1797         * The response queue is full.
1798         * If the server is processing SRP requests, i.e.
1799         * the client has successfully done an
1800         * SRP_LOGIN, then it will wait forever for room in
1801         * the queue.  However if the system admin
1802         * is attempting to unconfigure the server then one
1803         * or more children will be in a state where
1804         * they are being removed. So if there is even one
1805         * child being removed then the driver assumes
1806         * the system admin is attempting to break the
1807         * connection with the client and MAX_TIMER_POPS
1808         * is honored.
1809         */
1810        if ((vscsi->rsp_q_timer.timer_pops < MAX_TIMER_POPS) ||
1811            (vscsi->state == SRP_PROCESSING)) {
1812                pr_debug("snd_msg_failed: response queue full, flags 0x%x, timer started %d, pops %d\n",
1813                         vscsi->flags, (int)vscsi->rsp_q_timer.started,
1814                         vscsi->rsp_q_timer.timer_pops);
1815
1816                /*
1817                 * Check if the timer is running; if it
1818                 * is not then start it up.
1819                 */
1820                if (!vscsi->rsp_q_timer.started) {
1821                        if (vscsi->rsp_q_timer.timer_pops <
1822                            MAX_TIMER_POPS) {
1823                                kt = WAIT_NANO_SECONDS;
1824                        } else {
1825                                /*
1826                                 * slide the timeslice if the maximum
1827                                 * timer pops have already happened
1828                                 */
1829                                kt = ktime_set(WAIT_SECONDS, 0);
1830                        }
1831
1832                        vscsi->rsp_q_timer.started = true;
1833                        hrtimer_start(&vscsi->rsp_q_timer.timer, kt,
1834                                      HRTIMER_MODE_REL);
1835                }
1836        } else {
1837                /*
1838                 * TBD: Do we need to worry about this? Need to get
1839                 *      remove working.
1840                 */
1841                /*
1842                 * waited a long time and it appears the system admin
1843                 * is bringing this driver down
1844                 */
1845                vscsi->flags |= RESPONSE_Q_DOWN;
1846                ibmvscsis_free_cmd_qs(vscsi);
1847                /*
1848                 * if the driver is already attempting to disconnect
1849                 * from the client and has already logged an error,
1850                 * trace this event but don't put it in the error log
1851                 */
1852                if (!(vscsi->state & (ERR_DISCONNECT |
1853                                      ERR_DISCONNECT_RECONNECT |
1854                                      ERR_DISCONNECTED | UNDEFINED))) {
1855                        dev_err(&vscsi->dev, "client crq full too long\n");
1856                        ibmvscsis_post_disconnect(vscsi,
1857                                                  ERR_DISCONNECT_RECONNECT,
1858                                                  0);
1859                }
1860        }
1861}
1862
1863/**
1864 * ibmvscsis_send_messages() - Send a Response
1865 * @vscsi:      Pointer to our adapter structure
1866 *
1867 * Send a response, first checking the waiting queue. Responses are
1868 * sent in the order they are received. If a response cannot be sent,
1869 * because the client queue is full, it stays on the waiting queue.
1870 *
1871 * PRECONDITION:
1872 *      Called with interrupt lock held
1873 */
1874static void ibmvscsis_send_messages(struct scsi_info *vscsi)
1875{
1876        u64 msg_hi = 0;
1877        /* Note: do not attempt to access IU_data_ptr through this
1878         * pointer; it is not valid.
1879         */
1880        struct viosrp_crq *crq = (struct viosrp_crq *)&msg_hi;
1881        struct ibmvscsis_cmd *cmd, *nxt;
1882        struct iu_entry *iue;
1883        long rc = ADAPT_SUCCESS;
1884        bool retry = false;
1885
1886        if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
1887                do {
1888                        retry = false;
1889                        list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp,
1890                                                 list) {
1891                                /*
1892                                 * Check to make sure abort cmd gets processed
1893                                 * prior to the abort tmr cmd
1894                                 */
1895                                if (cmd->flags & DELAY_SEND)
1896                                        continue;
1897
1898                                if (cmd->abort_cmd) {
1899                                        retry = true;
1900                                        cmd->abort_cmd->flags &= ~(DELAY_SEND);
1901                                        cmd->abort_cmd = NULL;
1902                                }
1903
1904                                /*
1905                                 * For CMD_T_ABORTED without CMD_T_TAS, and
1906                                 * for the case where LIO handled an
1907                                 * ABORT_TASK by sending
1908                                 * TMR_TASK_DOES_NOT_EXIST, do not send a
1909                                 * response; one has already been sent.
1910                                 */
1911                                if (cmd->se_cmd.transport_state & CMD_T_ABORTED &&
1912                                    !(cmd->se_cmd.transport_state & CMD_T_TAS)) {
1913                                        list_del(&cmd->list);
1914                                        ibmvscsis_free_cmd_resources(vscsi,
1915                                                                     cmd);
1916                                        /*
1917                                         * When an op is successfully aborted
1918                                         * through LIO, we want to increment
1919                                         * the vscsi credit so that, even
1920                                         * though no rsp is sent for the
1921                                         * original scsi abort op
1922                                         * (h_send_crq), the credit is still
1923                                         * returned correctly with the tm rsp
1924                                         * for the abort: 1 credit for the
1925                                         * abort tm rsp and 1 credit for the
1926                                         * aborted scsi op.  Thus we need to
1927                                         * increment here.  We also increment
1928                                         * the credit here to make sure the
1929                                         * cmd is actually released first;
1930                                         * otherwise the client may think it
1931                                         * can send a new cmd, and we could
1932                                         * find ourselves short of cmd elements.
1933                                         */
1934                                        vscsi->credit += 1;
1935                                } else {
1936                                        iue = cmd->iue;
1937
1938                                        crq->valid = VALID_CMD_RESP_EL;
1939                                        crq->format = cmd->rsp.format;
1940
1941                                        if (cmd->flags & CMD_FAST_FAIL)
1942                                                crq->status = VIOSRP_ADAPTER_FAIL;
1943
1944                                        crq->IU_length = cpu_to_be16(cmd->rsp.len);
1945
1946                                        rc = h_send_crq(vscsi->dma_dev->unit_address,
1947                                                        be64_to_cpu(msg_hi),
1948                                                        be64_to_cpu(cmd->rsp.tag));
1949
1950                                        pr_debug("send_messages: cmd %p, tag 0x%llx, rc %ld\n",
1951                                                 cmd, be64_to_cpu(cmd->rsp.tag), rc);
1952
1953                                        /* if all ok free up the command
1954                                         * element resources
1955                                         */
1956                                        if (rc == H_SUCCESS) {
1957                                                /* some movement has occurred */
1958                                                vscsi->rsp_q_timer.timer_pops = 0;
1959                                                list_del(&cmd->list);
1960
1961                                                ibmvscsis_free_cmd_resources(vscsi,
1962                                                                             cmd);
1963                                        } else {
1964                                                srp_snd_msg_failed(vscsi, rc);
1965                                                break;
1966                                        }
1967                                }
1968                        }
1969                } while (retry);
1970
1971                if (!rc) {
1972                        /*
1973                         * The timer could pop with the queue empty.  If
1974                         * this happens, rc will always indicate a
1975                         * success; clear the pop count.
1976                         */
1977                        vscsi->rsp_q_timer.timer_pops = 0;
1978                }
1979        } else {
1980                ibmvscsis_free_cmd_qs(vscsi);
1981        }
1982}
1983
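/*
 * Editor's sketch (hypothetical, not the driver's real timer handler):
 * based on the fields used in srp_snd_msg_failed() above, an expiry path
 * for rsp_q_timer is expected to mark the timer as no longer started,
 * count the pop, and retry the queued responses under the interrupt lock.
 */
static void ibmvscsis_example_rsp_q_timer_expired(struct scsi_info *vscsi)
{
        spin_lock_bh(&vscsi->intr_lock);
        vscsi->rsp_q_timer.started = false;
        vscsi->rsp_q_timer.timer_pops += 1;
        ibmvscsis_send_messages(vscsi);
        spin_unlock_bh(&vscsi->intr_lock);
}
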
1984/* Called with intr lock held */
1985static void ibmvscsis_send_mad_resp(struct scsi_info *vscsi,
1986                                    struct ibmvscsis_cmd *cmd,
1987                                    struct viosrp_crq *crq)
1988{
1989        struct iu_entry *iue = cmd->iue;
1990        struct mad_common *mad = (struct mad_common *)&vio_iu(iue)->mad;
1991        uint flag_bits = 0;
1992        long rc;
1993
1994        dma_wmb();
1995        rc = h_copy_rdma(sizeof(struct mad_common),
1996                         vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma,
1997                         vscsi->dds.window[REMOTE].liobn,
1998                         be64_to_cpu(crq->IU_data_ptr));
1999        if (!rc) {
2000                cmd->rsp.format = VIOSRP_MAD_FORMAT;
2001                cmd->rsp.len = sizeof(struct mad_common);
2002                cmd->rsp.tag = mad->tag;
2003                list_add_tail(&cmd->list, &vscsi->waiting_rsp);
2004                ibmvscsis_send_messages(vscsi);
2005        } else {
2006                pr_debug("Error sending mad response, rc %ld\n", rc);
2007                if (rc == H_PERMISSION) {
2008                        if (connection_broken(vscsi))
2009                                flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
2010                }
2011                dev_err(&vscsi->dev, "mad: failed to copy to client, rc %ld\n",
2012                        rc);
2013
2014                ibmvscsis_free_cmd_resources(vscsi, cmd);
2015                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
2016                                          flag_bits);
2017        }
2018}
2019
2020/**
2021 * ibmvscsis_mad() - Service a MAnagement Datagram (MAD).
2022 * @vscsi:      Pointer to our adapter structure
2023 * @crq:        Pointer to the CRQ entry containing the MAD request
2024 *
2025 * EXECUTION ENVIRONMENT:
2026 *      Interrupt, called with adapter lock held
2027 */
2028static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
2029{
2030        struct iu_entry *iue;
2031        struct ibmvscsis_cmd *cmd;
2032        struct mad_common *mad;
2033        long rc = ADAPT_SUCCESS;
2034
2035        switch (vscsi->state) {
2036                /*
2037                 * We have not exchanged Init Msgs yet, so this MAD was sent
2038                 * before the last Transport Event; client will not be
2039                 * expecting a response.
2040                 */
2041        case WAIT_CONNECTION:
2042                pr_debug("mad: in Wait Connection state, ignoring MAD, flags %d\n",
2043                         vscsi->flags);
2044                return ADAPT_SUCCESS;
2045
2046        case SRP_PROCESSING:
2047        case CONNECTED:
2048                break;
2049
2050                /*
2051                 * We should never get here while we're in these states.
2052                 * Just log an error and get out.
2053                 */
2054        case UNCONFIGURING:
2055        case WAIT_IDLE:
2056        case ERR_DISCONNECT:
2057        case ERR_DISCONNECT_RECONNECT:
2058        default:
2059                dev_err(&vscsi->dev, "mad: invalid adapter state %d for mad\n",
2060                        vscsi->state);
2061                return ADAPT_SUCCESS;
2062        }
2063
2064        cmd = ibmvscsis_get_free_cmd(vscsi);
2065        if (!cmd) {
2066                dev_err(&vscsi->dev, "mad: failed to get cmd, debit %d\n",
2067                        vscsi->debit);
2068                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2069                return ERROR;
2070        }
2071        iue = cmd->iue;
2072        cmd->type = ADAPTER_MAD;
2073
2074        rc = ibmvscsis_copy_crq_packet(vscsi, cmd, crq);
2075        if (!rc) {
2076                mad = (struct mad_common *)&vio_iu(iue)->mad;
2077
2078                pr_debug("mad: type %d\n", be32_to_cpu(mad->type));
2079
2080                rc = ibmvscsis_process_mad(vscsi, iue);
2081
2082                pr_debug("mad: status %hd, rc %ld\n", be16_to_cpu(mad->status),
2083                         rc);
2084
2085                if (!rc)
2086                        ibmvscsis_send_mad_resp(vscsi, cmd, crq);
2087        } else {
2088                ibmvscsis_free_cmd_resources(vscsi, cmd);
2089        }
2090
2091        pr_debug("Leaving mad, rc %ld\n", rc);
2092        return rc;
2093}
2094
2095/**
2096 * ibmvscsis_login_rsp() - Create/copy a login response notice to the client
2097 * @vscsi:      Pointer to our adapter structure
2098 * @cmd:        Pointer to the command for the SRP Login request
2099 *
2100 * EXECUTION ENVIRONMENT:
2101 *      Interrupt, interrupt lock held
2102 */
2103static long ibmvscsis_login_rsp(struct scsi_info *vscsi,
2104                                struct ibmvscsis_cmd *cmd)
2105{
2106        struct iu_entry *iue = cmd->iue;
2107        struct srp_login_rsp *rsp = &vio_iu(iue)->srp.login_rsp;
2108        struct format_code *fmt;
2109        uint flag_bits = 0;
2110        long rc = ADAPT_SUCCESS;
2111
2112        memset(rsp, 0, sizeof(struct srp_login_rsp));
2113
2114        rsp->opcode = SRP_LOGIN_RSP;
2115        rsp->req_lim_delta = cpu_to_be32(vscsi->request_limit);
2116        rsp->tag = cmd->rsp.tag;
2117        rsp->max_it_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
2118        rsp->max_ti_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
2119        fmt = (struct format_code *)&rsp->buf_fmt;
2120        fmt->buffers = SUPPORTED_FORMATS;
2121        vscsi->credit = 0;
2122
2123        cmd->rsp.len = sizeof(struct srp_login_rsp);
2124
2125        dma_wmb();
2126        rc = h_copy_rdma(cmd->rsp.len, vscsi->dds.window[LOCAL].liobn,
2127                         iue->sbuf->dma, vscsi->dds.window[REMOTE].liobn,
2128                         be64_to_cpu(iue->remote_token));
2129
2130        switch (rc) {
2131        case H_SUCCESS:
2132                break;
2133
2134        case H_PERMISSION:
2135                if (connection_broken(vscsi))
2136                        flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
2137                dev_err(&vscsi->dev, "login_rsp: error copying to client, rc %ld\n",
2138                        rc);
2139                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
2140                                          flag_bits);
2141                break;
2142        case H_SOURCE_PARM:
2143        case H_DEST_PARM:
2144        default:
2145                dev_err(&vscsi->dev, "login_rsp: error copying to client, rc %ld\n",
2146                        rc);
2147                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2148                break;
2149        }
2150
2151        return rc;
2152}
2153
2154/**
2155 * ibmvscsis_srp_login_rej() - Create/copy a login rejection notice to client
2156 * @vscsi:      Pointer to our adapter structure
2157 * @cmd:        Pointer to the command for the SRP Login request
2158 * @reason:     The reason the SRP Login is being rejected, per SRP protocol
2159 *
2160 * EXECUTION ENVIRONMENT:
2161 *      Interrupt, interrupt lock held
2162 */
2163static long ibmvscsis_srp_login_rej(struct scsi_info *vscsi,
2164                                    struct ibmvscsis_cmd *cmd, u32 reason)
2165{
2166        struct iu_entry *iue = cmd->iue;
2167        struct srp_login_rej *rej = &vio_iu(iue)->srp.login_rej;
2168        struct format_code *fmt;
2169        uint flag_bits = 0;
2170        long rc = ADAPT_SUCCESS;
2171
2172        memset(rej, 0, sizeof(*rej));
2173
2174        rej->opcode = SRP_LOGIN_REJ;
2175        rej->reason = cpu_to_be32(reason);
2176        rej->tag = cmd->rsp.tag;
2177        fmt = (struct format_code *)&rej->buf_fmt;
2178        fmt->buffers = SUPPORTED_FORMATS;
2179
2180        cmd->rsp.len = sizeof(*rej);
2181
2182        dma_wmb();
2183        rc = h_copy_rdma(cmd->rsp.len, vscsi->dds.window[LOCAL].liobn,
2184                         iue->sbuf->dma, vscsi->dds.window[REMOTE].liobn,
2185                         be64_to_cpu(iue->remote_token));
2186
2187        switch (rc) {
2188        case H_SUCCESS:
2189                break;
2190        case H_PERMISSION:
2191                if (connection_broken(vscsi))
2192                        flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
2193                dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n",
2194                        rc);
2195                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
2196                                          flag_bits);
2197                break;
2198        case H_SOURCE_PARM:
2199        case H_DEST_PARM:
2200        default:
2201                dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n",
2202                        rc);
2203                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2204                break;
2205        }
2206
2207        return rc;
2208}
2209
2210static int ibmvscsis_make_nexus(struct ibmvscsis_tport *tport)
2211{
2212        char *name = tport->tport_name;
2213        struct ibmvscsis_nexus *nexus;
2214        int rc;
2215
2216        if (tport->ibmv_nexus) {
2217                pr_debug("tport->ibmv_nexus already exists\n");
2218                return 0;
2219        }
2220
2221        nexus = kzalloc(sizeof(*nexus), GFP_KERNEL);
2222        if (!nexus) {
2223                pr_err("Unable to allocate struct ibmvscsis_nexus\n");
2224                return -ENOMEM;
2225        }
2226
2227        nexus->se_sess = target_alloc_session(&tport->se_tpg, 0, 0,
2228                                              TARGET_PROT_NORMAL, name, nexus,
2229                                              NULL);
2230        if (IS_ERR(nexus->se_sess)) {
2231                rc = PTR_ERR(nexus->se_sess);
2232                goto transport_init_fail;
2233        }
2234
2235        tport->ibmv_nexus = nexus;
2236
2237        return 0;
2238
2239transport_init_fail:
2240        kfree(nexus);
2241        return rc;
2242}
2243
2244static int ibmvscsis_drop_nexus(struct ibmvscsis_tport *tport)
2245{
2246        struct se_session *se_sess;
2247        struct ibmvscsis_nexus *nexus;
2248
2249        nexus = tport->ibmv_nexus;
2250        if (!nexus)
2251                return -ENODEV;
2252
2253        se_sess = nexus->se_sess;
2254        if (!se_sess)
2255                return -ENODEV;
2256
2257        /*
2258         * Release the SCSI I_T Nexus to the emulated ibmvscsis Target Port
2259         */
2260        target_wait_for_sess_cmds(se_sess);
2261        transport_deregister_session_configfs(se_sess);
2262        transport_deregister_session(se_sess);
2263        tport->ibmv_nexus = NULL;
2264        kfree(nexus);
2265
2266        return 0;
2267}
2268
2269/**
2270 * ibmvscsis_srp_login() - Process an SRP Login Request
2271 * @vscsi:      Pointer to our adapter structure
2272 * @cmd:        Command element to use to process the SRP Login request
2273 * @crq:        Pointer to CRQ entry containing the SRP Login request
2274 *
2275 * EXECUTION ENVIRONMENT:
2276 *      Interrupt, called with interrupt lock held
2277 */
2278static long ibmvscsis_srp_login(struct scsi_info *vscsi,
2279                                struct ibmvscsis_cmd *cmd,
2280                                struct viosrp_crq *crq)
2281{
2282        struct iu_entry *iue = cmd->iue;
2283        struct srp_login_req *req = &vio_iu(iue)->srp.login_req;
2284        struct port_id {
2285                __be64 id_extension;
2286                __be64 io_guid;
2287        } *iport, *tport;
2288        struct format_code *fmt;
2289        u32 reason = 0x0;
2290        long rc = ADAPT_SUCCESS;
2291
2292        iport = (struct port_id *)req->initiator_port_id;
2293        tport = (struct port_id *)req->target_port_id;
2294        fmt = (struct format_code *)&req->req_buf_fmt;
2295        if (be32_to_cpu(req->req_it_iu_len) > SRP_MAX_IU_LEN)
2296                reason = SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE;
2297        else if (be32_to_cpu(req->req_it_iu_len) < 64)
2298                reason = SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL;
2299        else if ((be64_to_cpu(iport->id_extension) > (MAX_NUM_PORTS - 1)) ||
2300                 (be64_to_cpu(tport->id_extension) > (MAX_NUM_PORTS - 1)))
2301                reason = SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL;
2302        else if (req->req_flags & SRP_MULTICHAN_MULTI)
2303                reason = SRP_LOGIN_REJ_MULTI_CHANNEL_UNSUPPORTED;
2304        else if (fmt->buffers & (~SUPPORTED_FORMATS))
2305                reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT;
2306        else if ((fmt->buffers & SUPPORTED_FORMATS) == 0)
2307                reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT;
2308
2309        if (vscsi->state == SRP_PROCESSING)
2310                reason = SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED;
2311
2312        rc = ibmvscsis_make_nexus(&vscsi->tport);
2313        if (rc)
2314                reason = SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL;
2315
2316        cmd->rsp.format = VIOSRP_SRP_FORMAT;
2317        cmd->rsp.tag = req->tag;
2318
2319        pr_debug("srp_login: reason 0x%x\n", reason);
2320
2321        if (reason)
2322                rc = ibmvscsis_srp_login_rej(vscsi, cmd, reason);
2323        else
2324                rc = ibmvscsis_login_rsp(vscsi, cmd);
2325
2326        if (!rc) {
2327                if (!reason)
2328                        vscsi->state = SRP_PROCESSING;
2329
2330                list_add_tail(&cmd->list, &vscsi->waiting_rsp);
2331                ibmvscsis_send_messages(vscsi);
2332        } else {
2333                ibmvscsis_free_cmd_resources(vscsi, cmd);
2334        }
2335
2336        pr_debug("Leaving srp_login, rc %ld\n", rc);
2337        return rc;
2338}
2339
2340/**
2341 * ibmvscsis_srp_i_logout() - Helper Function to close I_T Nexus
2342 * @vscsi:      Pointer to our adapter structure
2343 * @cmd:        Command element to use to process the Implicit Logout request
2344 * @crq:        Pointer to CRQ entry containing the Implicit Logout request
2345 *
2346 * Do the logic to close the I_T nexus.  This function may not
2347 * behave to specification.
2348 * behave according to the specification.
2349 * EXECUTION ENVIRONMENT:
2350 *      Interrupt, interrupt lock held
2351 */
2352static long ibmvscsis_srp_i_logout(struct scsi_info *vscsi,
2353                                   struct ibmvscsis_cmd *cmd,
2354                                   struct viosrp_crq *crq)
2355{
2356        struct iu_entry *iue = cmd->iue;
2357        struct srp_i_logout *log_out = &vio_iu(iue)->srp.i_logout;
2358        long rc = ADAPT_SUCCESS;
2359
2360        if ((vscsi->debit > 0) || !list_empty(&vscsi->schedule_q) ||
2361            !list_empty(&vscsi->waiting_rsp)) {
2362                dev_err(&vscsi->dev, "i_logout: outstanding work\n");
2363                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
2364        } else {
2365                cmd->rsp.format = SRP_FORMAT;
2366                cmd->rsp.tag = log_out->tag;
2367                cmd->rsp.len = sizeof(struct mad_common);
2368                list_add_tail(&cmd->list, &vscsi->waiting_rsp);
2369                ibmvscsis_send_messages(vscsi);
2370
2371                ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, 0);
2372        }
2373
2374        return rc;
2375}
2376
2377/* Called with intr lock held */
2378static void ibmvscsis_srp_cmd(struct scsi_info *vscsi, struct viosrp_crq *crq)
2379{
2380        struct ibmvscsis_cmd *cmd;
2381        struct iu_entry *iue;
2382        struct srp_cmd *srp;
2383        struct srp_tsk_mgmt *tsk;
2384        long rc;
2385
2386        if (vscsi->request_limit - vscsi->debit <= 0) {
2387                /* Client has exceeded request limit */
2388                dev_err(&vscsi->dev, "Client exceeded the request limit (%d), debit %d\n",
2389                        vscsi->request_limit, vscsi->debit);
2390                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2391                return;
2392        }
2393
2394        cmd = ibmvscsis_get_free_cmd(vscsi);
2395        if (!cmd) {
2396                dev_err(&vscsi->dev, "srp_cmd failed to get cmd, debit %d\n",
2397                        vscsi->debit);
2398                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2399                return;
2400        }
2401        iue = cmd->iue;
2402        srp = &vio_iu(iue)->srp.cmd;
2403
2404        rc = ibmvscsis_copy_crq_packet(vscsi, cmd, crq);
2405        if (rc) {
2406                ibmvscsis_free_cmd_resources(vscsi, cmd);
2407                return;
2408        }
2409
2410        if (vscsi->state == SRP_PROCESSING) {
2411                switch (srp->opcode) {
2412                case SRP_LOGIN_REQ:
2413                        rc = ibmvscsis_srp_login(vscsi, cmd, crq);
2414                        break;
2415
2416                case SRP_TSK_MGMT:
2417                        tsk = &vio_iu(iue)->srp.tsk_mgmt;
2418                        pr_debug("tsk_mgmt tag: %llu (0x%llx)\n", tsk->tag,
2419                                 tsk->tag);
2420                        cmd->rsp.tag = tsk->tag;
2421                        vscsi->debit += 1;
2422                        cmd->type = TASK_MANAGEMENT;
2423                        list_add_tail(&cmd->list, &vscsi->schedule_q);
2424                        queue_work(vscsi->work_q, &cmd->work);
2425                        break;
2426
2427                case SRP_CMD:
2428                        pr_debug("srp_cmd tag: %llu (0x%llx)\n", srp->tag,
2429                                 srp->tag);
2430                        cmd->rsp.tag = srp->tag;
2431                        vscsi->debit += 1;
2432                        cmd->type = SCSI_CDB;
2433                        /*
2434                         * We want to keep track of work waiting for
2435                         * the workqueue.
2436                         */
2437                        list_add_tail(&cmd->list, &vscsi->schedule_q);
2438                        queue_work(vscsi->work_q, &cmd->work);
2439                        break;
2440
2441                case SRP_I_LOGOUT:
2442                        rc = ibmvscsis_srp_i_logout(vscsi, cmd, crq);
2443                        break;
2444
2445                case SRP_CRED_RSP:
2446                case SRP_AER_RSP:
2447                default:
2448                        ibmvscsis_free_cmd_resources(vscsi, cmd);
2449                        dev_err(&vscsi->dev, "invalid srp cmd, opcode %d\n",
2450                                (uint)srp->opcode);
2451                        ibmvscsis_post_disconnect(vscsi,
2452                                                  ERR_DISCONNECT_RECONNECT, 0);
2453                        break;
2454                }
2455        } else if (srp->opcode == SRP_LOGIN_REQ && vscsi->state == CONNECTED) {
2456                rc = ibmvscsis_srp_login(vscsi, cmd, crq);
2457        } else {
2458                ibmvscsis_free_cmd_resources(vscsi, cmd);
2459                dev_err(&vscsi->dev, "Invalid state %d to handle srp cmd\n",
2460                        vscsi->state);
2461                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2462        }
2463}
2464
2465/**
2466 * ibmvscsis_ping_response() - Respond to a ping request
2467 * @vscsi:      Pointer to our adapter structure
2468 *
2469 * Let the client know that the server is alive and waiting on
2470 * its native I/O stack.
2471 * If any error occurs when queuing the ping response, the client
2472 * is either not accepting or not receiving interrupts; disconnect
2473 * with an error.
2474 *
2475 * EXECUTION ENVIRONMENT:
2476 *      Interrupt, interrupt lock held
2477 */
2478static long ibmvscsis_ping_response(struct scsi_info *vscsi)
2479{
2480        struct viosrp_crq *crq;
2481        u64 buffer[2] = { 0, 0 };
2482        long rc;
2483
2484        crq = (struct viosrp_crq *)&buffer;
2485        crq->valid = VALID_CMD_RESP_EL;
2486        crq->format = (u8)MESSAGE_IN_CRQ;
2487        crq->status = PING_RESPONSE;
2488
2489        rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]),
2490                        cpu_to_be64(buffer[MSG_LOW]));
2491
2492        switch (rc) {
2493        case H_SUCCESS:
2494                break;
2495        case H_CLOSED:
2496                vscsi->flags |= CLIENT_FAILED;
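                    /* Fall through */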
2497        case H_DROPPED:
2498                vscsi->flags |= RESPONSE_Q_DOWN;
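                    /* Fall through */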
2499        case H_REMOTE_PARM:
2500                dev_err(&vscsi->dev, "ping_response: h_send_crq failed, rc %ld\n",
2501                        rc);
2502                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2503                break;
2504        default:
2505                dev_err(&vscsi->dev, "ping_response: h_send_crq returned unknown rc %ld\n",
2506                        rc);
2507                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
2508                break;
2509        }
2510
2511        return rc;
2512}
2513
2514/**
2515 * ibmvscsis_parse_command() - Parse an element taken from the cmd rsp queue.
2516 * @vscsi:      Pointer to our adapter structure
2517 * @crq:        Pointer to CRQ element containing the SRP request
2518 *
2519 * This function will return success if the command queue element is valid
2520 * and the srp iu or MAD request it pointed to was also valid.  That does
2521 * not mean that an error was not returned to the client.
2522 *
2523 * EXECUTION ENVIRONMENT:
2524 *      Interrupt, intr lock held
2525 */
2526static long ibmvscsis_parse_command(struct scsi_info *vscsi,
2527                                    struct viosrp_crq *crq)
2528{
2529        long rc = ADAPT_SUCCESS;
2530
2531        switch (crq->valid) {
2532        case VALID_CMD_RESP_EL:
2533                switch (crq->format) {
2534                case OS400_FORMAT:
2535                case AIX_FORMAT:
2536                case LINUX_FORMAT:
2537                case MAD_FORMAT:
2538                        if (vscsi->flags & PROCESSING_MAD) {
2539                                rc = ERROR;
2540                                dev_err(&vscsi->dev, "parse_command: already processing mad\n");
2541                                ibmvscsis_post_disconnect(vscsi,
2542                                                       ERR_DISCONNECT_RECONNECT,
2543                                                       0);
2544                        } else {
2545                                vscsi->flags |= PROCESSING_MAD;
2546                                rc = ibmvscsis_mad(vscsi, crq);
2547                        }
2548                        break;
2549
2550                case SRP_FORMAT:
2551                        ibmvscsis_srp_cmd(vscsi, crq);
2552                        break;
2553
2554                case MESSAGE_IN_CRQ:
2555                        if (crq->status == PING)
2556                                ibmvscsis_ping_response(vscsi);
2557                        break;
2558
2559                default:
2560                        dev_err(&vscsi->dev, "parse_command: invalid format %d\n",
2561                                (uint)crq->format);
2562                        ibmvscsis_post_disconnect(vscsi,
2563                                                  ERR_DISCONNECT_RECONNECT, 0);
2564                        break;
2565                }
2566                break;
2567
2568        case VALID_TRANS_EVENT:
2569                rc = ibmvscsis_trans_event(vscsi, crq);
2570                break;
2571
2572        case VALID_INIT_MSG:
2573                rc = ibmvscsis_init_msg(vscsi, crq);
2574                break;
2575
2576        default:
2577                dev_err(&vscsi->dev, "parse_command: invalid valid field %d\n",
2578                        (uint)crq->valid);
2579                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2580                break;
2581        }
2582
2583        /*
2584         * Return only what the interrupt handler cares
2585         * about. Most errors we keep right on trucking.
2586         */
2587        rc = vscsi->flags & SCHEDULE_DISCONNECT;
2588
2589        return rc;
2590}
2591
2592static int read_dma_window(struct scsi_info *vscsi)
2593{
2594        struct vio_dev *vdev = vscsi->dma_dev;
2595        const __be32 *dma_window;
2596        const __be32 *prop;
2597
2598        /* TODO Using of_parse_dma_window would be better, but it doesn't give
2599         * a way to read multiple windows without already knowing the size of
2600         * a window or the number of windows.
2601         */
2602        dma_window = (const __be32 *)vio_get_attribute(vdev,
2603                                                       "ibm,my-dma-window",
2604                                                       NULL);
2605        if (!dma_window) {
2606                pr_err("Couldn't find ibm,my-dma-window property\n");
2607                return -1;
2608        }
2609
2610        vscsi->dds.window[LOCAL].liobn = be32_to_cpu(*dma_window);
2611        dma_window++;
2612
2613        prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells",
2614                                                 NULL);
2615        if (!prop) {
2616                pr_warn("Couldn't find ibm,#dma-address-cells property\n");
2617                dma_window++;
2618        } else {
2619                dma_window += be32_to_cpu(*prop);
2620        }
2621
2622        prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells",
2623                                                 NULL);
2624        if (!prop) {
2625                pr_warn("Couldn't find ibm,#dma-size-cells property\n");
2626                dma_window++;
2627        } else {
2628                dma_window += be32_to_cpu(*prop);
2629        }
2630
2631        /* dma_window should point to the second window now */
2632        vscsi->dds.window[REMOTE].liobn = be32_to_cpu(*dma_window);
2633
2634        return 0;
2635}
2636
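/*
 * Editor's sketch (hypothetical helper, not in the upstream driver): the
 * cell arithmetic performed by read_dma_window() above.  Each window in
 * "ibm,my-dma-window" is <liobn, address, size>, where the address and
 * size widths come from ibm,#dma-address-cells and ibm,#dma-size-cells
 * (assumed to be one cell each when the properties are missing), so the
 * second window's liobn starts 1 + addr_cells + size_cells cells after
 * the first.
 */
static const __be32 *example_second_window(const __be32 *dma_window,
                                           u32 addr_cells, u32 size_cells)
{
        return dma_window + 1 + addr_cells + size_cells;
}
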
2637static struct ibmvscsis_tport *ibmvscsis_lookup_port(const char *name)
2638{
2639        struct ibmvscsis_tport *tport = NULL;
2640        struct vio_dev *vdev;
2641        struct scsi_info *vscsi;
2642
2643        spin_lock_bh(&ibmvscsis_dev_lock);
2644        list_for_each_entry(vscsi, &ibmvscsis_dev_list, list) {
2645                vdev = vscsi->dma_dev;
2646                if (!strcmp(dev_name(&vdev->dev), name)) {
2647                        tport = &vscsi->tport;
2648                        break;
2649                }
2650        }
2651        spin_unlock_bh(&ibmvscsis_dev_lock);
2652
2653        return tport;
2654}
2655
2656/**
2657 * ibmvscsis_parse_cmd() - Parse SRP Command
2658 * @vscsi:      Pointer to our adapter structure
2659 * @cmd:        Pointer to command element with SRP command
2660 *
2661 * Parse the srp command; if it is valid then submit it to tcm.
2662 * Note: The return code does not reflect the status of the SCSI CDB.
2663 *
2664 * EXECUTION ENVIRONMENT:
2665 *      Process level
2666 */
2667static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
2668                                struct ibmvscsis_cmd *cmd)
2669{
2670        struct iu_entry *iue = cmd->iue;
2671        struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf;
2672        struct ibmvscsis_nexus *nexus;
2673        u64 data_len = 0;
2674        enum dma_data_direction dir;
2675        int attr = 0;
2676        int rc = 0;
2677
2678        nexus = vscsi->tport.ibmv_nexus;
2679        /*
2680         * additional length in bytes.  Note that the SRP spec says that
2681         * additional length is in 4-byte words, but technically the
2682         * additional length field is only the upper 6 bits of the byte.
2683         * The lower 2 bits are reserved.  If the lower 2 bits are 0 (as
2684         * all reserved fields should be), then interpreting the byte as
2685         * an int will yield the length in bytes.
2686         */
2687        if (srp->add_cdb_len & 0x03) {
2688                dev_err(&vscsi->dev, "parse_cmd: reserved bits set in IU\n");
2689                spin_lock_bh(&vscsi->intr_lock);
2690                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2691                ibmvscsis_free_cmd_resources(vscsi, cmd);
2692                spin_unlock_bh(&vscsi->intr_lock);
2693                return;
2694        }
2695
2696        if (srp_get_desc_table(srp, &dir, &data_len)) {
2697                dev_err(&vscsi->dev, "0x%llx: parsing SRP descriptor table failed.\n",
2698                        srp->tag);
2699                goto fail;
2700        }
2701
2702        cmd->rsp.sol_not = srp->sol_not;
2703
2704        switch (srp->task_attr) {
2705        case SRP_SIMPLE_TASK:
2706                attr = TCM_SIMPLE_TAG;
2707                break;
2708        case SRP_ORDERED_TASK:
2709                attr = TCM_ORDERED_TAG;
2710                break;
2711        case SRP_HEAD_TASK:
2712                attr = TCM_HEAD_TAG;
2713                break;
2714        case SRP_ACA_TASK:
2715                attr = TCM_ACA_TAG;
2716                break;
2717        default:
2718                dev_err(&vscsi->dev, "Invalid task attribute %d\n",
2719                        srp->task_attr);
2720                goto fail;
2721        }
2722
2723        cmd->se_cmd.tag = be64_to_cpu(srp->tag);
2724
2725        spin_lock_bh(&vscsi->intr_lock);
2726        list_add_tail(&cmd->list, &vscsi->active_q);
2727        spin_unlock_bh(&vscsi->intr_lock);
2728
2729        srp->lun.scsi_lun[0] &= 0x3f;
2730
2731        rc = target_submit_cmd(&cmd->se_cmd, nexus->se_sess, srp->cdb,
2732                               cmd->sense_buf, scsilun_to_int(&srp->lun),
2733                               data_len, attr, dir, 0);
2734        if (rc) {
2735                dev_err(&vscsi->dev, "target_submit_cmd failed, rc %d\n", rc);
2736                spin_lock_bh(&vscsi->intr_lock);
2737                list_del(&cmd->list);
2738                ibmvscsis_free_cmd_resources(vscsi, cmd);
2739                spin_unlock_bh(&vscsi->intr_lock);
2740                goto fail;
2741        }
2742        return;
2743
2744fail:
2745        spin_lock_bh(&vscsi->intr_lock);
2746        ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2747        spin_unlock_bh(&vscsi->intr_lock);
2748}
2749
2750/**
2751 * ibmvscsis_parse_task() - Parse SRP Task Management Request
2752 * @vscsi:      Pointer to our adapter structure
2753 * @cmd:        Pointer to command element with SRP task management request
2754 *
2755 * Parse the SRP task management request; if it is valid, submit it to TCM.
2756 * Note: The return code does not reflect the status of the task management
2757 * request.
2758 *
2759 * EXECUTION ENVIRONMENT:
2760 *      Process level
2761 */
2762static void ibmvscsis_parse_task(struct scsi_info *vscsi,
2763                                 struct ibmvscsis_cmd *cmd)
2764{
2765        struct iu_entry *iue = cmd->iue;
2766        struct srp_tsk_mgmt *srp_tsk = &vio_iu(iue)->srp.tsk_mgmt;
2767        int tcm_type;
2768        u64 tag_to_abort = 0;
2769        int rc = 0;
2770        struct ibmvscsis_nexus *nexus;
2771
2772        nexus = vscsi->tport.ibmv_nexus;
2773
2774        cmd->rsp.sol_not = srp_tsk->sol_not;
2775
2776        switch (srp_tsk->tsk_mgmt_func) {
2777        case SRP_TSK_ABORT_TASK:
2778                tcm_type = TMR_ABORT_TASK;
2779                tag_to_abort = be64_to_cpu(srp_tsk->task_tag);
2780                break;
2781        case SRP_TSK_ABORT_TASK_SET:
2782                tcm_type = TMR_ABORT_TASK_SET;
2783                break;
2784        case SRP_TSK_CLEAR_TASK_SET:
2785                tcm_type = TMR_CLEAR_TASK_SET;
2786                break;
2787        case SRP_TSK_LUN_RESET:
2788                tcm_type = TMR_LUN_RESET;
2789                break;
2790        case SRP_TSK_CLEAR_ACA:
2791                tcm_type = TMR_CLEAR_ACA;
2792                break;
2793        default:
2794                dev_err(&vscsi->dev, "unknown task mgmt func %d\n",
2795                        srp_tsk->tsk_mgmt_func);
2796                cmd->se_cmd.se_tmr_req->response =
2797                        TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
2798                rc = -1;
2799                break;
2800        }
2801
2802        if (!rc) {
2803                cmd->se_cmd.tag = be64_to_cpu(srp_tsk->tag);
2804
2805                spin_lock_bh(&vscsi->intr_lock);
2806                list_add_tail(&cmd->list, &vscsi->active_q);
2807                spin_unlock_bh(&vscsi->intr_lock);
2808
2809                srp_tsk->lun.scsi_lun[0] &= 0x3f;
2810
2811                pr_debug("calling submit_tmr, func %d\n",
2812                         srp_tsk->tsk_mgmt_func);
2813                rc = target_submit_tmr(&cmd->se_cmd, nexus->se_sess, NULL,
2814                                       scsilun_to_int(&srp_tsk->lun), srp_tsk,
2815                                       tcm_type, GFP_KERNEL, tag_to_abort, 0);
2816                if (rc) {
2817                        dev_err(&vscsi->dev, "target_submit_tmr failed, rc %d\n",
2818                                rc);
2819                        spin_lock_bh(&vscsi->intr_lock);
2820                        list_del(&cmd->list);
2821                        spin_unlock_bh(&vscsi->intr_lock);
2822                        cmd->se_cmd.se_tmr_req->response =
2823                                TMR_FUNCTION_REJECTED;
2824                }
2825        }
2826
2827        if (rc)
2828                transport_send_check_condition_and_sense(&cmd->se_cmd, 0, 0);
2829}
2830
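    /*
     * Work queue handler for a queued command element. The command is
     * removed from the schedule_q; if a disconnect is in progress its
     * resources are freed (and the idle waiter is completed once both the
     * active_q and schedule_q drain), otherwise the command is dispatched
     * to the SRP CDB or task management parser.
     */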
2831static void ibmvscsis_scheduler(struct work_struct *work)
2832{
2833        struct ibmvscsis_cmd *cmd = container_of(work, struct ibmvscsis_cmd,
2834                                                 work);
2835        struct scsi_info *vscsi = cmd->adapter;
2836
2837        spin_lock_bh(&vscsi->intr_lock);
2838
2839        /* Remove from schedule_q */
2840        list_del(&cmd->list);
2841
2842        /* Don't submit cmd if we're disconnecting */
2843        if (vscsi->flags & (SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED)) {
2844                ibmvscsis_free_cmd_resources(vscsi, cmd);
2845
2846                /* ibmvscsis_disconnect might be waiting for us */
2847                if (list_empty(&vscsi->active_q) &&
2848                    list_empty(&vscsi->schedule_q) &&
2849                    (vscsi->flags & WAIT_FOR_IDLE)) {
2850                        vscsi->flags &= ~WAIT_FOR_IDLE;
2851                        complete(&vscsi->wait_idle);
2852                }
2853
2854                spin_unlock_bh(&vscsi->intr_lock);
2855                return;
2856        }
2857
2858        spin_unlock_bh(&vscsi->intr_lock);
2859
2860        switch (cmd->type) {
2861        case SCSI_CDB:
2862                ibmvscsis_parse_cmd(vscsi, cmd);
2863                break;
2864        case TASK_MANAGEMENT:
2865                ibmvscsis_parse_task(vscsi, cmd);
2866                break;
2867        default:
2868                dev_err(&vscsi->dev, "scheduler, invalid cmd type %d\n",
2869                        cmd->type);
2870                spin_lock_bh(&vscsi->intr_lock);
2871                ibmvscsis_free_cmd_resources(vscsi, cmd);
2872                spin_unlock_bh(&vscsi->intr_lock);
2873                break;
2874        }
2875}
2876
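    /*
     * Allocate the pool of command elements, initialize the work item in
     * each one and place them all on the free_cmd list.
     */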
2877static int ibmvscsis_alloc_cmds(struct scsi_info *vscsi, int num)
2878{
2879        struct ibmvscsis_cmd *cmd;
2880        int i;
2881
2882        INIT_LIST_HEAD(&vscsi->free_cmd);
2883        vscsi->cmd_pool = kcalloc(num, sizeof(struct ibmvscsis_cmd),
2884                                  GFP_KERNEL);
2885        if (!vscsi->cmd_pool)
2886                return -ENOMEM;
2887
2888        for (i = 0, cmd = (struct ibmvscsis_cmd *)vscsi->cmd_pool; i < num;
2889             i++, cmd++) {
2890                cmd->abort_cmd = NULL;
2891                cmd->adapter = vscsi;
2892                INIT_WORK(&cmd->work, ibmvscsis_scheduler);
2893                list_add_tail(&cmd->list, &vscsi->free_cmd);
2894        }
2895
2896        return 0;
2897}
2898
2899static void ibmvscsis_free_cmds(struct scsi_info *vscsi)
2900{
2901        kfree(vscsi->cmd_pool);
2902        vscsi->cmd_pool = NULL;
2903        INIT_LIST_HEAD(&vscsi->free_cmd);
2904}
2905
2906/**
2907 * ibmvscsis_service_wait_q() - Service Waiting Queue
2908 * @timer:      Pointer to timer which has expired
2909 *
2910 * This routine is called when the timer pops to service the waiting
2911 * queue. Elements on the queue have completed, their responses have been
2912 * copied to the client, but the client's response queue was full so
2913 * the queue message could not be sent. The routine takes the interrupt
2914 * lock and calls ibmvscsis_send_messages().
2915 *
2916 * EXECUTION ENVIRONMENT:
2917 *      called at interrupt level
2918 */
2919static enum hrtimer_restart ibmvscsis_service_wait_q(struct hrtimer *timer)
2920{
2921        struct timer_cb *p_timer = container_of(timer, struct timer_cb, timer);
2922        struct scsi_info *vscsi = container_of(p_timer, struct scsi_info,
2923                                               rsp_q_timer);
2924
2925        spin_lock_bh(&vscsi->intr_lock);
2926        p_timer->timer_pops += 1;
2927        p_timer->started = false;
2928        ibmvscsis_send_messages(vscsi);
2929        spin_unlock_bh(&vscsi->intr_lock);
2930
2931        return HRTIMER_NORESTART;
2932}
2933
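    /*
     * Set up (but do not start) the hrtimer used to retry sending
     * responses when the client's response queue is full.
     */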
2934static long ibmvscsis_alloctimer(struct scsi_info *vscsi)
2935{
2936        struct timer_cb *p_timer;
2937
2938        p_timer = &vscsi->rsp_q_timer;
2939        hrtimer_init(&p_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2940
2941        p_timer->timer.function = ibmvscsis_service_wait_q;
2942        p_timer->started = false;
2943        p_timer->timer_pops = 0;
2944
2945        return ADAPT_SUCCESS;
2946}
2947
2948static void ibmvscsis_freetimer(struct scsi_info *vscsi)
2949{
2950        struct timer_cb *p_timer;
2951
2952        p_timer = &vscsi->rsp_q_timer;
2953
2954        (void)hrtimer_cancel(&p_timer->timer);
2955
2956        p_timer->started = false;
2957        p_timer->timer_pops = 0;
2958}
2959
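    /*
     * CRQ interrupt handler: disable further interrupts on the VIO device
     * and defer the real work to the ibmvscsis_handle_crq tasklet.
     */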
2960static irqreturn_t ibmvscsis_interrupt(int dummy, void *data)
2961{
2962        struct scsi_info *vscsi = data;
2963
2964        vio_disable_interrupts(vscsi->dma_dev);
2965        tasklet_schedule(&vscsi->work_task);
2966
2967        return IRQ_HANDLED;
2968}
2969
2970/**
2971 * ibmvscsis_enable_change_state() - Set new state based on enabled status
2972 * @vscsi:      Pointer to our adapter structure
2973 *
2974 * This function determines our new state now that we are enabled.  This
2975 * may involve sending an Init Complete message to the client.
2976 *
2977 * Must be called with interrupt lock held.
2978 */
2979static long ibmvscsis_enable_change_state(struct scsi_info *vscsi)
2980{
2981        int bytes;
2982        long rc = ADAPT_SUCCESS;
2983
2984        bytes = vscsi->cmd_q.size * PAGE_SIZE;
2985        rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, bytes);
2986        if (rc == H_CLOSED || rc == H_SUCCESS) {
2987                vscsi->state = WAIT_CONNECTION;
2988                rc = ibmvscsis_establish_new_q(vscsi);
2989        }
2990
2991        if (rc != ADAPT_SUCCESS) {
2992                vscsi->state = ERR_DISCONNECTED;
2993                vscsi->flags |= RESPONSE_Q_DOWN;
2994        }
2995
2996        return rc;
2997}
2998
2999/**
3000 * ibmvscsis_create_command_q() - Create Command Queue
3001 * @vscsi:      Pointer to our adapter structure
3002 * @num_cmds:   Currently unused.  In the future, may be used to determine
3003 *              the size of the CRQ.
3004 *
3005 * Allocates memory for the command queue, maps it to obtain an ioba that
3006 * the hypervisor can use, and initializes the command/response queue fields.
3007 *
3008 * EXECUTION ENVIRONMENT:
3009 *      Process level only
3010 */
3011static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds)
3012{
3013        int pages;
3014        struct vio_dev *vdev = vscsi->dma_dev;
3015
3016        /* We might support multiple pages in the future, but just 1 for now */
3017        pages = 1;
3018
3019        vscsi->cmd_q.size = pages;
3020
3021        vscsi->cmd_q.base_addr =
3022                (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
3023        if (!vscsi->cmd_q.base_addr)
3024                return -ENOMEM;
3025
3026        vscsi->cmd_q.mask = ((uint)pages * CRQ_PER_PAGE) - 1;
3027
3028        vscsi->cmd_q.crq_token = dma_map_single(&vdev->dev,
3029                                                vscsi->cmd_q.base_addr,
3030                                                PAGE_SIZE, DMA_BIDIRECTIONAL);
3031        if (dma_mapping_error(&vdev->dev, vscsi->cmd_q.crq_token)) {
3032                free_page((unsigned long)vscsi->cmd_q.base_addr);
3033                return -ENOMEM;
3034        }
3035
3036        return 0;
3037}
3038
3039/**
3040 * ibmvscsis_destroy_command_q - Destroy Command Queue
3041 * @vscsi:      Pointer to our adapter structure
3042 *
3043 * Releases memory for command queue and unmaps mapped remote memory.
3044 *
3045 * EXECUTION ENVIRONMENT:
3046 *      Process level only
3047 */
3048static void ibmvscsis_destroy_command_q(struct scsi_info *vscsi)
3049{
3050        dma_unmap_single(&vscsi->dma_dev->dev, vscsi->cmd_q.crq_token,
3051                         PAGE_SIZE, DMA_BIDIRECTIONAL);
3052        free_page((unsigned long)vscsi->cmd_q.base_addr);
3053        vscsi->cmd_q.base_addr = NULL;
3054        vscsi->state = NO_QUEUE;
3055}
3056
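    /*
     * When the fast_fail tunable is set and a READ or WRITE completes with
     * a HARDWARE ERROR sense key while either all or none of the data was
     * transferred, report NO SENSE instead and mark the command with
     * CMD_FAST_FAIL. Returns the SCSI status to place in the SRP response.
     */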
3057static u8 ibmvscsis_fast_fail(struct scsi_info *vscsi,
3058                              struct ibmvscsis_cmd *cmd)
3059{
3060        struct iu_entry *iue = cmd->iue;
3061        struct se_cmd *se_cmd = &cmd->se_cmd;
3062        struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf;
3063        struct scsi_sense_hdr sshdr;
3064        u8 rc = se_cmd->scsi_status;
3065
3066        if (vscsi->fast_fail && (READ_CMD(srp->cdb) || WRITE_CMD(srp->cdb)))
3067                if (scsi_normalize_sense(se_cmd->sense_buffer,
3068                                         se_cmd->scsi_sense_length, &sshdr))
3069                        if (sshdr.sense_key == HARDWARE_ERROR &&
3070                            (se_cmd->residual_count == 0 ||
3071                             se_cmd->residual_count == se_cmd->data_length)) {
3072                                rc = NO_SENSE;
3073                                cmd->flags |= CMD_FAST_FAIL;
3074                        }
3075
3076        return rc;
3077}
3078
3079/**
3080 * srp_build_response() - Build an SRP response buffer
3081 * @vscsi:      Pointer to our adapter structure
3082 * @cmd:        Pointer to command for which to send the response
3083 * @len_p:      Where to return the length of the IU response sent.  This
3084 *              is needed to construct the CRQ response.
3085 *
3086 * Build the SRP response buffer and copy it to the client's memory space.
3087 */
3088static long srp_build_response(struct scsi_info *vscsi,
3089                               struct ibmvscsis_cmd *cmd, uint *len_p)
3090{
3091        struct iu_entry *iue = cmd->iue;
3092        struct se_cmd *se_cmd = &cmd->se_cmd;
3093        struct srp_rsp *rsp;
3094        uint len;
3095        u32 rsp_code;
3096        char *data;
3097        u32 *tsk_status;
3098        long rc = ADAPT_SUCCESS;
3099
3100        spin_lock_bh(&vscsi->intr_lock);
3101
3102        rsp = &vio_iu(iue)->srp.rsp;
3103        len = sizeof(*rsp);
3104        memset(rsp, 0, len);
3105        data = rsp->data;
3106
3107        rsp->opcode = SRP_RSP;
3108
3109        rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
3110        rsp->tag = cmd->rsp.tag;
3111        rsp->flags = 0;
3112
3113        if (cmd->type == SCSI_CDB) {
3114                rsp->status = ibmvscsis_fast_fail(vscsi, cmd);
3115                if (rsp->status) {
3116                        pr_debug("build_resp: cmd %p, scsi status %d\n", cmd,
3117                                 (int)rsp->status);
3118                        ibmvscsis_determine_resid(se_cmd, rsp);
3119                        if (se_cmd->scsi_sense_length && se_cmd->sense_buffer) {
3120                                rsp->sense_data_len =
3121                                        cpu_to_be32(se_cmd->scsi_sense_length);
3122                                rsp->flags |= SRP_RSP_FLAG_SNSVALID;
3123                                len += se_cmd->scsi_sense_length;
3124                                memcpy(data, se_cmd->sense_buffer,
3125                                       se_cmd->scsi_sense_length);
3126                        }
3127                        rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
3128                                UCSOLNT_RESP_SHIFT;
3129                } else if (cmd->flags & CMD_FAST_FAIL) {
3130                        pr_debug("build_resp: cmd %p, fast fail\n", cmd);
3131                        rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
3132                                UCSOLNT_RESP_SHIFT;
3133                } else {
3134                        rsp->sol_not = (cmd->rsp.sol_not & SCSOLNT) >>
3135                                SCSOLNT_RESP_SHIFT;
3136                }
3137        } else {
3138                /* this is task management */
3139                rsp->status = 0;
3140                rsp->resp_data_len = cpu_to_be32(4);
3141                rsp->flags |= SRP_RSP_FLAG_RSPVALID;
3142
3143                switch (se_cmd->se_tmr_req->response) {
3144                case TMR_FUNCTION_COMPLETE:
3145                case TMR_TASK_DOES_NOT_EXIST:
3146                        rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_COMPLETE;
3147                        rsp->sol_not = (cmd->rsp.sol_not & SCSOLNT) >>
3148                                SCSOLNT_RESP_SHIFT;
3149                        break;
3150                case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
3151                case TMR_LUN_DOES_NOT_EXIST:
3152                        rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_NOT_SUPPORTED;
3153                        rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
3154                                UCSOLNT_RESP_SHIFT;
3155                        break;
3156                case TMR_FUNCTION_FAILED:
3157                case TMR_FUNCTION_REJECTED:
3158                default:
3159                        rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_FAILED;
3160                        rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
3161                                UCSOLNT_RESP_SHIFT;
3162                        break;
3163                }
3164
3165                tsk_status = (u32 *)data;
3166                *tsk_status = cpu_to_be32(rsp_code);
3167                data = (char *)(tsk_status + 1);
3168                len += 4;
3169        }
3170
3171        dma_wmb();
3172        rc = h_copy_rdma(len, vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma,
3173                         vscsi->dds.window[REMOTE].liobn,
3174                         be64_to_cpu(iue->remote_token));
3175
3176        switch (rc) {
3177        case H_SUCCESS:
3178                vscsi->credit = 0;
3179                *len_p = len;
3180                break;
3181        case H_PERMISSION:
3182                if (connection_broken(vscsi))
3183                        vscsi->flags |= RESPONSE_Q_DOWN | CLIENT_FAILED;
3184
3185                dev_err(&vscsi->dev, "build_response: error copying to client, rc %ld, flags 0x%x, state 0x%hx\n",
3186                        rc, vscsi->flags, vscsi->state);
3187                break;
3188        case H_SOURCE_PARM:
3189        case H_DEST_PARM:
3190        default:
3191                dev_err(&vscsi->dev, "build_response: error copying to client, rc %ld\n",
3192                        rc);
3193                break;
3194        }
3195
3196        spin_unlock_bh(&vscsi->intr_lock);
3197
3198        return rc;
3199}
3200
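    /*
     * Move data between the client's SRP direct memory descriptors and the
     * local scatter/gather list, walking both in parallel and issuing
     * h_copy_rdma calls of at most max_vdma_size bytes in the requested
     * direction until the full transfer completes or an error occurs.
     */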
3201static int ibmvscsis_rdma(struct ibmvscsis_cmd *cmd, struct scatterlist *sg,
3202                          int nsg, struct srp_direct_buf *md, int nmd,
3203                          enum dma_data_direction dir, unsigned int bytes)
3204{
3205        struct iu_entry *iue = cmd->iue;
3206        struct srp_target *target = iue->target;
3207        struct scsi_info *vscsi = target->ldata;
3208        struct scatterlist *sgp;
3209        dma_addr_t client_ioba, server_ioba;
3210        ulong buf_len;
3211        ulong client_len, server_len;
3212        int md_idx;
3213        long tx_len;
3214        long rc = 0;
3215
3216        if (bytes == 0)
3217                return 0;
3218
3219        sgp = sg;
3220        client_len = 0;
3221        server_len = 0;
3222        md_idx = 0;
3223        tx_len = bytes;
3224
3225        do {
3226                if (client_len == 0) {
3227                        if (md_idx >= nmd) {
3228                                dev_err(&vscsi->dev, "rdma: ran out of client memory descriptors\n");
3229                                rc = -EIO;
3230                                break;
3231                        }
3232                        client_ioba = be64_to_cpu(md[md_idx].va);
3233                        client_len = be32_to_cpu(md[md_idx].len);
3234                }
3235                if (server_len == 0) {
3236                        if (!sgp) {
3237                                dev_err(&vscsi->dev, "rdma: ran out of scatter/gather list\n");
3238                                rc = -EIO;
3239                                break;
3240                        }
3241                        server_ioba = sg_dma_address(sgp);
3242                        server_len = sg_dma_len(sgp);
3243                }
3244
3245                buf_len = tx_len;
3246
3247                if (buf_len > client_len)
3248                        buf_len = client_len;
3249
3250                if (buf_len > server_len)
3251                        buf_len = server_len;
3252
3253                if (buf_len > max_vdma_size)
3254                        buf_len = max_vdma_size;
3255
3256                if (dir == DMA_TO_DEVICE) {
3257                        /* read from client */
3258                        rc = h_copy_rdma(buf_len,
3259                                         vscsi->dds.window[REMOTE].liobn,
3260                                         client_ioba,
3261                                         vscsi->dds.window[LOCAL].liobn,
3262                                         server_ioba);
3263                } else {
3264                        /* The h_copy_rdma will cause phyp, running in another
3265                         * partition, to read memory, so we need to make sure
3266                         * the data has been written out, hence these syncs.
3267                         */
3268                        /* ensure that everything is in memory */
3269                        isync();
3270                        /* ensure that memory has been made visible */
3271                        dma_wmb();
3272                        rc = h_copy_rdma(buf_len,
3273                                         vscsi->dds.window[LOCAL].liobn,
3274                                         server_ioba,
3275                                         vscsi->dds.window[REMOTE].liobn,
3276                                         client_ioba);
3277                }
3278                switch (rc) {
3279                case H_SUCCESS:
3280                        break;
3281                case H_PERMISSION:
3282                case H_SOURCE_PARM:
3283                case H_DEST_PARM:
3284                        if (connection_broken(vscsi)) {
3285                                spin_lock_bh(&vscsi->intr_lock);
3286                                vscsi->flags |=
3287                                        (RESPONSE_Q_DOWN | CLIENT_FAILED);
3288                                spin_unlock_bh(&vscsi->intr_lock);
3289                        }
3290                        dev_err(&vscsi->dev, "rdma: h_copy_rdma failed, rc %ld\n",
3291                                rc);
3292                        break;
3293
3294                default:
3295                        dev_err(&vscsi->dev, "rdma: unknown error %ld from h_copy_rdma\n",
3296                                rc);
3297                        break;
3298                }
3299
3300                if (!rc) {
3301                        tx_len -= buf_len;
3302                        if (tx_len) {
3303                                client_len -= buf_len;
3304                                if (client_len == 0)
3305                                        md_idx++;
3306                                else
3307                                        client_ioba += buf_len;
3308
3309                                server_len -= buf_len;
3310                                if (server_len == 0)
3311                                        sgp = sg_next(sgp);
3312                                else
3313                                        server_ioba += buf_len;
3314                        } else {
3315                                break;
3316                        }
3317                }
3318        } while (!rc);
3319
3320        return rc;
3321}
3322
3323/**
3324 * ibmvscsis_handle_crq() - Handle CRQ
3325 * @data:       Pointer to our adapter structure
3326 *
3327 * Read the command elements from the command queue and copy the payloads
3328 * associated with the command elements to local memory and execute the
3329 * SRP requests.
3330 *
3331 * Note: this is an edge triggered interrupt. It can not be shared.
3332 */
3333static void ibmvscsis_handle_crq(unsigned long data)
3334{
3335        struct scsi_info *vscsi = (struct scsi_info *)data;
3336        struct viosrp_crq *crq;
3337        long rc;
3338        bool ack = true;
3339        volatile u8 valid;
3340
3341        spin_lock_bh(&vscsi->intr_lock);
3342
3343        pr_debug("got interrupt\n");
3344
3345        /*
3346         * If we are waiting for all pending commands to complete because a
3347         * transport event was received, then anything in the command queue
3348         * belongs to a new connection, so do nothing here.
3349         */
3350        if (TARGET_STOP(vscsi)) {
3351                vio_enable_interrupts(vscsi->dma_dev);
3352
3353                pr_debug("handle_crq, don't process: flags 0x%x, state 0x%hx\n",
3354                         vscsi->flags, vscsi->state);
3355                spin_unlock_bh(&vscsi->intr_lock);
3356                return;
3357        }
3358
3359        rc = vscsi->flags & SCHEDULE_DISCONNECT;
3360        crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
3361        valid = crq->valid;
3362        dma_rmb();
3363
3364        while (valid) {
3365                /*
3366                 * These are edge-triggered interrupts. After dropping out of
3367                 * the while loop, the code must check for work since an
3368                 * interrupt could be lost and an element be left on the queue,
3369                 * hence the label.
3370                 */
3371cmd_work:
3372                vscsi->cmd_q.index =
3373                        (vscsi->cmd_q.index + 1) & vscsi->cmd_q.mask;
3374
3375                if (!rc) {
3376                        rc = ibmvscsis_parse_command(vscsi, crq);
3377                } else {
3378                        if ((uint)crq->valid == VALID_TRANS_EVENT) {
3379                                /*
3380                                 * must service the transport layer events even
3381                                 * in an error state; don't break out until all
3382                                 * the consecutive transport events have been
3383                                 * processed
3384                                 */
3385                                rc = ibmvscsis_trans_event(vscsi, crq);
3386                        } else if (vscsi->flags & TRANS_EVENT) {
3387                                /*
3388                                 * if a transport event has occurred leave
3389                                 * everything but transport events on the queue
3390                                 *
3391                                 * need to decrement the queue index so we can
3392                                 * look at the element again
3393                                 */
3394                                if (vscsi->cmd_q.index)
3395                                        vscsi->cmd_q.index -= 1;
3396                                else
3397                                        /*
3398                                         * index is at 0, so it just wrapped;
3399                                         * point it at the last element in the q
3400                                         */
3401                                        vscsi->cmd_q.index = vscsi->cmd_q.mask;
3402                                break;
3403                        }
3404                }
3405
3406                crq->valid = INVALIDATE_CMD_RESP_EL;
3407
3408                crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
3409                valid = crq->valid;
3410                dma_rmb();
3411        }
3412
3413        if (!rc) {
3414                if (ack) {
3415                        vio_enable_interrupts(vscsi->dma_dev);
3416                        ack = false;
3417                        pr_debug("handle_crq, reenabling interrupts\n");
3418                }
3419                valid = crq->valid;
3420                dma_rmb();
3421                if (valid)
3422                        goto cmd_work;
3423        } else {
3424                pr_debug("handle_crq, error: flags 0x%x, state 0x%hx, crq index 0x%x\n",
3425                         vscsi->flags, vscsi->state, vscsi->cmd_q.index);
3426        }
3427
3428        pr_debug("Leaving handle_crq: schedule_q empty %d, flags 0x%x, state 0x%hx\n",
3429                 (int)list_empty(&vscsi->schedule_q), vscsi->flags,
3430                 vscsi->state);
3431
3432        spin_unlock_bh(&vscsi->intr_lock);
3433}
3434
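    /*
     * Allocate and initialize a new adapter: read the DMA windows, set up
     * the SRP target, command pool, response-queue timer, command queue,
     * map buffer and work queue, request the interrupt, and leave the
     * adapter in the WAIT_ENABLED state until the tpg is enabled.
     */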
3435static int ibmvscsis_probe(struct vio_dev *vdev,
3436                           const struct vio_device_id *id)
3437{
3438        struct scsi_info *vscsi;
3439        int rc = 0;
3440        long hrc = 0;
3441        char wq_name[24];
3442
3443        vscsi = kzalloc(sizeof(*vscsi), GFP_KERNEL);
3444        if (!vscsi) {
3445                rc = -ENOMEM;
3446                pr_err("probe: allocation of adapter failed\n");
3447                return rc;
3448        }
3449
3450        vscsi->dma_dev = vdev;
3451        vscsi->dev = vdev->dev;
3452        INIT_LIST_HEAD(&vscsi->schedule_q);
3453        INIT_LIST_HEAD(&vscsi->waiting_rsp);
3454        INIT_LIST_HEAD(&vscsi->active_q);
3455
3456        snprintf(vscsi->tport.tport_name, IBMVSCSIS_NAMELEN, "%s",
3457                 dev_name(&vdev->dev));
3458
3459        pr_debug("probe tport_name: %s\n", vscsi->tport.tport_name);
3460
3461        rc = read_dma_window(vscsi);
3462        if (rc)
3463                goto free_adapter;
3464        pr_debug("Probe: liobn 0x%x, riobn 0x%x\n",
3465                 vscsi->dds.window[LOCAL].liobn,
3466                 vscsi->dds.window[REMOTE].liobn);
3467
3468        strcpy(vscsi->eye, "VSCSI ");
3469        strncat(vscsi->eye, vdev->name, MAX_EYE);
3470
3471        vscsi->dds.unit_id = vdev->unit_address;
3472        strncpy(vscsi->dds.partition_name, partition_name,
3473                sizeof(vscsi->dds.partition_name));
3474        vscsi->dds.partition_num = partition_number;
3475
3476        spin_lock_bh(&ibmvscsis_dev_lock);
3477        list_add_tail(&vscsi->list, &ibmvscsis_dev_list);
3478        spin_unlock_bh(&ibmvscsis_dev_lock);
3479
3480        /*
3481         * TBD: How do we determine # of cmds to request?  Do we know how
3482         * many "children" we have?
3483         */
3484        vscsi->request_limit = INITIAL_SRP_LIMIT;
3485        rc = srp_target_alloc(&vscsi->target, &vdev->dev, vscsi->request_limit,
3486                              SRP_MAX_IU_LEN);
3487        if (rc)
3488                goto rem_list;
3489
3490        vscsi->target.ldata = vscsi;
3491
3492        rc = ibmvscsis_alloc_cmds(vscsi, vscsi->request_limit);
3493        if (rc) {
3494                dev_err(&vscsi->dev, "alloc_cmds failed, rc %d, num %d\n",
3495                        rc, vscsi->request_limit);
3496                goto free_target;
3497        }
3498
3499        /*
3500         * Note: the lock is used when freeing the timer, so it must be
3501         * initialized first so that the ordering in the error path is correct.
3502         */
3503        spin_lock_init(&vscsi->intr_lock);
3504
3505        rc = ibmvscsis_alloctimer(vscsi);
3506        if (rc) {
3507                dev_err(&vscsi->dev, "probe: alloctimer failed, rc %d\n", rc);
3508                goto free_cmds;
3509        }
3510
3511        rc = ibmvscsis_create_command_q(vscsi, 256);
3512        if (rc) {
3513                dev_err(&vscsi->dev, "probe: create_command_q failed, rc %d\n",
3514                        rc);
3515                goto free_timer;
3516        }
3517
3518        vscsi->map_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
3519        if (!vscsi->map_buf) {
3520                rc = -ENOMEM;
3521                dev_err(&vscsi->dev, "probe: allocating cmd buffer failed\n");
3522                goto destroy_queue;
3523        }
3524
3525        vscsi->map_ioba = dma_map_single(&vdev->dev, vscsi->map_buf, PAGE_SIZE,
3526                                         DMA_BIDIRECTIONAL);
3527        if (dma_mapping_error(&vdev->dev, vscsi->map_ioba)) {
3528                rc = -ENOMEM;
3529                dev_err(&vscsi->dev, "probe: error mapping command buffer\n");
3530                goto free_buf;
3531        }
3532
3533        hrc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
3534                       (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
3535                       0);
3536        if (hrc == H_SUCCESS)
3537                vscsi->client_data.partition_number =
3538                        be64_to_cpu(*(u64 *)vscsi->map_buf);
3539        /*
3540         * We expect the VIOCTL to fail if we're configured as "any
3541         * client can connect" and the client isn't activated yet.
3542         * We'll make the call again when the client sends an init msg.
3543         */
3544        pr_debug("probe hrc %ld, client partition num %d\n",
3545                 hrc, vscsi->client_data.partition_number);
3546
3547        tasklet_init(&vscsi->work_task, ibmvscsis_handle_crq,
3548                     (unsigned long)vscsi);
3549
3550        init_completion(&vscsi->wait_idle);
3551        init_completion(&vscsi->unconfig);
3552
3553        snprintf(wq_name, 24, "ibmvscsis%s", dev_name(&vdev->dev));
3554        vscsi->work_q = create_workqueue(wq_name);
3555        if (!vscsi->work_q) {
3556                rc = -ENOMEM;
3557                dev_err(&vscsi->dev, "create_workqueue failed\n");
3558                goto unmap_buf;
3559        }
3560
3561        rc = request_irq(vdev->irq, ibmvscsis_interrupt, 0, "ibmvscsis", vscsi);
3562        if (rc) {
3563                dev_err(&vscsi->dev, "probe: request_irq failed, rc %d\n", rc);
3564                rc = -EPERM;
3565                goto destroy_WQ;
3566        }
3567
3568        vscsi->state = WAIT_ENABLED;
3569
3570        dev_set_drvdata(&vdev->dev, vscsi);
3571
3572        return 0;
3573
3574destroy_WQ:
3575        destroy_workqueue(vscsi->work_q);
3576unmap_buf:
3577        dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE,
3578                         DMA_BIDIRECTIONAL);
3579free_buf:
3580        kfree(vscsi->map_buf);
3581destroy_queue:
3582        tasklet_kill(&vscsi->work_task);
3583        ibmvscsis_unregister_command_q(vscsi);
3584        ibmvscsis_destroy_command_q(vscsi);
3585free_timer:
3586        ibmvscsis_freetimer(vscsi);
3587free_cmds:
3588        ibmvscsis_free_cmds(vscsi);
3589free_target:
3590        srp_target_free(&vscsi->target);
3591rem_list:
3592        spin_lock_bh(&ibmvscsis_dev_lock);
3593        list_del(&vscsi->list);
3594        spin_unlock_bh(&ibmvscsis_dev_lock);
3595free_adapter:
3596        kfree(vscsi);
3597
3598        return rc;
3599}
3600
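    /*
     * Tear down the adapter: post an UNCONFIGURING disconnect, wait for it
     * to complete, then release the interrupt, work queue, command queue,
     * timer, command pool, SRP target and the adapter itself in roughly
     * the reverse order of ibmvscsis_probe().
     */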
3601static int ibmvscsis_remove(struct vio_dev *vdev)
3602{
3603        struct scsi_info *vscsi = dev_get_drvdata(&vdev->dev);
3604
3605        pr_debug("remove (%s)\n", dev_name(&vscsi->dma_dev->dev));
3606
3607        spin_lock_bh(&vscsi->intr_lock);
3608        ibmvscsis_post_disconnect(vscsi, UNCONFIGURING, 0);
3609        vscsi->flags |= CFG_SLEEPING;
3610        spin_unlock_bh(&vscsi->intr_lock);
3611        wait_for_completion(&vscsi->unconfig);
3612
3613        vio_disable_interrupts(vdev);
3614        free_irq(vdev->irq, vscsi);
3615        destroy_workqueue(vscsi->work_q);
3616        dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE,
3617                         DMA_BIDIRECTIONAL);
3618        kfree(vscsi->map_buf);
3619        tasklet_kill(&vscsi->work_task);
3620        ibmvscsis_destroy_command_q(vscsi);
3621        ibmvscsis_freetimer(vscsi);
3622        ibmvscsis_free_cmds(vscsi);
3623        srp_target_free(&vscsi->target);
3624        spin_lock_bh(&ibmvscsis_dev_lock);
3625        list_del(&vscsi->list);
3626        spin_unlock_bh(&ibmvscsis_dev_lock);
3627        kfree(vscsi);
3628
3629        return 0;
3630}
3631
3632static ssize_t system_id_show(struct device *dev,
3633                              struct device_attribute *attr, char *buf)
3634{
3635        return snprintf(buf, PAGE_SIZE, "%s\n", system_id);
3636}
3637
3638static ssize_t partition_number_show(struct device *dev,
3639                                     struct device_attribute *attr, char *buf)
3640{
3641        return snprintf(buf, PAGE_SIZE, "%x\n", partition_number);
3642}
3643
3644static ssize_t unit_address_show(struct device *dev,
3645                                 struct device_attribute *attr, char *buf)
3646{
3647        struct scsi_info *vscsi = container_of(dev, struct scsi_info, dev);
3648
3649        return snprintf(buf, PAGE_SIZE, "%x\n", vscsi->dma_dev->unit_address);
3650}
3651
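    /*
     * Read the system id, partition name and partition number from the
     * device tree, along with the optional ibm,max-virtual-dma-size
     * property that caps the size of a single h_copy_rdma transfer.
     */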
3652static int ibmvscsis_get_system_info(void)
3653{
3654        struct device_node *rootdn, *vdevdn;
3655        const char *id, *model, *name;
3656        const uint *num;
3657
3658        rootdn = of_find_node_by_path("/");
3659        if (!rootdn)
3660                return -ENOENT;
3661
3662        model = of_get_property(rootdn, "model", NULL);
3663        id = of_get_property(rootdn, "system-id", NULL);
3664        if (model && id)
3665                snprintf(system_id, sizeof(system_id), "%s-%s", model, id);
3666
3667        name = of_get_property(rootdn, "ibm,partition-name", NULL);
3668        if (name)
3669                strncpy(partition_name, name, sizeof(partition_name));
3670
3671        num = of_get_property(rootdn, "ibm,partition-no", NULL);
3672        if (num)
3673                partition_number = of_read_number(num, 1);
3674
3675        of_node_put(rootdn);
3676
3677        vdevdn = of_find_node_by_path("/vdevice");
3678        if (vdevdn) {
3679                const uint *mvds;
3680
3681                mvds = of_get_property(vdevdn, "ibm,max-virtual-dma-size",
3682                                       NULL);
3683                if (mvds)
3684                        max_vdma_size = *mvds;
3685                of_node_put(vdevdn);
3686        }
3687
3688        return 0;
3689}
3690
3691static char *ibmvscsis_get_fabric_name(void)
3692{
3693        return "ibmvscsis";
3694}
3695
3696static char *ibmvscsis_get_fabric_wwn(struct se_portal_group *se_tpg)
3697{
3698        struct ibmvscsis_tport *tport =
3699                container_of(se_tpg, struct ibmvscsis_tport, se_tpg);
3700
3701        return tport->tport_name;
3702}
3703
3704static u16 ibmvscsis_get_tag(struct se_portal_group *se_tpg)
3705{
3706        struct ibmvscsis_tport *tport =
3707                container_of(se_tpg, struct ibmvscsis_tport, se_tpg);
3708
3709        return tport->tport_tpgt;
3710}
3711
3712static u32 ibmvscsis_get_default_depth(struct se_portal_group *se_tpg)
3713{
3714        return 1;
3715}
3716
3717static int ibmvscsis_check_true(struct se_portal_group *se_tpg)
3718{
3719        return 1;
3720}
3721
3722static int ibmvscsis_check_false(struct se_portal_group *se_tpg)
3723{
3724        return 0;
3725}
3726
3727static u32 ibmvscsis_tpg_get_inst_index(struct se_portal_group *se_tpg)
3728{
3729        return 1;
3730}
3731
3732static int ibmvscsis_check_stop_free(struct se_cmd *se_cmd)
3733{
3734        return target_put_sess_cmd(se_cmd);
3735}
3736
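    /*
     * Called when TCM is done with a command: move it from the active_q to
     * the waiting_rsp list and try to send any responses that are ready.
     */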
3737static void ibmvscsis_release_cmd(struct se_cmd *se_cmd)
3738{
3739        struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3740                                                 se_cmd);
3741        struct scsi_info *vscsi = cmd->adapter;
3742
3743        spin_lock_bh(&vscsi->intr_lock);
3744        /* Remove from active_q */
3745        list_move_tail(&cmd->list, &vscsi->waiting_rsp);
3746        ibmvscsis_send_messages(vscsi);
3747        spin_unlock_bh(&vscsi->intr_lock);
3748}
3749
3750static u32 ibmvscsis_sess_get_index(struct se_session *se_sess)
3751{
3752        return 0;
3753}
3754
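    /*
     * Fetch the data for a WRITE from the client via RDMA and then hand
     * the command back to TCM for execution. If the client has failed or
     * the response queue is down, the transfer is skipped and 0 is
     * returned since LIO cannot do anything about the failure.
     */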
3755static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
3756{
3757        struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3758                                                 se_cmd);
3759        struct scsi_info *vscsi = cmd->adapter;
3760        struct iu_entry *iue = cmd->iue;
3761        int rc;
3762
3763        /*
3764         * If CLIENT_FAILED OR RESPONSE_Q_DOWN, then just return success
3765         * since LIO can't do anything about it, and we don't want to
3766         * attempt an srp_transfer_data.
3767         */
3768        if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) {
3769                pr_err("write_pending failed, flags 0x%x\n", vscsi->flags);
3770                return 0;
3771        }
3772
3773        rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma,
3774                               1, 1);
3775        if (rc) {
3776                pr_err("srp_transfer_data() failed: %d\n", rc);
3777                return -EIO;
3778        }
3779        /*
3780         * We now tell TCM to add this WRITE CDB directly into the TCM storage
3781         * object execution queue.
3782         */
3783        target_execute_cmd(se_cmd);
3784        return 0;
3785}
3786
3787static int ibmvscsis_write_pending_status(struct se_cmd *se_cmd)
3788{
3789        return 0;
3790}
3791
3792static void ibmvscsis_set_default_node_attrs(struct se_node_acl *nacl)
3793{
3794}
3795
3796static int ibmvscsis_get_cmd_state(struct se_cmd *se_cmd)
3797{
3798        return 0;
3799}
3800
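    /*
     * Push READ data to the client via RDMA; if the transfer fails, build
     * a MEDIUM ERROR (logical unit communication time-out) sense. In
     * either case the SRP response is then copied to the client's memory.
     */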
3801static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd)
3802{
3803        struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3804                                                 se_cmd);
3805        struct iu_entry *iue = cmd->iue;
3806        struct scsi_info *vscsi = cmd->adapter;
3807        char *sd;
3808        uint len = 0;
3809        int rc;
3810
3811        rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, 1,
3812                               1);
3813        if (rc) {
3814                pr_err("srp_transfer_data failed: %d\n", rc);
3815                sd = se_cmd->sense_buffer;
3816                se_cmd->scsi_sense_length = 18;
3817                memset(se_cmd->sense_buffer, 0, se_cmd->scsi_sense_length);
3818                /* Logical Unit Communication Time-out asc/ascq = 0x0801 */
3819                scsi_build_sense_buffer(0, se_cmd->sense_buffer, MEDIUM_ERROR,
3820                                        0x08, 0x01);
3821        }
3822
3823        srp_build_response(vscsi, cmd, &len);
3824        cmd->rsp.format = SRP_FORMAT;
3825        cmd->rsp.len = len;
3826
3827        return 0;
3828}
3829
3830static int ibmvscsis_queue_status(struct se_cmd *se_cmd)
3831{
3832        struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3833                                                 se_cmd);
3834        struct scsi_info *vscsi = cmd->adapter;
3835        uint len;
3836
3837        pr_debug("queue_status %p\n", se_cmd);
3838
3839        srp_build_response(vscsi, cmd, &len);
3840        cmd->rsp.format = SRP_FORMAT;
3841        cmd->rsp.len = len;
3842
3843        return 0;
3844}
3845
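    /*
     * Queue a task management response. If an ABORT TASK reported that the
     * task does not exist but the tag is still on the active_q, link the
     * two commands and set DELAY_SEND so this response goes out only after
     * the aborted command's own response. The SRP task management response
     * is then built in the client's memory.
     */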
3846static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd)
3847{
3848        struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3849                                                 se_cmd);
3850        struct scsi_info *vscsi = cmd->adapter;
3851        struct ibmvscsis_cmd *cmd_itr;
3852        struct iu_entry *iue = cmd->iue;
3853        struct srp_tsk_mgmt *srp_tsk = &vio_iu(iue)->srp.tsk_mgmt;
3854        u64 tag_to_abort = be64_to_cpu(srp_tsk->task_tag);
3855        uint len;
3856
3857        pr_debug("queue_tm_rsp %p, status %d\n",
3858                 se_cmd, (int)se_cmd->se_tmr_req->response);
3859
3860        if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK &&
3861            cmd->se_cmd.se_tmr_req->response == TMR_TASK_DOES_NOT_EXIST) {
3862                spin_lock_bh(&vscsi->intr_lock);
3863                list_for_each_entry(cmd_itr, &vscsi->active_q, list) {
3864                        if (tag_to_abort == cmd_itr->se_cmd.tag) {
3865                                cmd_itr->abort_cmd = cmd;
3866                                cmd->flags |= DELAY_SEND;
3867                                break;
3868                        }
3869                }
3870                spin_unlock_bh(&vscsi->intr_lock);
3871        }
3872
3873        srp_build_response(vscsi, cmd, &len);
3874        cmd->rsp.format = SRP_FORMAT;
3875        cmd->rsp.len = len;
3876}
3877
3878static void ibmvscsis_aborted_task(struct se_cmd *se_cmd)
3879{
3880        pr_debug("ibmvscsis_aborted_task %p task_tag: %llu\n",
3881                 se_cmd, se_cmd->tag);
3882}
3883
3884static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf,
3885                                           struct config_group *group,
3886                                           const char *name)
3887{
3888        struct ibmvscsis_tport *tport;
3889
3890        tport = ibmvscsis_lookup_port(name);
3891        if (tport) {
3892                tport->tport_proto_id = SCSI_PROTOCOL_SRP;
3893                pr_debug("make_tport(%s), pointer:%p, tport_id:%x\n",
3894                         name, tport, tport->tport_proto_id);
3895                return &tport->tport_wwn;
3896        }
3897
3898        return ERR_PTR(-EINVAL);
3899}
3900
3901static void ibmvscsis_drop_tport(struct se_wwn *wwn)
3902{
3903        struct ibmvscsis_tport *tport = container_of(wwn,
3904                                                     struct ibmvscsis_tport,
3905                                                     tport_wwn);
3906
3907        pr_debug("drop_tport(%s)\n",
3908                 config_item_name(&tport->tport_wwn.wwn_group.cg_item));
3909}
3910
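    /*
     * Create a TPG under an existing tport. The directory name must be of
     * the form "tpgt_<n>"; the numeric suffix becomes the target portal
     * group tag and the TPG is registered with TCM using the tport's
     * protocol id.
     */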
3911static struct se_portal_group *ibmvscsis_make_tpg(struct se_wwn *wwn,
3912                                                  struct config_group *group,
3913                                                  const char *name)
3914{
3915        struct ibmvscsis_tport *tport =
3916                container_of(wwn, struct ibmvscsis_tport, tport_wwn);
3917        u16 tpgt;
3918        int rc;
3919
3920        if (strstr(name, "tpgt_") != name)
3921                return ERR_PTR(-EINVAL);
3922        rc = kstrtou16(name + 5, 0, &tpgt);
3923        if (rc)
3924                return ERR_PTR(rc);
3925        tport->tport_tpgt = tpgt;
3926
3927        tport->releasing = false;
3928
3929        rc = core_tpg_register(&tport->tport_wwn, &tport->se_tpg,
3930                               tport->tport_proto_id);
3931        if (rc)
3932                return ERR_PTR(rc);
3933
3934        return &tport->se_tpg;
3935}
3936
3937static void ibmvscsis_drop_tpg(struct se_portal_group *se_tpg)
3938{
3939        struct ibmvscsis_tport *tport = container_of(se_tpg,
3940                                                     struct ibmvscsis_tport,
3941                                                     se_tpg);
3942
3943        tport->releasing = true;
3944        tport->enabled = false;
3945
3946        /*
3947         * Release the virtual I_T Nexus for this ibmvscsis TPG
3948         */
3949        ibmvscsis_drop_nexus(tport);
3950        /*
3951         * Deregister the se_tpg from TCM.
3952         */
3953        core_tpg_deregister(se_tpg);
3954}
3955
3956static ssize_t ibmvscsis_wwn_version_show(struct config_item *item,
3957                                          char *page)
3958{
3959        return scnprintf(page, PAGE_SIZE, "%s\n", IBMVSCSIS_VERSION);
3960}
3961CONFIGFS_ATTR_RO(ibmvscsis_wwn_, version);
3962
3963static struct configfs_attribute *ibmvscsis_wwn_attrs[] = {
3964        &ibmvscsis_wwn_attr_version,
3965        NULL,
3966};
3967
3968static ssize_t ibmvscsis_tpg_enable_show(struct config_item *item,
3969                                         char *page)
3970{
3971        struct se_portal_group *se_tpg = to_tpg(item);
3972        struct ibmvscsis_tport *tport = container_of(se_tpg,
3973                                                     struct ibmvscsis_tport,
3974                                                     se_tpg);
3975
3976        return snprintf(page, PAGE_SIZE, "%d\n", (tport->enabled) ? 1 : 0);
3977}
3978
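    /*
     * Writing 1 to the configfs "enable" attribute registers the command
     * queue with the hypervisor (via ibmvscsis_enable_change_state()) so a
     * client may connect; writing 0 simulates the server going down by
     * posting an ERR_DISCONNECT. The attribute normally lives under the
     * tpgt_<n> directory for this fabric in configfs.
     */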
3979static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
3980                                          const char *page, size_t count)
3981{
3982        struct se_portal_group *se_tpg = to_tpg(item);
3983        struct ibmvscsis_tport *tport = container_of(se_tpg,
3984                                                     struct ibmvscsis_tport,
3985                                                     se_tpg);
3986        struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
3987        unsigned long tmp;
3988        int rc;
3989        long lrc;
3990
3991        rc = kstrtoul(page, 0, &tmp);
3992        if (rc < 0) {
3993                pr_err("Unable to extract ibmvscsis_tpg_store_enable\n");
3994                return -EINVAL;
3995        }
3996
3997        if ((tmp != 0) && (tmp != 1)) {
3998                pr_err("Illegal value for ibmvscsis_tpg_store_enable\n");
3999                return -EINVAL;
4000        }
4001
4002        if (tmp) {
4003                spin_lock_bh(&vscsi->intr_lock);
4004                tport->enabled = true;
4005                lrc = ibmvscsis_enable_change_state(vscsi);
4006                if (lrc)
4007                        pr_err("enable_change_state failed, rc %ld state %d\n",
4008                               lrc, vscsi->state);
4009                spin_unlock_bh(&vscsi->intr_lock);
4010        } else {
4011                spin_lock_bh(&vscsi->intr_lock);
4012                tport->enabled = false;
4013                /* This simulates the server going down */
4014                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
4015                spin_unlock_bh(&vscsi->intr_lock);
4016        }
4017
4018        pr_debug("tpg_enable_store, tmp %ld, state %d\n", tmp, vscsi->state);
4019
4020        return count;
4021}
4022CONFIGFS_ATTR(ibmvscsis_tpg_, enable);
4023
4024static struct configfs_attribute *ibmvscsis_tpg_attrs[] = {
4025        &ibmvscsis_tpg_attr_enable,
4026        NULL,
4027};
4028
4029static const struct target_core_fabric_ops ibmvscsis_ops = {
4030        .module                         = THIS_MODULE,
4031        .name                           = "ibmvscsis",
4032        .max_data_sg_nents              = MAX_TXU / PAGE_SIZE,
4033        .get_fabric_name                = ibmvscsis_get_fabric_name,
4034        .tpg_get_wwn                    = ibmvscsis_get_fabric_wwn,
4035        .tpg_get_tag                    = ibmvscsis_get_tag,
4036        .tpg_get_default_depth          = ibmvscsis_get_default_depth,
4037        .tpg_check_demo_mode            = ibmvscsis_check_true,
4038        .tpg_check_demo_mode_cache      = ibmvscsis_check_true,
4039        .tpg_check_demo_mode_write_protect = ibmvscsis_check_false,
4040        .tpg_check_prod_mode_write_protect = ibmvscsis_check_false,
4041        .tpg_get_inst_index             = ibmvscsis_tpg_get_inst_index,
4042        .check_stop_free                = ibmvscsis_check_stop_free,
4043        .release_cmd                    = ibmvscsis_release_cmd,
4044        .sess_get_index                 = ibmvscsis_sess_get_index,
4045        .write_pending                  = ibmvscsis_write_pending,
4046        .write_pending_status           = ibmvscsis_write_pending_status,
4047        .set_default_node_attributes    = ibmvscsis_set_default_node_attrs,
4048        .get_cmd_state                  = ibmvscsis_get_cmd_state,
4049        .queue_data_in                  = ibmvscsis_queue_data_in,
4050        .queue_status                   = ibmvscsis_queue_status,
4051        .queue_tm_rsp                   = ibmvscsis_queue_tm_rsp,
4052        .aborted_task                   = ibmvscsis_aborted_task,
4053        /*
4054         * Setup function pointers for logic in target_core_fabric_configfs.c
4055         */
4056        .fabric_make_wwn                = ibmvscsis_make_tport,
4057        .fabric_drop_wwn                = ibmvscsis_drop_tport,
4058        .fabric_make_tpg                = ibmvscsis_make_tpg,
4059        .fabric_drop_tpg                = ibmvscsis_drop_tpg,
4060
4061        .tfc_wwn_attrs                  = ibmvscsis_wwn_attrs,
4062        .tfc_tpg_base_attrs             = ibmvscsis_tpg_attrs,
4063};
4064
4065static void ibmvscsis_dev_release(struct device *dev) {}
4066
4067static struct device_attribute dev_attr_system_id =
4068        __ATTR(system_id, S_IRUGO, system_id_show, NULL);
4069
4070static struct device_attribute dev_attr_partition_number =
4071        __ATTR(partition_number, S_IRUGO, partition_number_show, NULL);
4072
4073static struct device_attribute dev_attr_unit_address =
4074        __ATTR(unit_address, S_IRUGO, unit_address_show, NULL);
4075
4076static struct attribute *ibmvscsis_dev_attrs[] = {
4077        &dev_attr_system_id.attr,
4078        &dev_attr_partition_number.attr,
4079        &dev_attr_unit_address.attr,
            NULL,
4080};
4081ATTRIBUTE_GROUPS(ibmvscsis_dev);
4082
4083static struct class ibmvscsis_class = {
4084        .name           = "ibmvscsis",
4085        .dev_release    = ibmvscsis_dev_release,
4086        .dev_groups     = ibmvscsis_dev_groups,
4087};
4088
4089static struct vio_device_id ibmvscsis_device_table[] = {
4090        { "v-scsi-host", "IBM,v-scsi-host" },
4091        { "", "" }
4092};
4093MODULE_DEVICE_TABLE(vio, ibmvscsis_device_table);
4094
4095static struct vio_driver ibmvscsis_driver = {
4096        .name = "ibmvscsis",
4097        .id_table = ibmvscsis_device_table,
4098        .probe = ibmvscsis_probe,
4099        .remove = ibmvscsis_remove,
4100};
4101
4102/*
4103 * ibmvscsis_init() - Kernel Module initialization
4104 *
4105 * Note: vio_register_driver() registers callback functions, and at least one
4106 * of those callback functions calls TCM - Linux IO Target Subsystem, thus
4107 * the SCSI Target template must be registered before vio_register_driver()
4108 * is called.
4109 */
4110static int __init ibmvscsis_init(void)
4111{
4112        int rc = 0;
4113
4114        rc = ibmvscsis_get_system_info();
4115        if (rc) {
4116                pr_err("rc %d from get_system_info\n", rc);
4117                goto out;
4118        }
4119
4120        rc = class_register(&ibmvscsis_class);
4121        if (rc) {
4122                pr_err("failed class register\n");
4123                goto out;
4124        }
4125
4126        rc = target_register_template(&ibmvscsis_ops);
4127        if (rc) {
4128                pr_err("rc %d from target_register_template\n", rc);
4129                goto unregister_class;
4130        }
4131
4132        rc = vio_register_driver(&ibmvscsis_driver);
4133        if (rc) {
4134                pr_err("rc %d from vio_register_driver\n", rc);
4135                goto unregister_target;
4136        }
4137
4138        return 0;
4139
4140unregister_target:
4141        target_unregister_template(&ibmvscsis_ops);
4142unregister_class:
4143        class_unregister(&ibmvscsis_class);
4144out:
4145        return rc;
4146}
4147
4148static void __exit ibmvscsis_exit(void)
4149{
4150        pr_info("Unregister IBM virtual SCSI host driver\n");
4151        vio_unregister_driver(&ibmvscsis_driver);
4152        target_unregister_template(&ibmvscsis_ops);
4153        class_unregister(&ibmvscsis_class);
4154}
4155
4156MODULE_DESCRIPTION("IBMVSCSIS fabric driver");
4157MODULE_AUTHOR("Bryant G. Ly and Michael Cyr");
4158MODULE_LICENSE("GPL");
4159MODULE_VERSION(IBMVSCSIS_VERSION);
4160module_init(ibmvscsis_init);
4161module_exit(ibmvscsis_exit);
4162