linux/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
   1/*******************************************************************************
   2 * IBM Virtual SCSI Target Driver
   3 * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp.
   4 *                         Santiago Leon (santil@us.ibm.com) IBM Corp.
   5 *                         Linda Xie (lxie@us.ibm.com) IBM Corp.
   6 *
   7 * Copyright (C) 2005-2011 FUJITA Tomonori <tomof@acm.org>
   8 * Copyright (C) 2010 Nicholas A. Bellinger <nab@kernel.org>
   9 *
  10 * Authors: Bryant G. Ly <bryantly@linux.vnet.ibm.com>
  11 * Authors: Michael Cyr <mikecyr@linux.vnet.ibm.com>
  12 *
  13 * This program is free software; you can redistribute it and/or modify
  14 * it under the terms of the GNU General Public License as published by
  15 * the Free Software Foundation; either version 2 of the License, or
  16 * (at your option) any later version.
  17 *
  18 * This program is distributed in the hope that it will be useful,
  19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  21 * GNU General Public License for more details.
  22 *
  23 ****************************************************************************/
  24
  25#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt
  26
  27#include <linux/module.h>
  28#include <linux/kernel.h>
  29#include <linux/slab.h>
  30#include <linux/types.h>
  31#include <linux/list.h>
  32#include <linux/string.h>
  33
  34#include <target/target_core_base.h>
  35#include <target/target_core_fabric.h>
  36
  37#include <asm/hvcall.h>
  38#include <asm/vio.h>
  39
  40#include <scsi/viosrp.h>
  41
  42#include "ibmvscsi_tgt.h"
  43
  44#define IBMVSCSIS_VERSION       "v0.2"
  45
  46#define INITIAL_SRP_LIMIT       800
  47#define DEFAULT_MAX_SECTORS     256
  48
  49static uint max_vdma_size = MAX_H_COPY_RDMA;
  50
  51static char system_id[SYS_ID_NAME_LEN] = "";
  52static char partition_name[PARTITION_NAMELEN] = "UNKNOWN";
  53static uint partition_number = -1;
  54
  55/* Adapter list and lock to control it */
  56static DEFINE_SPINLOCK(ibmvscsis_dev_lock);
  57static LIST_HEAD(ibmvscsis_dev_list);
  58
  59static long ibmvscsis_parse_command(struct scsi_info *vscsi,
  60                                    struct viosrp_crq *crq);
  61
  62static void ibmvscsis_adapter_idle(struct scsi_info *vscsi);
  63
  64static void ibmvscsis_determine_resid(struct se_cmd *se_cmd,
  65                                      struct srp_rsp *rsp)
  66{
  67        u32 residual_count = se_cmd->residual_count;
  68
  69        if (!residual_count)
  70                return;
  71
  72        if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
  73                if (se_cmd->data_direction == DMA_TO_DEVICE) {
  74                        /* residual data from an underflow write */
  75                        rsp->flags = SRP_RSP_FLAG_DOUNDER;
  76                        rsp->data_out_res_cnt = cpu_to_be32(residual_count);
  77                } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
  78                        /* residual data from an underflow read */
  79                        rsp->flags = SRP_RSP_FLAG_DIUNDER;
  80                        rsp->data_in_res_cnt = cpu_to_be32(residual_count);
  81                }
  82        } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
  83                if (se_cmd->data_direction == DMA_TO_DEVICE) {
  84                        /*  residual data from an overflow write */
  85                        rsp->flags = SRP_RSP_FLAG_DOOVER;
  86                        rsp->data_out_res_cnt = cpu_to_be32(residual_count);
  87                } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
  88                        /* residual data from an overflow read */
  89                        rsp->flags = SRP_RSP_FLAG_DIOVER;
  90                        rsp->data_in_res_cnt = cpu_to_be32(residual_count);
  91                }
  92        }
  93}
  94
  95/**
  96 * connection_broken() - Determine if the connection to the client is good
  97 * @vscsi:      Pointer to our adapter structure
  98 *
  99 * This function attempts to send a ping MAD to the client. If the call to
 100 * queue the request returns H_CLOSED then the connection has been broken
 101 * and the function returns TRUE.
 102 *
 103 * EXECUTION ENVIRONMENT:
 104 *      Interrupt or Process environment
 105 */
 106static bool connection_broken(struct scsi_info *vscsi)
 107{
 108        struct viosrp_crq *crq;
 109        u64 buffer[2] = { 0, 0 };
 110        long h_return_code;
 111        bool rc = false;
 112
 113        /* create a PING crq */
 114        crq = (struct viosrp_crq *)&buffer;
 115        crq->valid = VALID_CMD_RESP_EL;
 116        crq->format = MESSAGE_IN_CRQ;
 117        crq->status = PING;
 118
 119        h_return_code = h_send_crq(vscsi->dds.unit_id,
 120                                   cpu_to_be64(buffer[MSG_HI]),
 121                                   cpu_to_be64(buffer[MSG_LOW]));
 122
 123        pr_debug("connection_broken: rc %ld\n", h_return_code);
 124
 125        if (h_return_code == H_CLOSED)
 126                rc = true;
 127
 128        return rc;
 129}
 130
 131/**
  132 * ibmvscsis_unregister_command_q() - Helper Function - Unregister Command Queue
 133 * @vscsi:      Pointer to our adapter structure
 134 *
  135 * This function calls h_free_crq, then frees the interrupt bit, etc.
  136 * It must release the lock before doing so because of the time it can take
  137 * for h_free_crq in PHYP.
  138 * NOTE: the caller must make sure that state and/or flags will prevent the
  139 *       interrupt handler from scheduling work.
  140 * NOTE: anyone calling this function may need to set the CRQ_CLOSED flag;
  141 *       we can't do it here because we don't hold the lock
 142 *
 143 * EXECUTION ENVIRONMENT:
 144 *      Process level
 145 */
 146static long ibmvscsis_unregister_command_q(struct scsi_info *vscsi)
 147{
 148        long qrc;
 149        long rc = ADAPT_SUCCESS;
 150        int ticks = 0;
 151
 152        do {
 153                qrc = h_free_crq(vscsi->dds.unit_id);
 154                switch (qrc) {
 155                case H_SUCCESS:
 156                        break;
 157
 158                case H_HARDWARE:
 159                case H_PARAMETER:
 160                        dev_err(&vscsi->dev, "unregister_command_q: error from h_free_crq %ld\n",
 161                                qrc);
 162                        rc = ERROR;
 163                        break;
 164
 165                case H_BUSY:
 166                case H_LONG_BUSY_ORDER_1_MSEC:
 167                        /* msleep not good for small values */
 168                        usleep_range(1000, 2000);
 169                        ticks += 1;
 170                        break;
 171                case H_LONG_BUSY_ORDER_10_MSEC:
 172                        usleep_range(10000, 20000);
 173                        ticks += 10;
 174                        break;
 175                case H_LONG_BUSY_ORDER_100_MSEC:
 176                        msleep(100);
 177                        ticks += 100;
 178                        break;
 179                case H_LONG_BUSY_ORDER_1_SEC:
 180                        ssleep(1);
 181                        ticks += 1000;
 182                        break;
 183                case H_LONG_BUSY_ORDER_10_SEC:
 184                        ssleep(10);
 185                        ticks += 10000;
 186                        break;
 187                case H_LONG_BUSY_ORDER_100_SEC:
 188                        ssleep(100);
 189                        ticks += 100000;
 190                        break;
 191                default:
 192                        dev_err(&vscsi->dev, "unregister_command_q: unknown error %ld from h_free_crq\n",
 193                                qrc);
 194                        rc = ERROR;
 195                        break;
 196                }
 197
 198                /*
  199                 * don't wait more than 300 seconds;
  200                 * ticks are in milliseconds, more or less
 201                 */
 202                if (ticks > 300000 && qrc != H_SUCCESS) {
 203                        rc = ERROR;
 204                        dev_err(&vscsi->dev, "Excessive wait for h_free_crq\n");
 205                }
 206        } while (qrc != H_SUCCESS && rc == ADAPT_SUCCESS);
 207
 208        pr_debug("Freeing CRQ: phyp rc %ld, rc %ld\n", qrc, rc);
 209
 210        return rc;
 211}
 212
 213/**
 214 * ibmvscsis_delete_client_info() - Helper function to Delete Client Info
 215 * @vscsi:      Pointer to our adapter structure
 216 * @client_closed:      True if client closed its queue
 217 *
 218 * Deletes information specific to the client when the client goes away
 219 *
 220 * EXECUTION ENVIRONMENT:
 221 *      Interrupt or Process
 222 */
 223static void ibmvscsis_delete_client_info(struct scsi_info *vscsi,
 224                                         bool client_closed)
 225{
 226        vscsi->client_cap = 0;
 227
 228        /*
 229         * Some things we don't want to clear if we're closing the queue,
 230         * because some clients don't resend the host handshake when they
 231         * get a transport event.
 232         */
 233        if (client_closed)
 234                vscsi->client_data.os_type = 0;
 235}
 236
 237/**
 238 * ibmvscsis_free_command_q() - Free Command Queue
 239 * @vscsi:      Pointer to our adapter structure
 240 *
 241 * This function calls unregister_command_q, then clears interrupts and
 242 * any pending interrupt acknowledgments associated with the command q.
 243 * It also clears memory if there is no error.
 244 *
  245 * PHYP did not meet the PAPR architecture, so we must give up the
  246 * lock. This causes a timing hole regarding state changes. To close the
  247 * hole, this routine does accounting on any change that occurred while
  248 * the lock was not held.
 249 * NOTE: must give up and then acquire the interrupt lock, the caller must
 250 *       make sure that state and or flags will prevent interrupt handler from
 251 *       scheduling work.
 252 *
 253 * EXECUTION ENVIRONMENT:
 254 *      Process level, interrupt lock is held
 255 */
 256static long ibmvscsis_free_command_q(struct scsi_info *vscsi)
 257{
 258        int bytes;
 259        u32 flags_under_lock;
 260        u16 state_under_lock;
 261        long rc = ADAPT_SUCCESS;
 262
 263        if (!(vscsi->flags & CRQ_CLOSED)) {
 264                vio_disable_interrupts(vscsi->dma_dev);
 265
 266                state_under_lock = vscsi->new_state;
 267                flags_under_lock = vscsi->flags;
 268                vscsi->phyp_acr_state = 0;
 269                vscsi->phyp_acr_flags = 0;
 270
 271                spin_unlock_bh(&vscsi->intr_lock);
 272                rc = ibmvscsis_unregister_command_q(vscsi);
 273                spin_lock_bh(&vscsi->intr_lock);
 274
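                     /*
                      * Record any state or flag change that happened while the
                      * lock was dropped so that adapter_idle can act on it.
                      */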
 275                if (state_under_lock != vscsi->new_state)
 276                        vscsi->phyp_acr_state = vscsi->new_state;
 277
 278                vscsi->phyp_acr_flags = ((~flags_under_lock) & vscsi->flags);
 279
 280                if (rc == ADAPT_SUCCESS) {
 281                        bytes = vscsi->cmd_q.size * PAGE_SIZE;
 282                        memset(vscsi->cmd_q.base_addr, 0, bytes);
 283                        vscsi->cmd_q.index = 0;
 284                        vscsi->flags |= CRQ_CLOSED;
 285
 286                        ibmvscsis_delete_client_info(vscsi, false);
 287                }
 288
 289                pr_debug("free_command_q: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
 290                         vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
 291                         vscsi->phyp_acr_state);
 292        }
 293        return rc;
 294}
 295
 296/**
 297 * ibmvscsis_cmd_q_dequeue() - Get valid Command element
 298 * @mask:       Mask to use in case index wraps
 299 * @current_index:      Current index into command queue
 300 * @base_addr:  Pointer to start of command queue
 301 *
 302 * Returns a pointer to a valid command element or NULL, if the command
 303 * queue is empty
 304 *
 305 * EXECUTION ENVIRONMENT:
 306 *      Interrupt environment, interrupt lock held
 307 */
 308static struct viosrp_crq *ibmvscsis_cmd_q_dequeue(uint mask,
 309                                                  uint *current_index,
 310                                                  struct viosrp_crq *base_addr)
 311{
 312        struct viosrp_crq *ptr;
 313
 314        ptr = base_addr + *current_index;
 315
 316        if (ptr->valid) {
 317                *current_index = (*current_index + 1) & mask;
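                     /*
                      * ensure the valid flag is read before the caller
                      * examines the rest of the CRQ element
                      */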
 318                dma_rmb();
 319        } else {
 320                ptr = NULL;
 321        }
 322
 323        return ptr;
 324}
 325
 326/**
 327 * ibmvscsis_send_init_message() -  send initialize message to the client
 328 * @vscsi:      Pointer to our adapter structure
 329 * @format:     Which Init Message format to send
 330 *
 331 * EXECUTION ENVIRONMENT:
 332 *      Interrupt environment interrupt lock held
 333 */
 334static long ibmvscsis_send_init_message(struct scsi_info *vscsi, u8 format)
 335{
 336        struct viosrp_crq *crq;
 337        u64 buffer[2] = { 0, 0 };
 338        long rc;
 339
 340        crq = (struct viosrp_crq *)&buffer;
 341        crq->valid = VALID_INIT_MSG;
 342        crq->format = format;
 343        rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]),
 344                        cpu_to_be64(buffer[MSG_LOW]));
 345
 346        return rc;
 347}
 348
 349/**
 350 * ibmvscsis_check_init_msg() - Check init message valid
 351 * @vscsi:      Pointer to our adapter structure
 352 * @format:     Pointer to return format of Init Message, if any.
 353 *              Set to UNUSED_FORMAT if no Init Message in queue.
 354 *
  355 * Checks if an initialize message was queued by the initiator
 356 * after the queue was created and before the interrupt was enabled.
 357 *
 358 * EXECUTION ENVIRONMENT:
 359 *      Process level only, interrupt lock held
 360 */
 361static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format)
 362{
 363        struct viosrp_crq *crq;
 364        long rc = ADAPT_SUCCESS;
 365
 366        crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask, &vscsi->cmd_q.index,
 367                                      vscsi->cmd_q.base_addr);
 368        if (!crq) {
 369                *format = (uint)UNUSED_FORMAT;
 370        } else if (crq->valid == VALID_INIT_MSG && crq->format == INIT_MSG) {
 371                *format = (uint)INIT_MSG;
 372                crq->valid = INVALIDATE_CMD_RESP_EL;
 373                dma_rmb();
 374
 375                /*
  376                 * the caller has ensured no initialize message was sent
  377                 * after the queue was created, so there should be no other
  378                 * message on the queue.
 379                 */
 380                crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask,
 381                                              &vscsi->cmd_q.index,
 382                                              vscsi->cmd_q.base_addr);
 383                if (crq) {
 384                        *format = (uint)(crq->format);
 385                        rc =  ERROR;
 386                        crq->valid = INVALIDATE_CMD_RESP_EL;
 387                        dma_rmb();
 388                }
 389        } else {
 390                *format = (uint)(crq->format);
 391                rc =  ERROR;
 392                crq->valid = INVALIDATE_CMD_RESP_EL;
 393                dma_rmb();
 394        }
 395
 396        return rc;
 397}
 398
 399/**
 400 * ibmvscsis_establish_new_q() - Establish new CRQ queue
 401 * @vscsi:      Pointer to our adapter structure
 402 * @new_state:  New state being established after resetting the queue
 403 *
 404 * Must be called with interrupt lock held.
 405 */
 406static long ibmvscsis_establish_new_q(struct scsi_info *vscsi,  uint new_state)
 407{
 408        long rc = ADAPT_SUCCESS;
 409        uint format;
 410
 411        vscsi->flags &= PRESERVE_FLAG_FIELDS;
 412        vscsi->rsp_q_timer.timer_pops = 0;
 413        vscsi->debit = 0;
 414        vscsi->credit = 0;
 415
 416        rc = vio_enable_interrupts(vscsi->dma_dev);
 417        if (rc) {
 418                pr_warn("reset_queue: failed to enable interrupts, rc %ld\n",
 419                        rc);
 420                return rc;
 421        }
 422
 423        rc = ibmvscsis_check_init_msg(vscsi, &format);
 424        if (rc) {
 425                dev_err(&vscsi->dev, "reset_queue: check_init_msg failed, rc %ld\n",
 426                        rc);
 427                return rc;
 428        }
 429
 430        if (format == UNUSED_FORMAT && new_state == WAIT_CONNECTION) {
 431                rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
 432                switch (rc) {
 433                case H_SUCCESS:
 434                case H_DROPPED:
 435                case H_CLOSED:
 436                        rc = ADAPT_SUCCESS;
 437                        break;
 438
 439                case H_PARAMETER:
 440                case H_HARDWARE:
 441                        break;
 442
 443                default:
 444                        vscsi->state = UNDEFINED;
 445                        rc = H_HARDWARE;
 446                        break;
 447                }
 448        }
 449
 450        return rc;
 451}
 452
 453/**
 454 * ibmvscsis_reset_queue() - Reset CRQ Queue
 455 * @vscsi:      Pointer to our adapter structure
 456 * @new_state:  New state to establish after resetting the queue
 457 *
  458 * This function calls h_free_crq and then calls h_reg_crq and does all
 459 * of the bookkeeping to get us back to where we can communicate.
 460 *
 461 * Actually, we don't always call h_free_crq.  A problem was discovered
 462 * where one partition would close and reopen his queue, which would
 463 * cause his partner to get a transport event, which would cause him to
 464 * close and reopen his queue, which would cause the original partition
 465 * to get a transport event, etc., etc.  To prevent this, we don't
 466 * actually close our queue if the client initiated the reset, (i.e.
 467 * either we got a transport event or we have detected that the client's
 468 * queue is gone)
 469 *
 470 * EXECUTION ENVIRONMENT:
 471 *      Process environment, called with interrupt lock held
 472 */
 473static void ibmvscsis_reset_queue(struct scsi_info *vscsi, uint new_state)
 474{
 475        int bytes;
 476        long rc = ADAPT_SUCCESS;
 477
 478        pr_debug("reset_queue: flags 0x%x\n", vscsi->flags);
 479
 480        /* don't reset, the client did it for us */
 481        if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
 482                vscsi->flags &=  PRESERVE_FLAG_FIELDS;
 483                vscsi->rsp_q_timer.timer_pops = 0;
 484                vscsi->debit = 0;
 485                vscsi->credit = 0;
 486                vscsi->state = new_state;
 487                vio_enable_interrupts(vscsi->dma_dev);
 488        } else {
 489                rc = ibmvscsis_free_command_q(vscsi);
 490                if (rc == ADAPT_SUCCESS) {
 491                        vscsi->state = new_state;
 492
 493                        bytes = vscsi->cmd_q.size * PAGE_SIZE;
 494                        rc = h_reg_crq(vscsi->dds.unit_id,
 495                                       vscsi->cmd_q.crq_token, bytes);
 496                        if (rc == H_CLOSED || rc == H_SUCCESS) {
 497                                rc = ibmvscsis_establish_new_q(vscsi,
 498                                                               new_state);
 499                        }
 500
 501                        if (rc != ADAPT_SUCCESS) {
 502                                pr_debug("reset_queue: reg_crq rc %ld\n", rc);
 503
 504                                vscsi->state = ERR_DISCONNECTED;
 505                                vscsi->flags |=  RESPONSE_Q_DOWN;
 506                                ibmvscsis_free_command_q(vscsi);
 507                        }
 508                } else {
 509                        vscsi->state = ERR_DISCONNECTED;
 510                        vscsi->flags |= RESPONSE_Q_DOWN;
 511                }
 512        }
 513}
 514
 515/**
 516 * ibmvscsis_free_cmd_resources() - Free command resources
 517 * @vscsi:      Pointer to our adapter structure
  518 * @cmd:        Command which is no longer in use
 519 *
 520 * Must be called with interrupt lock held.
 521 */
 522static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi,
 523                                         struct ibmvscsis_cmd *cmd)
 524{
 525        struct iu_entry *iue = cmd->iue;
 526
 527        switch (cmd->type) {
 528        case TASK_MANAGEMENT:
 529        case SCSI_CDB:
 530                /*
 531                 * When the queue goes down this value is cleared, so it
 532                 * cannot be cleared in this general purpose function.
 533                 */
 534                if (vscsi->debit)
 535                        vscsi->debit -= 1;
 536                break;
 537        case ADAPTER_MAD:
 538                vscsi->flags &= ~PROCESSING_MAD;
 539                break;
 540        case UNSET_TYPE:
 541                break;
 542        default:
 543                dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n",
 544                        cmd->type);
 545                break;
 546        }
 547
 548        cmd->iue = NULL;
 549        list_add_tail(&cmd->list, &vscsi->free_cmd);
 550        srp_iu_put(iue);
 551
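             /*
              * if this was the last outstanding command and a disconnect is
              * waiting for the adapter to go idle, wake it up
              */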
 552        if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) &&
 553            list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) {
 554                vscsi->flags &= ~WAIT_FOR_IDLE;
 555                complete(&vscsi->wait_idle);
 556        }
 557}
 558
 559/**
 560 * ibmvscsis_disconnect() - Helper function to disconnect
 561 * @work:       Pointer to work_struct, gives access to our adapter structure
 562 *
 563 * An error has occurred or the driver received a Transport event,
 564 * and the driver is requesting that the command queue be de-registered
 565 * in a safe manner. If there is no outstanding I/O then we can stop the
  566 * queue. If we are restarting the queue, it will be reflected in
  567 * the state of the adapter.
 568 *
 569 * EXECUTION ENVIRONMENT:
 570 *      Process environment
 571 */
 572static void ibmvscsis_disconnect(struct work_struct *work)
 573{
 574        struct scsi_info *vscsi = container_of(work, struct scsi_info,
 575                                               proc_work);
 576        u16 new_state;
 577        bool wait_idle = false;
 578        long rc = ADAPT_SUCCESS;
 579
 580        spin_lock_bh(&vscsi->intr_lock);
 581        new_state = vscsi->new_state;
 582        vscsi->new_state = 0;
 583
 584        pr_debug("disconnect: flags 0x%x, state 0x%hx\n", vscsi->flags,
 585                 vscsi->state);
 586
 587        /*
 588         * check which state we are in and see if we
  589         * should transition to the new state
 590         */
 591        switch (vscsi->state) {
 592        /*  Should never be called while in this state. */
 593        case NO_QUEUE:
 594        /*
 595         * Can never transition from this state;
  596         * ignore errors and log out.
 597         */
 598        case UNCONFIGURING:
 599                break;
 600
 601        /* can transition from this state to UNCONFIGURING */
 602        case ERR_DISCONNECT:
 603                if (new_state == UNCONFIGURING)
 604                        vscsi->state = new_state;
 605                break;
 606
 607        /*
  608         * Can transition from this state to unconfiguring
 609         * or err disconnect.
 610         */
 611        case ERR_DISCONNECT_RECONNECT:
 612                switch (new_state) {
 613                case UNCONFIGURING:
 614                case ERR_DISCONNECT:
 615                        vscsi->state = new_state;
 616                        break;
 617
 618                case WAIT_IDLE:
 619                        break;
 620                default:
 621                        break;
 622                }
 623                break;
 624
 625        /* can transition from this state to UNCONFIGURING */
 626        case ERR_DISCONNECTED:
 627                if (new_state == UNCONFIGURING)
 628                        vscsi->state = new_state;
 629                break;
 630
 631        /*
  632         * If this is a transition into an error state,
 633         * a client is attempting to establish a connection
 634         * and has violated the RPA protocol.
 635         * There can be nothing pending on the adapter although
 636         * there can be requests in the command queue.
 637         */
 638        case WAIT_ENABLED:
 639        case PART_UP_WAIT_ENAB:
 640                switch (new_state) {
 641                case ERR_DISCONNECT:
 642                        vscsi->flags |= RESPONSE_Q_DOWN;
 643                        vscsi->state = new_state;
 644                        vscsi->flags &= ~(SCHEDULE_DISCONNECT |
 645                                          DISCONNECT_SCHEDULED);
 646                        ibmvscsis_free_command_q(vscsi);
 647                        break;
 648                case ERR_DISCONNECT_RECONNECT:
 649                        ibmvscsis_reset_queue(vscsi, WAIT_ENABLED);
 650                        break;
 651
 652                /* should never happen */
 653                case WAIT_IDLE:
 654                        rc = ERROR;
 655                        dev_err(&vscsi->dev, "disconnect: invalid state %d for WAIT_IDLE\n",
 656                                vscsi->state);
 657                        break;
 658                }
 659                break;
 660
 661        case WAIT_IDLE:
 662                switch (new_state) {
 663                case ERR_DISCONNECT:
 664                case ERR_DISCONNECT_RECONNECT:
 665                        vscsi->state = new_state;
 666                        break;
 667                }
 668                break;
 669
 670        /*
  671         * The initiator has not done a successful SRP login,
  672         * or has done a successful SRP logout (the adapter was not
  673         * busy). In the first case there can be responses queued,
  674         * waiting for space on the initiator's response queue (MAD).
  675         * In the second case the adapter is idle. Assume the worst case,
  676         * i.e. the first case.
 677         */
 678        case WAIT_CONNECTION:
 679        case CONNECTED:
 680        case SRP_PROCESSING:
 681                wait_idle = true;
 682                vscsi->state = new_state;
 683                break;
 684
 685        /* can transition from this state to UNCONFIGURING */
 686        case UNDEFINED:
 687                if (new_state == UNCONFIGURING)
 688                        vscsi->state = new_state;
 689                break;
 690        default:
 691                break;
 692        }
 693
 694        if (wait_idle) {
 695                pr_debug("disconnect start wait, active %d, sched %d\n",
 696                         (int)list_empty(&vscsi->active_q),
 697                         (int)list_empty(&vscsi->schedule_q));
 698                if (!list_empty(&vscsi->active_q) ||
 699                    !list_empty(&vscsi->schedule_q)) {
 700                        vscsi->flags |= WAIT_FOR_IDLE;
 701                        pr_debug("disconnect flags 0x%x\n", vscsi->flags);
 702                        /*
  703                         * This routine cannot be called with the interrupt
 704                         * lock held.
 705                         */
 706                        spin_unlock_bh(&vscsi->intr_lock);
 707                        wait_for_completion(&vscsi->wait_idle);
 708                        spin_lock_bh(&vscsi->intr_lock);
 709                }
 710                pr_debug("disconnect stop wait\n");
 711
 712                ibmvscsis_adapter_idle(vscsi);
 713        }
 714
 715        spin_unlock_bh(&vscsi->intr_lock);
 716}
 717
 718/**
 719 * ibmvscsis_post_disconnect() - Schedule the disconnect
 720 * @vscsi:      Pointer to our adapter structure
 721 * @new_state:  State to move to after disconnecting
 722 * @flag_bits:  Flags to turn on in adapter structure
 723 *
 724 * If it's already been scheduled, then see if we need to "upgrade"
 725 * the new state (if the one passed in is more "severe" than the
 726 * previous one).
 727 *
 728 * PRECONDITION:
 729 *      interrupt lock is held
 730 */
 731static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
 732                                      uint flag_bits)
 733{
 734        uint state;
 735
 736        /* check the validity of the new state */
 737        switch (new_state) {
 738        case UNCONFIGURING:
 739        case ERR_DISCONNECT:
 740        case ERR_DISCONNECT_RECONNECT:
 741        case WAIT_IDLE:
 742                break;
 743
 744        default:
 745                dev_err(&vscsi->dev, "post_disconnect: Invalid new state %d\n",
 746                        new_state);
 747                return;
 748        }
 749
 750        vscsi->flags |= flag_bits;
 751
 752        pr_debug("post_disconnect: new_state 0x%x, flag_bits 0x%x, vscsi->flags 0x%x, state %hx\n",
 753                 new_state, flag_bits, vscsi->flags, vscsi->state);
 754
 755        if (!(vscsi->flags & (DISCONNECT_SCHEDULED | SCHEDULE_DISCONNECT))) {
 756                vscsi->flags |= SCHEDULE_DISCONNECT;
 757                vscsi->new_state = new_state;
 758
 759                INIT_WORK(&vscsi->proc_work, ibmvscsis_disconnect);
 760                (void)queue_work(vscsi->work_q, &vscsi->proc_work);
 761        } else {
 762                if (vscsi->new_state)
 763                        state = vscsi->new_state;
 764                else
 765                        state = vscsi->state;
 766
 767                switch (state) {
 768                case NO_QUEUE:
 769                case UNCONFIGURING:
 770                        break;
 771
 772                case ERR_DISCONNECTED:
 773                case ERR_DISCONNECT:
 774                case UNDEFINED:
 775                        if (new_state == UNCONFIGURING)
 776                                vscsi->new_state = new_state;
 777                        break;
 778
 779                case ERR_DISCONNECT_RECONNECT:
 780                        switch (new_state) {
 781                        case UNCONFIGURING:
 782                        case ERR_DISCONNECT:
 783                                vscsi->new_state = new_state;
 784                                break;
 785                        default:
 786                                break;
 787                        }
 788                        break;
 789
 790                case WAIT_ENABLED:
 791                case PART_UP_WAIT_ENAB:
 792                case WAIT_IDLE:
 793                case WAIT_CONNECTION:
 794                case CONNECTED:
 795                case SRP_PROCESSING:
 796                        vscsi->new_state = new_state;
 797                        break;
 798
 799                default:
 800                        break;
 801                }
 802        }
 803
 804        pr_debug("Leaving post_disconnect: flags 0x%x, new_state 0x%x\n",
 805                 vscsi->flags, vscsi->new_state);
 806}
 807
 808/**
 809 * ibmvscsis_trans_event() - Handle a Transport Event
 810 * @vscsi:      Pointer to our adapter structure
 811 * @crq:        Pointer to CRQ entry containing the Transport Event
 812 *
 813 * Do the logic to close the I_T nexus.  This function may not
 814 * behave to specification.
 815 *
 816 * EXECUTION ENVIRONMENT:
 817 *      Interrupt, interrupt lock held
 818 */
 819static long ibmvscsis_trans_event(struct scsi_info *vscsi,
 820                                  struct viosrp_crq *crq)
 821{
 822        long rc = ADAPT_SUCCESS;
 823
 824        pr_debug("trans_event: format %d, flags 0x%x, state 0x%hx\n",
 825                 (int)crq->format, vscsi->flags, vscsi->state);
 826
 827        switch (crq->format) {
 828        case MIGRATED:
 829        case PARTNER_FAILED:
 830        case PARTNER_DEREGISTER:
 831                ibmvscsis_delete_client_info(vscsi, true);
 832                break;
 833
 834        default:
 835                rc = ERROR;
 836                dev_err(&vscsi->dev, "trans_event: invalid format %d\n",
 837                        (uint)crq->format);
 838                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT,
 839                                          RESPONSE_Q_DOWN);
 840                break;
 841        }
 842
 843        if (rc == ADAPT_SUCCESS) {
 844                switch (vscsi->state) {
 845                case NO_QUEUE:
 846                case ERR_DISCONNECTED:
 847                case UNDEFINED:
 848                        break;
 849
 850                case UNCONFIGURING:
 851                        vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT);
 852                        break;
 853
 854                case WAIT_ENABLED:
 855                        break;
 856
 857                case WAIT_CONNECTION:
 858                        break;
 859
 860                case CONNECTED:
 861                        ibmvscsis_post_disconnect(vscsi, WAIT_IDLE,
 862                                                  (RESPONSE_Q_DOWN |
 863                                                   TRANS_EVENT));
 864                        break;
 865
 866                case PART_UP_WAIT_ENAB:
 867                        vscsi->state = WAIT_ENABLED;
 868                        break;
 869
 870                case SRP_PROCESSING:
 871                        if ((vscsi->debit > 0) ||
 872                            !list_empty(&vscsi->schedule_q) ||
 873                            !list_empty(&vscsi->waiting_rsp) ||
 874                            !list_empty(&vscsi->active_q)) {
 875                                pr_debug("debit %d, sched %d, wait %d, active %d\n",
 876                                         vscsi->debit,
 877                                         (int)list_empty(&vscsi->schedule_q),
 878                                         (int)list_empty(&vscsi->waiting_rsp),
 879                                         (int)list_empty(&vscsi->active_q));
 880                                pr_warn("connection lost with outstanding work\n");
 881                        } else {
 882                                pr_debug("trans_event: SRP Processing, but no outstanding work\n");
 883                        }
 884
 885                        ibmvscsis_post_disconnect(vscsi, WAIT_IDLE,
 886                                                  (RESPONSE_Q_DOWN |
 887                                                   TRANS_EVENT));
 888                        break;
 889
 890                case ERR_DISCONNECT:
 891                case ERR_DISCONNECT_RECONNECT:
 892                case WAIT_IDLE:
 893                        vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT);
 894                        break;
 895                }
 896        }
 897
 898        rc =  vscsi->flags & SCHEDULE_DISCONNECT;
 899
 900        pr_debug("Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n",
 901                 vscsi->flags, vscsi->state, rc);
 902
 903        return rc;
 904}
 905
 906/**
 907 * ibmvscsis_poll_cmd_q() - Poll Command Queue
 908 * @vscsi:      Pointer to our adapter structure
 909 *
 910 * Called to handle command elements that may have arrived while
 911 * interrupts were disabled.
 912 *
 913 * EXECUTION ENVIRONMENT:
 914 *      intr_lock must be held
 915 */
 916static void ibmvscsis_poll_cmd_q(struct scsi_info *vscsi)
 917{
 918        struct viosrp_crq *crq;
 919        long rc;
 920        bool ack = true;
 921        volatile u8 valid;
 922
  923        pr_debug("poll_cmd_q: flags 0x%x, state 0x%hx, q index %u\n",
 924                 vscsi->flags, vscsi->state, vscsi->cmd_q.index);
 925
 926        rc = vscsi->flags & SCHEDULE_DISCONNECT;
 927        crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
 928        valid = crq->valid;
 929        dma_rmb();
 930
 931        while (valid) {
 932poll_work:
 933                vscsi->cmd_q.index =
 934                        (vscsi->cmd_q.index + 1) & vscsi->cmd_q.mask;
 935
 936                if (!rc) {
 937                        rc = ibmvscsis_parse_command(vscsi, crq);
 938                } else {
 939                        if ((uint)crq->valid == VALID_TRANS_EVENT) {
 940                                /*
 941                                 * must service the transport layer events even
  942                                 * in an error state; don't break out until all
 943                                 * the consecutive transport events have been
 944                                 * processed
 945                                 */
 946                                rc = ibmvscsis_trans_event(vscsi, crq);
 947                        } else if (vscsi->flags & TRANS_EVENT) {
 948                                /*
  949                                 * if a transport event has occurred, leave
 950                                 * everything but transport events on the queue
 951                                 */
 952                                pr_debug("poll_cmd_q, ignoring\n");
 953
 954                                /*
 955                                 * need to decrement the queue index so we can
  956                                 * look at the element again
 957                                 */
 958                                if (vscsi->cmd_q.index)
 959                                        vscsi->cmd_q.index -= 1;
 960                                else
 961                                        /*
  962                                         * index is at 0, so it just wrapped;
  963                                         * have it index the last element in the queue
 964                                         */
 965                                        vscsi->cmd_q.index = vscsi->cmd_q.mask;
 966                                break;
 967                        }
 968                }
 969
 970                crq->valid = INVALIDATE_CMD_RESP_EL;
 971
 972                crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
 973                valid = crq->valid;
 974                dma_rmb();
 975        }
 976
 977        if (!rc) {
 978                if (ack) {
 979                        vio_enable_interrupts(vscsi->dma_dev);
 980                        ack = false;
 981                        pr_debug("poll_cmd_q, reenabling interrupts\n");
 982                }
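                     /*
                      * re-check the queue; an element that arrived while
                      * interrupts were still disabled would otherwise not be
                      * seen until the next interrupt
                      */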
 983                valid = crq->valid;
 984                dma_rmb();
 985                if (valid)
 986                        goto poll_work;
 987        }
 988
 989        pr_debug("Leaving poll_cmd_q: rc %ld\n", rc);
 990}
 991
 992/**
 993 * ibmvscsis_free_cmd_qs() - Free elements in queue
 994 * @vscsi:      Pointer to our adapter structure
 995 *
 996 * Free all of the elements on all queues that are waiting for
 997 * whatever reason.
 998 *
 999 * PRECONDITION:
1000 *      Called with interrupt lock held
1001 */
1002static void ibmvscsis_free_cmd_qs(struct scsi_info *vscsi)
1003{
1004        struct ibmvscsis_cmd *cmd, *nxt;
1005
 1006        pr_debug("free_cmd_qs: waiting_rsp empty %d, timer started %d\n",
1007                 (int)list_empty(&vscsi->waiting_rsp),
1008                 vscsi->rsp_q_timer.started);
1009
1010        list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
1011                list_del(&cmd->list);
1012                ibmvscsis_free_cmd_resources(vscsi, cmd);
1013        }
1014}
1015
1016/**
1017 * ibmvscsis_get_free_cmd() - Get free command from list
1018 * @vscsi:      Pointer to our adapter structure
1019 *
1020 * Must be called with interrupt lock held.
1021 */
1022static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi)
1023{
1024        struct ibmvscsis_cmd *cmd = NULL;
1025        struct iu_entry *iue;
1026
1027        iue = srp_iu_get(&vscsi->target);
1028        if (iue) {
1029                cmd = list_first_entry_or_null(&vscsi->free_cmd,
1030                                               struct ibmvscsis_cmd, list);
1031                if (cmd) {
1032                        list_del(&cmd->list);
1033                        cmd->iue = iue;
1034                        cmd->type = UNSET_TYPE;
1035                        memset(&cmd->se_cmd, 0, sizeof(cmd->se_cmd));
1036                } else {
1037                        srp_iu_put(iue);
1038                }
1039        }
1040
1041        return cmd;
1042}
1043
1044/**
1045 * ibmvscsis_adapter_idle() - Helper function to handle idle adapter
1046 * @vscsi:      Pointer to our adapter structure
1047 *
 1048 * This function is called when the adapter is idle while the driver
1049 * is attempting to clear an error condition.
1050 * The adapter is considered busy if any of its cmd queues
1051 * are non-empty. This function can be invoked
1052 * from the off level disconnect function.
1053 *
1054 * EXECUTION ENVIRONMENT:
1055 *      Process environment called with interrupt lock held
1056 */
1057static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
1058{
1059        int free_qs = false;
1060
1061        pr_debug("adapter_idle: flags 0x%x, state 0x%hx\n", vscsi->flags,
1062                 vscsi->state);
1063
1064        /* Only need to free qs if we're disconnecting from client */
1065        if (vscsi->state != WAIT_CONNECTION || vscsi->flags & TRANS_EVENT)
1066                free_qs = true;
1067
1068        switch (vscsi->state) {
1069        case ERR_DISCONNECT_RECONNECT:
1070                ibmvscsis_reset_queue(vscsi, WAIT_CONNECTION);
1071                pr_debug("adapter_idle, disc_rec: flags 0x%x\n", vscsi->flags);
1072                break;
1073
1074        case ERR_DISCONNECT:
1075                ibmvscsis_free_command_q(vscsi);
1076                vscsi->flags &= ~DISCONNECT_SCHEDULED;
1077                vscsi->flags |= RESPONSE_Q_DOWN;
1078                vscsi->state = ERR_DISCONNECTED;
1079                pr_debug("adapter_idle, disc: flags 0x%x, state 0x%hx\n",
1080                         vscsi->flags, vscsi->state);
1081                break;
1082
1083        case WAIT_IDLE:
1084                vscsi->rsp_q_timer.timer_pops = 0;
1085                vscsi->debit = 0;
1086                vscsi->credit = 0;
1087                if (vscsi->flags & TRANS_EVENT) {
1088                        vscsi->state = WAIT_CONNECTION;
1089                        vscsi->flags &= PRESERVE_FLAG_FIELDS;
1090                } else {
1091                        vscsi->state = CONNECTED;
1092                        vscsi->flags &= ~DISCONNECT_SCHEDULED;
1093                }
1094
1095                pr_debug("adapter_idle, wait: flags 0x%x, state 0x%hx\n",
1096                         vscsi->flags, vscsi->state);
1097                ibmvscsis_poll_cmd_q(vscsi);
1098                break;
1099
1100        case ERR_DISCONNECTED:
1101                vscsi->flags &= ~DISCONNECT_SCHEDULED;
1102                pr_debug("adapter_idle, disconnected: flags 0x%x, state 0x%hx\n",
1103                         vscsi->flags, vscsi->state);
1104                break;
1105
1106        default:
1107                dev_err(&vscsi->dev, "adapter_idle: in invalid state %d\n",
1108                        vscsi->state);
1109                break;
1110        }
1111
1112        if (free_qs)
1113                ibmvscsis_free_cmd_qs(vscsi);
1114
1115        /*
1116         * There is a timing window where we could lose a disconnect request.
1117         * The known path to this window occurs during the DISCONNECT_RECONNECT
1118         * case above: reset_queue calls free_command_q, which will release the
1119         * interrupt lock.  During that time, a new post_disconnect call can be
1120         * made with a "more severe" state (DISCONNECT or UNCONFIGURING).
1121         * Because the DISCONNECT_SCHEDULED flag is already set, post_disconnect
1122         * will only set the new_state.  Now free_command_q reacquires the intr
1123         * lock and clears the DISCONNECT_SCHEDULED flag (using PRESERVE_FLAG_
1124         * FIELDS), and the disconnect is lost.  This is particularly bad when
1125         * the new disconnect was for UNCONFIGURING, since the unconfigure hangs
1126         * forever.
 1127 * The fix is that free_command_q sets the acr state and acr flags if
 1128 * there is a change while the lock is dropped.
 1129 * Note: free_command_q writes to this state and clears it before
 1130 * releasing the lock; different callers invoke free_command_q at
 1131 * different times, so don't initialize these fields above.
1132         */
1133        if (vscsi->phyp_acr_state != 0) {
1134                /*
1135                 * set any bits in flags that may have been cleared by
1136                 * a call to free command queue in switch statement
1137                 * or reset queue
1138                 */
1139                vscsi->flags |= vscsi->phyp_acr_flags;
1140                ibmvscsis_post_disconnect(vscsi, vscsi->phyp_acr_state, 0);
1141                vscsi->phyp_acr_state = 0;
1142                vscsi->phyp_acr_flags = 0;
1143
1144                pr_debug("adapter_idle: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
1145                         vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
1146                         vscsi->phyp_acr_state);
1147        }
1148
1149        pr_debug("Leaving adapter_idle: flags 0x%x, state 0x%hx, new_state 0x%x\n",
1150                 vscsi->flags, vscsi->state, vscsi->new_state);
1151}
1152
1153/**
1154 * ibmvscsis_copy_crq_packet() - Copy CRQ Packet
1155 * @vscsi:      Pointer to our adapter structure
1156 * @cmd:        Pointer to command element to use to process the request
1157 * @crq:        Pointer to CRQ entry containing the request
1158 *
1159 * Copy the srp information unit from the hosted
1160 * partition using remote dma
1161 *
1162 * EXECUTION ENVIRONMENT:
1163 *      Interrupt, interrupt lock held
1164 */
1165static long ibmvscsis_copy_crq_packet(struct scsi_info *vscsi,
1166                                      struct ibmvscsis_cmd *cmd,
1167                                      struct viosrp_crq *crq)
1168{
1169        struct iu_entry *iue = cmd->iue;
1170        long rc = 0;
1171        u16 len;
1172
1173        len = be16_to_cpu(crq->IU_length);
1174        if ((len > SRP_MAX_IU_LEN) || (len == 0)) {
1175                dev_err(&vscsi->dev, "copy_crq: Invalid len %d passed", len);
1176                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
1177                return SRP_VIOLATION;
1178        }
1179
1180        rc = h_copy_rdma(len, vscsi->dds.window[REMOTE].liobn,
1181                         be64_to_cpu(crq->IU_data_ptr),
1182                         vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma);
1183
1184        switch (rc) {
1185        case H_SUCCESS:
1186                cmd->init_time = mftb();
1187                iue->remote_token = crq->IU_data_ptr;
1188                iue->iu_len = len;
1189                pr_debug("copy_crq: ioba 0x%llx, init_time 0x%llx\n",
1190                         be64_to_cpu(crq->IU_data_ptr), cmd->init_time);
1191                break;
1192        case H_PERMISSION:
1193                if (connection_broken(vscsi))
1194                        ibmvscsis_post_disconnect(vscsi,
1195                                                  ERR_DISCONNECT_RECONNECT,
1196                                                  (RESPONSE_Q_DOWN |
1197                                                   CLIENT_FAILED));
1198                else
1199                        ibmvscsis_post_disconnect(vscsi,
1200                                                  ERR_DISCONNECT_RECONNECT, 0);
1201
1202                dev_err(&vscsi->dev, "copy_crq: h_copy_rdma failed, rc %ld\n",
1203                        rc);
1204                break;
1205        case H_DEST_PARM:
1206        case H_SOURCE_PARM:
1207        default:
1208                dev_err(&vscsi->dev, "copy_crq: h_copy_rdma failed, rc %ld\n",
1209                        rc);
1210                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
1211                break;
1212        }
1213
1214        return rc;
1215}
1216
1217/**
 1218 * ibmvscsis_adapter_info - Service an Adapter Info Management Datagram (MAD)
1219 * @vscsi:      Pointer to our adapter structure
1220 * @iue:        Information Unit containing the Adapter Info MAD request
1221 *
1222 * EXECUTION ENVIRONMENT:
 1223 *      Interrupt, adapter lock is held
1224 */
1225static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
1226                                   struct iu_entry *iue)
1227{
1228        struct viosrp_adapter_info *mad = &vio_iu(iue)->mad.adapter_info;
1229        struct mad_adapter_info_data *info;
1230        uint flag_bits = 0;
1231        dma_addr_t token;
1232        long rc;
1233
1234        mad->common.status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
1235
1236        if (be16_to_cpu(mad->common.length) > sizeof(*info)) {
1237                mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
1238                return 0;
1239        }
1240
1241        info = dma_alloc_coherent(&vscsi->dma_dev->dev, sizeof(*info), &token,
1242                                  GFP_KERNEL);
1243        if (!info) {
1244                dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
1245                        iue->target);
1246                mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
1247                return 0;
1248        }
1249
1250        /* Get remote info */
1251        rc = h_copy_rdma(be16_to_cpu(mad->common.length),
1252                         vscsi->dds.window[REMOTE].liobn,
1253                         be64_to_cpu(mad->buffer),
1254                         vscsi->dds.window[LOCAL].liobn, token);
1255
1256        if (rc != H_SUCCESS) {
1257                if (rc == H_PERMISSION) {
1258                        if (connection_broken(vscsi))
1259                                flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
1260                }
1261                pr_warn("adapter_info: h_copy_rdma from client failed, rc %ld\n",
1262                        rc);
1263                pr_debug("adapter_info: ioba 0x%llx, flags 0x%x, flag_bits 0x%x\n",
1264                         be64_to_cpu(mad->buffer), vscsi->flags, flag_bits);
1265                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
1266                                          flag_bits);
1267                goto free_dma;
1268        }
1269
1270        /*
1271         * Copy client info, but ignore partition number, which we
1272         * already got from phyp - unless we failed to get it from
1273         * phyp (e.g. if we're running on a p5 system).
1274         */
1275        if (vscsi->client_data.partition_number == 0)
1276                vscsi->client_data.partition_number =
1277                        be32_to_cpu(info->partition_number);
1278        strncpy(vscsi->client_data.srp_version, info->srp_version,
1279                sizeof(vscsi->client_data.srp_version));
1280        strncpy(vscsi->client_data.partition_name, info->partition_name,
1281                sizeof(vscsi->client_data.partition_name));
1282        vscsi->client_data.mad_version = be32_to_cpu(info->mad_version);
1283        vscsi->client_data.os_type = be32_to_cpu(info->os_type);
1284
1285        /* Copy our info */
1286        strncpy(info->srp_version, SRP_VERSION,
1287                sizeof(info->srp_version));
1288        strncpy(info->partition_name, vscsi->dds.partition_name,
1289                sizeof(info->partition_name));
1290        info->partition_number = cpu_to_be32(vscsi->dds.partition_num);
1291        info->mad_version = cpu_to_be32(MAD_VERSION_1);
1292        info->os_type = cpu_to_be32(LINUX);
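             /* advertise the largest transfer supported on this port: 128 pages */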
1293        memset(&info->port_max_txu[0], 0, sizeof(info->port_max_txu));
1294        info->port_max_txu[0] = cpu_to_be32(128 * PAGE_SIZE);
1295
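             /*
              * make sure the adapter info buffer is fully written before
              * h_copy_rdma sends it to the client
              */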
1296        dma_wmb();
1297        rc = h_copy_rdma(sizeof(*info), vscsi->dds.window[LOCAL].liobn,
1298                         token, vscsi->dds.window[REMOTE].liobn,
1299                         be64_to_cpu(mad->buffer));
1300        switch (rc) {
1301        case H_SUCCESS:
1302                break;
1303
1304        case H_SOURCE_PARM:
1305        case H_DEST_PARM:
1306        case H_PERMISSION:
1307                if (connection_broken(vscsi))
1308                        flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
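                     /* fall through */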
1309        default:
1310                dev_err(&vscsi->dev, "adapter_info: h_copy_rdma to client failed, rc %ld\n",
1311                        rc);
1312                ibmvscsis_post_disconnect(vscsi,
1313                                          ERR_DISCONNECT_RECONNECT,
1314                                          flag_bits);
1315                break;
1316        }
1317
1318free_dma:
1319        dma_free_coherent(&vscsi->dma_dev->dev, sizeof(*info), info, token);
1320        pr_debug("Leaving adapter_info, rc %ld\n", rc);
1321
1322        return rc;
1323}
1324
1325/**
 1326 * ibmvscsis_cap_mad() - Service a Capabilities Management Datagram (MAD)
1327 * @vscsi:      Pointer to our adapter structure
1328 * @iue:        Information Unit containing the Capabilities MAD request
1329 *
1330 * NOTE: if you return an error from this routine you must be
1331 * disconnecting or you will cause a hang
1332 *
1333 * EXECUTION ENVIRONMENT:
1334 *      Interrupt called with adapter lock held
1335 */
1336static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
1337{
1338        struct viosrp_capabilities *mad = &vio_iu(iue)->mad.capabilities;
1339        struct capabilities *cap;
1340        struct mad_capability_common *common;
1341        dma_addr_t token;
1342        u16 olen, len, status, min_len, cap_len;
1343        u32 flag;
1344        uint flag_bits = 0;
1345        long rc = 0;
1346
1347        olen = be16_to_cpu(mad->common.length);
1348        /*
1349         * struct capabilities hardcodes a couple capabilities after the
1350         * header, but the capabilities can actually be in any order.
1351         */
1352        min_len = offsetof(struct capabilities, migration);
1353        if ((olen < min_len) || (olen > PAGE_SIZE)) {
1354                pr_warn("cap_mad: invalid len %d\n", olen);
1355                mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
1356                return 0;
1357        }
1358
1359        cap = dma_alloc_coherent(&vscsi->dma_dev->dev, olen, &token,
1360                                 GFP_KERNEL);
1361        if (!cap) {
1362                dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
1363                        iue->target);
1364                mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
1365                return 0;
1366        }
1367        rc = h_copy_rdma(olen, vscsi->dds.window[REMOTE].liobn,
1368                         be64_to_cpu(mad->buffer),
1369                         vscsi->dds.window[LOCAL].liobn, token);
1370        if (rc == H_SUCCESS) {
1371                strncpy(cap->name, dev_name(&vscsi->dma_dev->dev),
1372                        SRP_MAX_LOC_LEN);
1373
1374                len = olen - min_len;
1375                status = VIOSRP_MAD_SUCCESS;
1376                common = (struct mad_capability_common *)&cap->migration;
1377
1378                while ((len > 0) && (status == VIOSRP_MAD_SUCCESS) && !rc) {
1379                        pr_debug("cap_mad: len left %hd, cap type %d, cap len %hd\n",
1380                                 len, be32_to_cpu(common->cap_type),
1381                                 be16_to_cpu(common->length));
1382
1383                        cap_len = be16_to_cpu(common->length);
1384                        if (cap_len > len) {
1385                                dev_err(&vscsi->dev, "cap_mad: cap len mismatch with total len\n");
1386                                status = VIOSRP_MAD_FAILED;
1387                                break;
1388                        }
1389
1390                        if (cap_len == 0) {
1391                                dev_err(&vscsi->dev, "cap_mad: cap len is 0\n");
1392                                status = VIOSRP_MAD_FAILED;
1393                                break;
1394                        }
1395
1396                        switch (common->cap_type) {
1397                        default:
1398                                pr_debug("cap_mad: unsupported capability\n");
1399                                common->server_support = 0;
1400                                flag = cpu_to_be32((u32)CAP_LIST_SUPPORTED);
1401                                cap->flags &= ~flag;
1402                                break;
1403                        }
1404
1405                        len = len - cap_len;
1406                        common = (struct mad_capability_common *)
1407                                ((char *)common + cap_len);
1408                }
1409
1410                mad->common.status = cpu_to_be16(status);
1411
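                /*
                 * Ensure the status and capability updates written to the
                 * local buffer are visible before h_copy_rdma() hands the
                 * buffer back to the client.
                 */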
1412                dma_wmb();
1413                rc = h_copy_rdma(olen, vscsi->dds.window[LOCAL].liobn, token,
1414                                 vscsi->dds.window[REMOTE].liobn,
1415                                 be64_to_cpu(mad->buffer));
1416
1417                if (rc != H_SUCCESS) {
1418                        pr_debug("cap_mad: failed to copy to client, rc %ld\n",
1419                                 rc);
1420
1421                        if (rc == H_PERMISSION) {
1422                                if (connection_broken(vscsi))
1423                                        flag_bits = (RESPONSE_Q_DOWN |
1424                                                     CLIENT_FAILED);
1425                        }
1426
1427                        pr_warn("cap_mad: error copying data to client, rc %ld\n",
1428                                rc);
1429                        ibmvscsis_post_disconnect(vscsi,
1430                                                  ERR_DISCONNECT_RECONNECT,
1431                                                  flag_bits);
1432                }
1433        }
1434
1435        dma_free_coherent(&vscsi->dma_dev->dev, olen, cap, token);
1436
1437        pr_debug("Leaving cap_mad, rc %ld, client_cap 0x%x\n",
1438                 rc, vscsi->client_cap);
1439
1440        return rc;
1441}
1442
1443/**
1444 * ibmvscsis_process_mad() - Service a MAnagement Datagram
1445 * @vscsi:      Pointer to our adapter structure
1446 * @iue:        Information Unit containing the MAD request
1447 *
1448 * Must be called with interrupt lock held.
1449 */
1450static long ibmvscsis_process_mad(struct scsi_info *vscsi, struct iu_entry *iue)
1451{
1452        struct mad_common *mad = (struct mad_common *)&vio_iu(iue)->mad;
1453        struct viosrp_empty_iu *empty;
1454        long rc = ADAPT_SUCCESS;
1455
1456        switch (be32_to_cpu(mad->type)) {
1457        case VIOSRP_EMPTY_IU_TYPE:
1458                empty = &vio_iu(iue)->mad.empty_iu;
1459                vscsi->empty_iu_id = be64_to_cpu(empty->buffer);
1460                vscsi->empty_iu_tag = be64_to_cpu(empty->common.tag);
1461                mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
1462                break;
1463        case VIOSRP_ADAPTER_INFO_TYPE:
1464                rc = ibmvscsis_adapter_info(vscsi, iue);
1465                break;
1466        case VIOSRP_CAPABILITIES_TYPE:
1467                rc = ibmvscsis_cap_mad(vscsi, iue);
1468                break;
1469        case VIOSRP_ENABLE_FAST_FAIL:
1470                if (vscsi->state == CONNECTED) {
1471                        vscsi->fast_fail = true;
1472                        mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
1473                } else {
1474                        pr_warn("fast fail mad sent after login\n");
1475                        mad->status = cpu_to_be16(VIOSRP_MAD_FAILED);
1476                }
1477                break;
1478        default:
1479                mad->status = cpu_to_be16(VIOSRP_MAD_NOT_SUPPORTED);
1480                break;
1481        }
1482
1483        return rc;
1484}
1485
1486/**
1487 * srp_snd_msg_failed() - Handle an error when sending a response
1488 * @vscsi:      Pointer to our adapter structure
1489 * @rc:         The return code from the h_send_crq command
1490 *
1491 * Must be called with interrupt lock held.
1492 */
1493static void srp_snd_msg_failed(struct scsi_info *vscsi, long rc)
1494{
1495        ktime_t kt;
1496
1497        if (rc != H_DROPPED) {
1498                ibmvscsis_free_cmd_qs(vscsi);
1499
1500                if (rc == H_CLOSED)
1501                        vscsi->flags |= CLIENT_FAILED;
1502
1503                /* don't flag the same problem multiple times */
1504                if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
1505                        vscsi->flags |= RESPONSE_Q_DOWN;
1506                        if (!(vscsi->state & (ERR_DISCONNECT |
1507                                              ERR_DISCONNECT_RECONNECT |
1508                                              ERR_DISCONNECTED | UNDEFINED))) {
1509                                dev_err(&vscsi->dev, "snd_msg_failed: setting RESPONSE_Q_DOWN, state 0x%hx, flags 0x%x, rc %ld\n",
1510                                        vscsi->state, vscsi->flags, rc);
1511                        }
1512                        ibmvscsis_post_disconnect(vscsi,
1513                                                  ERR_DISCONNECT_RECONNECT, 0);
1514                }
1515                return;
1516        }
1517
1518        /*
1519         * The response queue is full.
1520         * If the server is processing SRP requests, i.e.
1521         * the client has successfully done an
1522         * SRP_LOGIN, then it will wait forever for room in
1523         * the queue.  However, if the system admin
1524         * is attempting to unconfigure the server then one
1525         * or more children will be in a state where
1526         * they are being removed. So if there is even one
1527         * child being removed then the driver assumes
1528         * the system admin is attempting to break the
1529         * connection with the client and MAX_TIMER_POPS
1530         * is honored.
1531         */
1532        if ((vscsi->rsp_q_timer.timer_pops < MAX_TIMER_POPS) ||
1533            (vscsi->state == SRP_PROCESSING)) {
1534                pr_debug("snd_msg_failed: response queue full, flags 0x%x, timer started %d, pops %d\n",
1535                         vscsi->flags, (int)vscsi->rsp_q_timer.started,
1536                         vscsi->rsp_q_timer.timer_pops);
1537
1538                /*
1539                 * Check if the timer is running; if it
1540                 * is not then start it up.
1541                 */
1542                if (!vscsi->rsp_q_timer.started) {
1543                        if (vscsi->rsp_q_timer.timer_pops <
1544                            MAX_TIMER_POPS) {
1545                                kt = ktime_set(0, WAIT_NANO_SECONDS);
1546                        } else {
1547                                /*
1548                                 * slide the timeslice if the maximum
1549                                 * timer pops have already happened
1550                                 */
1551                                kt = ktime_set(WAIT_SECONDS, 0);
1552                        }
1553
1554                        vscsi->rsp_q_timer.started = true;
1555                        hrtimer_start(&vscsi->rsp_q_timer.timer, kt,
1556                                      HRTIMER_MODE_REL);
1557                }
1558        } else {
1559                /*
1560                 * TBD: Do we need to worry about this? Need to get
1561                 *      remove working.
1562                 */
1563                /*
1564                 * waited a long time and it appears the system admin
1565         * is bringing this driver down
1566                 */
1567                vscsi->flags |= RESPONSE_Q_DOWN;
1568                ibmvscsis_free_cmd_qs(vscsi);
1569                /*
1570                 * if the driver is already attempting to disconnect
1571                 * from the client and has already logged an error
1572                 * trace this event but don't put it in the error log
1573                 */
1574                if (!(vscsi->state & (ERR_DISCONNECT |
1575                                      ERR_DISCONNECT_RECONNECT |
1576                                      ERR_DISCONNECTED | UNDEFINED))) {
1577                        dev_err(&vscsi->dev, "client crq full too long\n");
1578                        ibmvscsis_post_disconnect(vscsi,
1579                                                  ERR_DISCONNECT_RECONNECT,
1580                                                  0);
1581                }
1582        }
1583}
1584
1585/**
1586 * ibmvscsis_send_messages() - Send a Response
1587 * @vscsi:      Pointer to our adapter structure
1588 *
1589 * Send a response, first checking the waiting queue. Responses are
1590 * sent in the order they are received. If a response cannot be sent
1591 * because the client queue is full, it stays on the waiting queue.
1592 *
1593 * PRECONDITION:
1594 *      Called with interrupt lock held
1595 */
1596static void ibmvscsis_send_messages(struct scsi_info *vscsi)
1597{
1598        u64 msg_hi = 0;
1599        /* note: do not attempt to access the IU_data_ptr with this pointer;
1600         * it is not valid
1601         */
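        /*
         * The first 8 bytes of each response CRQ element are composed in
         * msg_hi through this overlay; h_send_crq() below is given msg_hi
         * as the high word and the response tag as the low word.
         */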
1602        struct viosrp_crq *crq = (struct viosrp_crq *)&msg_hi;
1603        struct ibmvscsis_cmd *cmd, *nxt;
1604        struct iu_entry *iue;
1605        long rc = ADAPT_SUCCESS;
1606
1607        if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
1608                list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
1609                        iue = cmd->iue;
1610
1611                        crq->valid = VALID_CMD_RESP_EL;
1612                        crq->format = cmd->rsp.format;
1613
1614                        if (cmd->flags & CMD_FAST_FAIL)
1615                                crq->status = VIOSRP_ADAPTER_FAIL;
1616
1617                        crq->IU_length = cpu_to_be16(cmd->rsp.len);
1618
1619                        rc = h_send_crq(vscsi->dma_dev->unit_address,
1620                                        be64_to_cpu(msg_hi),
1621                                        be64_to_cpu(cmd->rsp.tag));
1622
1623                        pr_debug("send_messages: tag 0x%llx, rc %ld\n",
1624                                 be64_to_cpu(cmd->rsp.tag), rc);
1625
1626                        /* if all ok free up the command element resources */
1627                        if (rc == H_SUCCESS) {
1628                                /* some movement has occurred */
1629                                vscsi->rsp_q_timer.timer_pops = 0;
1630                                list_del(&cmd->list);
1631
1632                                ibmvscsis_free_cmd_resources(vscsi, cmd);
1633                        } else {
1634                                srp_snd_msg_failed(vscsi, rc);
1635                                break;
1636                        }
1637                }
1638
1639                if (!rc) {
1640                        /*
1641                         * The timer could pop with the queue empty.  If
1642                         * this happens, rc will always indicate a
1643                         * success; clear the pop count.
1644                         */
1645                        vscsi->rsp_q_timer.timer_pops = 0;
1646                }
1647        } else {
1648                ibmvscsis_free_cmd_qs(vscsi);
1649        }
1650}
1651
1652/* Called with intr lock held */
1653static void ibmvscsis_send_mad_resp(struct scsi_info *vscsi,
1654                                    struct ibmvscsis_cmd *cmd,
1655                                    struct viosrp_crq *crq)
1656{
1657        struct iu_entry *iue = cmd->iue;
1658        struct mad_common *mad = (struct mad_common *)&vio_iu(iue)->mad;
1659        uint flag_bits = 0;
1660        long rc;
1661
1662        dma_wmb();
1663        rc = h_copy_rdma(sizeof(struct mad_common),
1664                         vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma,
1665                         vscsi->dds.window[REMOTE].liobn,
1666                         be64_to_cpu(crq->IU_data_ptr));
1667        if (!rc) {
1668                cmd->rsp.format = VIOSRP_MAD_FORMAT;
1669                cmd->rsp.len = sizeof(struct mad_common);
1670                cmd->rsp.tag = mad->tag;
1671                list_add_tail(&cmd->list, &vscsi->waiting_rsp);
1672                ibmvscsis_send_messages(vscsi);
1673        } else {
1674                pr_debug("Error sending mad response, rc %ld\n", rc);
1675                if (rc == H_PERMISSION) {
1676                        if (connection_broken(vscsi))
1677                                flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
1678                }
1679                dev_err(&vscsi->dev, "mad: failed to copy to client, rc %ld\n",
1680                        rc);
1681
1682                ibmvscsis_free_cmd_resources(vscsi, cmd);
1683                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
1684                                          flag_bits);
1685        }
1686}
1687
1688/**
1689 * ibmvscsis_mad() - Service a MAnagement Datagram.
1690 * @vscsi:      Pointer to our adapter structure
1691 * @crq:        Pointer to the CRQ entry containing the MAD request
1692 *
1693 * EXECUTION ENVIRONMENT:
1694 *      Interrupt, called with adapter lock held
1695 */
1696static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
1697{
1698        struct iu_entry *iue;
1699        struct ibmvscsis_cmd *cmd;
1700        struct mad_common *mad;
1701        long rc = ADAPT_SUCCESS;
1702
1703        switch (vscsi->state) {
1704                /*
1705                 * We have not exchanged Init Msgs yet, so this MAD was sent
1706                 * before the last Transport Event; client will not be
1707                 * expecting a response.
1708                 */
1709        case WAIT_CONNECTION:
1710                pr_debug("mad: in Wait Connection state, ignoring MAD, flags %d\n",
1711                         vscsi->flags);
1712                return ADAPT_SUCCESS;
1713
1714        case SRP_PROCESSING:
1715        case CONNECTED:
1716                break;
1717
1718                /*
1719                 * We should never get here while we're in these states.
1720                 * Just log an error and get out.
1721                 */
1722        case UNCONFIGURING:
1723        case WAIT_IDLE:
1724        case ERR_DISCONNECT:
1725        case ERR_DISCONNECT_RECONNECT:
1726        default:
1727                dev_err(&vscsi->dev, "mad: invalid adapter state %d for mad\n",
1728                        vscsi->state);
1729                return ADAPT_SUCCESS;
1730        }
1731
1732        cmd = ibmvscsis_get_free_cmd(vscsi);
1733        if (!cmd) {
1734                dev_err(&vscsi->dev, "mad: failed to get cmd, debit %d\n",
1735                        vscsi->debit);
1736                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
1737                return ERROR;
1738        }
1739        iue = cmd->iue;
1740        cmd->type = ADAPTER_MAD;
1741
1742        rc = ibmvscsis_copy_crq_packet(vscsi, cmd, crq);
1743        if (!rc) {
1744                mad = (struct mad_common *)&vio_iu(iue)->mad;
1745
1746                pr_debug("mad: type %d\n", be32_to_cpu(mad->type));
1747
1748                if ((s16)be16_to_cpu(mad->length) < 0) {
1749                        dev_err(&vscsi->dev, "mad: length is < 0\n");
1750                        ibmvscsis_post_disconnect(vscsi,
1751                                                  ERR_DISCONNECT_RECONNECT, 0);
1752                        rc = SRP_VIOLATION;
1753                } else {
1754                        rc = ibmvscsis_process_mad(vscsi, iue);
1755                }
1756
1757                pr_debug("mad: status %hd, rc %ld\n", be16_to_cpu(mad->status),
1758                         rc);
1759
1760                if (!rc)
1761                        ibmvscsis_send_mad_resp(vscsi, cmd, crq);
1762        } else {
1763                ibmvscsis_free_cmd_resources(vscsi, cmd);
1764        }
1765
1766        pr_debug("Leaving mad, rc %ld\n", rc);
1767        return rc;
1768}
1769
1770/**
1771 * ibmvscsis_login_rsp() - Create/copy a login response notice to the client
1772 * @vscsi:      Pointer to our adapter structure
1773 * @cmd:        Pointer to the command for the SRP Login request
1774 *
1775 * EXECUTION ENVIRONMENT:
1776 *      Interrupt, interrupt lock held
1777 */
1778static long ibmvscsis_login_rsp(struct scsi_info *vscsi,
1779                                struct ibmvscsis_cmd *cmd)
1780{
1781        struct iu_entry *iue = cmd->iue;
1782        struct srp_login_rsp *rsp = &vio_iu(iue)->srp.login_rsp;
1783        struct format_code *fmt;
1784        uint flag_bits = 0;
1785        long rc = ADAPT_SUCCESS;
1786
1787        memset(rsp, 0, sizeof(struct srp_login_rsp));
1788
1789        rsp->opcode = SRP_LOGIN_RSP;
1790        rsp->req_lim_delta = cpu_to_be32(vscsi->request_limit);
1791        rsp->tag = cmd->rsp.tag;
1792        rsp->max_it_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
1793        rsp->max_ti_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
1794        fmt = (struct format_code *)&rsp->buf_fmt;
1795        fmt->buffers = SUPPORTED_FORMATS;
1796        vscsi->credit = 0;
1797
1798        cmd->rsp.len = sizeof(struct srp_login_rsp);
1799
1800        dma_wmb();
1801        rc = h_copy_rdma(cmd->rsp.len, vscsi->dds.window[LOCAL].liobn,
1802                         iue->sbuf->dma, vscsi->dds.window[REMOTE].liobn,
1803                         be64_to_cpu(iue->remote_token));
1804
1805        switch (rc) {
1806        case H_SUCCESS:
1807                break;
1808
1809        case H_PERMISSION:
1810                if (connection_broken(vscsi))
1811                        flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
1812                dev_err(&vscsi->dev, "login_rsp: error copying to client, rc %ld\n",
1813                        rc);
1814                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
1815                                          flag_bits);
1816                break;
1817        case H_SOURCE_PARM:
1818        case H_DEST_PARM:
1819        default:
1820                dev_err(&vscsi->dev, "login_rsp: error copying to client, rc %ld\n",
1821                        rc);
1822                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
1823                break;
1824        }
1825
1826        return rc;
1827}
1828
1829/**
1830 * ibmvscsis_srp_login_rej() - Create/copy a login rejection notice to client
1831 * @vscsi:      Pointer to our adapter structure
1832 * @cmd:        Pointer to the command for the SRP Login request
1833 * @reason:     The reason the SRP Login is being rejected, per SRP protocol
1834 *
1835 * EXECUTION ENVIRONMENT:
1836 *      Interrupt, interrupt lock held
1837 */
1838static long ibmvscsis_srp_login_rej(struct scsi_info *vscsi,
1839                                    struct ibmvscsis_cmd *cmd, u32 reason)
1840{
1841        struct iu_entry *iue = cmd->iue;
1842        struct srp_login_rej *rej = &vio_iu(iue)->srp.login_rej;
1843        struct format_code *fmt;
1844        uint flag_bits = 0;
1845        long rc = ADAPT_SUCCESS;
1846
1847        memset(rej, 0, sizeof(*rej));
1848
1849        rej->opcode = SRP_LOGIN_REJ;
1850        rej->reason = cpu_to_be32(reason);
1851        rej->tag = cmd->rsp.tag;
1852        fmt = (struct format_code *)&rej->buf_fmt;
1853        fmt->buffers = SUPPORTED_FORMATS;
1854
1855        cmd->rsp.len = sizeof(*rej);
1856
1857        dma_wmb();
1858        rc = h_copy_rdma(cmd->rsp.len, vscsi->dds.window[LOCAL].liobn,
1859                         iue->sbuf->dma, vscsi->dds.window[REMOTE].liobn,
1860                         be64_to_cpu(iue->remote_token));
1861
1862        switch (rc) {
1863        case H_SUCCESS:
1864                break;
1865        case H_PERMISSION:
1866                if (connection_broken(vscsi))
1867                        flag_bits =  RESPONSE_Q_DOWN | CLIENT_FAILED;
1868                dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n",
1869                        rc);
1870                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
1871                                          flag_bits);
1872                break;
1873        case H_SOURCE_PARM:
1874        case H_DEST_PARM:
1875        default:
1876                dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n",
1877                        rc);
1878                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
1879                break;
1880        }
1881
1882        return rc;
1883}
1884
1885static int ibmvscsis_make_nexus(struct ibmvscsis_tport *tport)
1886{
1887        char *name = tport->tport_name;
1888        struct ibmvscsis_nexus *nexus;
1889        int rc;
1890
1891        if (tport->ibmv_nexus) {
1892                pr_debug("tport->ibmv_nexus already exists\n");
1893                return 0;
1894        }
1895
1896        nexus = kzalloc(sizeof(*nexus), GFP_KERNEL);
1897        if (!nexus) {
1898                pr_err("Unable to allocate struct ibmvscsis_nexus\n");
1899                return -ENOMEM;
1900        }
1901
1902        nexus->se_sess = target_alloc_session(&tport->se_tpg, 0, 0,
1903                                              TARGET_PROT_NORMAL, name, nexus,
1904                                              NULL);
1905        if (IS_ERR(nexus->se_sess)) {
1906                rc = PTR_ERR(nexus->se_sess);
1907                goto transport_init_fail;
1908        }
1909
1910        tport->ibmv_nexus = nexus;
1911
1912        return 0;
1913
1914transport_init_fail:
1915        kfree(nexus);
1916        return rc;
1917}
1918
1919static int ibmvscsis_drop_nexus(struct ibmvscsis_tport *tport)
1920{
1921        struct se_session *se_sess;
1922        struct ibmvscsis_nexus *nexus;
1923
1924        nexus = tport->ibmv_nexus;
1925        if (!nexus)
1926                return -ENODEV;
1927
1928        se_sess = nexus->se_sess;
1929        if (!se_sess)
1930                return -ENODEV;
1931
1932        /*
1933         * Release the SCSI I_T Nexus to the emulated ibmvscsis Target Port
1934         */
1935        target_wait_for_sess_cmds(se_sess);
1936        transport_deregister_session_configfs(se_sess);
1937        transport_deregister_session(se_sess);
1938        tport->ibmv_nexus = NULL;
1939        kfree(nexus);
1940
1941        return 0;
1942}
1943
1944/**
1945 * ibmvscsis_srp_login() - Process an SRP Login Request
1946 * @vscsi:      Pointer to our adapter structure
1947 * @cmd:        Command element to use to process the SRP Login request
1948 * @crq:        Pointer to CRQ entry containing the SRP Login request
1949 *
1950 * EXECUTION ENVIRONMENT:
1951 *      Interrupt, called with interrupt lock held
1952 */
1953static long ibmvscsis_srp_login(struct scsi_info *vscsi,
1954                                struct ibmvscsis_cmd *cmd,
1955                                struct viosrp_crq *crq)
1956{
1957        struct iu_entry *iue = cmd->iue;
1958        struct srp_login_req *req = &vio_iu(iue)->srp.login_req;
1959        struct port_id {
1960                __be64 id_extension;
1961                __be64 io_guid;
1962        } *iport, *tport;
1963        struct format_code *fmt;
1964        u32 reason = 0x0;
1965        long rc = ADAPT_SUCCESS;
1966
1967        iport = (struct port_id *)req->initiator_port_id;
1968        tport = (struct port_id *)req->target_port_id;
1969        fmt = (struct format_code *)&req->req_buf_fmt;
1970        if (be32_to_cpu(req->req_it_iu_len) > SRP_MAX_IU_LEN)
1971                reason = SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE;
1972        else if (be32_to_cpu(req->req_it_iu_len) < 64)
1973                reason = SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL;
1974        else if ((be64_to_cpu(iport->id_extension) > (MAX_NUM_PORTS - 1)) ||
1975                 (be64_to_cpu(tport->id_extension) > (MAX_NUM_PORTS - 1)))
1976                reason = SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL;
1977        else if (req->req_flags & SRP_MULTICHAN_MULTI)
1978                reason = SRP_LOGIN_REJ_MULTI_CHANNEL_UNSUPPORTED;
1979        else if (fmt->buffers & (~SUPPORTED_FORMATS))
1980                reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT;
1981        else if ((fmt->buffers & SUPPORTED_FORMATS) == 0)
1982                reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT;
1983
1984        if (vscsi->state == SRP_PROCESSING)
1985                reason = SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED;
1986
1987        rc = ibmvscsis_make_nexus(&vscsi->tport);
1988        if (rc)
1989                reason = SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL;
1990
1991        cmd->rsp.format = VIOSRP_SRP_FORMAT;
1992        cmd->rsp.tag = req->tag;
1993
1994        pr_debug("srp_login: reason 0x%x\n", reason);
1995
1996        if (reason)
1997                rc = ibmvscsis_srp_login_rej(vscsi, cmd, reason);
1998        else
1999                rc = ibmvscsis_login_rsp(vscsi, cmd);
2000
2001        if (!rc) {
2002                if (!reason)
2003                        vscsi->state = SRP_PROCESSING;
2004
2005                list_add_tail(&cmd->list, &vscsi->waiting_rsp);
2006                ibmvscsis_send_messages(vscsi);
2007        } else {
2008                ibmvscsis_free_cmd_resources(vscsi, cmd);
2009        }
2010
2011        pr_debug("Leaving srp_login, rc %ld\n", rc);
2012        return rc;
2013}
2014
2015/**
2016 * ibmvscsis_srp_i_logout() - Helper Function to close I_T Nexus
2017 * @vscsi:      Pointer to our adapter structure
2018 * @cmd:        Command element to use to process the Implicit Logout request
2019 * @crq:        Pointer to CRQ entry containing the Implicit Logout request
2020 *
2021 * Do the logic to close the I_T nexus.  This function may not fully
2022 * conform to the SRP specification.
2023 *
2024 * EXECUTION ENVIRONMENT:
2025 *      Interrupt, interrupt lock held
2026 */
2027static long ibmvscsis_srp_i_logout(struct scsi_info *vscsi,
2028                                   struct ibmvscsis_cmd *cmd,
2029                                   struct viosrp_crq *crq)
2030{
2031        struct iu_entry *iue = cmd->iue;
2032        struct srp_i_logout *log_out = &vio_iu(iue)->srp.i_logout;
2033        long rc = ADAPT_SUCCESS;
2034
2035        if ((vscsi->debit > 0) || !list_empty(&vscsi->schedule_q) ||
2036            !list_empty(&vscsi->waiting_rsp)) {
2037                dev_err(&vscsi->dev, "i_logout: outstanding work\n");
2038                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
2039        } else {
2040                cmd->rsp.format = SRP_FORMAT;
2041                cmd->rsp.tag = log_out->tag;
2042                cmd->rsp.len = sizeof(struct mad_common);
2043                list_add_tail(&cmd->list, &vscsi->waiting_rsp);
2044                ibmvscsis_send_messages(vscsi);
2045
2046                ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, 0);
2047        }
2048
2049        return rc;
2050}
2051
2052/* Called with intr lock held */
2053static void ibmvscsis_srp_cmd(struct scsi_info *vscsi, struct viosrp_crq *crq)
2054{
2055        struct ibmvscsis_cmd *cmd;
2056        struct iu_entry *iue;
2057        struct srp_cmd *srp;
2058        struct srp_tsk_mgmt *tsk;
2059        long rc;
2060
2061        if (vscsi->request_limit - vscsi->debit <= 0) {
2062                /* Client has exceeded request limit */
2063                dev_err(&vscsi->dev, "Client exceeded the request limit (%d), debit %d\n",
2064                        vscsi->request_limit, vscsi->debit);
2065                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2066                return;
2067        }
2068
2069        cmd = ibmvscsis_get_free_cmd(vscsi);
2070        if (!cmd) {
2071                dev_err(&vscsi->dev, "srp_cmd failed to get cmd, debit %d\n",
2072                        vscsi->debit);
2073                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2074                return;
2075        }
2076        iue = cmd->iue;
2077        srp = &vio_iu(iue)->srp.cmd;
2078
2079        rc = ibmvscsis_copy_crq_packet(vscsi, cmd, crq);
2080        if (rc) {
2081                ibmvscsis_free_cmd_resources(vscsi, cmd);
2082                return;
2083        }
2084
2085        if (vscsi->state == SRP_PROCESSING) {
2086                switch (srp->opcode) {
2087                case SRP_LOGIN_REQ:
2088                        rc = ibmvscsis_srp_login(vscsi, cmd, crq);
2089                        break;
2090
2091                case SRP_TSK_MGMT:
2092                        tsk = &vio_iu(iue)->srp.tsk_mgmt;
2093                        pr_debug("tsk_mgmt tag: %llu (0x%llx)\n", tsk->tag,
2094                                 tsk->tag);
2095                        cmd->rsp.tag = tsk->tag;
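                        /* count this request against the advertised request_limit */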
2096                        vscsi->debit += 1;
2097                        cmd->type = TASK_MANAGEMENT;
2098                        list_add_tail(&cmd->list, &vscsi->schedule_q);
2099                        queue_work(vscsi->work_q, &cmd->work);
2100                        break;
2101
2102                case SRP_CMD:
2103                        pr_debug("srp_cmd tag: %llu (0x%llx)\n", srp->tag,
2104                                 srp->tag);
2105                        cmd->rsp.tag = srp->tag;
2106                        vscsi->debit += 1;
2107                        cmd->type = SCSI_CDB;
2108                        /*
2109                         * We want to keep track of work waiting for
2110                         * the workqueue.
2111                         */
2112                        list_add_tail(&cmd->list, &vscsi->schedule_q);
2113                        queue_work(vscsi->work_q, &cmd->work);
2114                        break;
2115
2116                case SRP_I_LOGOUT:
2117                        rc = ibmvscsis_srp_i_logout(vscsi, cmd, crq);
2118                        break;
2119
2120                case SRP_CRED_RSP:
2121                case SRP_AER_RSP:
2122                default:
2123                        ibmvscsis_free_cmd_resources(vscsi, cmd);
2124                        dev_err(&vscsi->dev, "invalid srp cmd, opcode %d\n",
2125                                (uint)srp->opcode);
2126                        ibmvscsis_post_disconnect(vscsi,
2127                                                  ERR_DISCONNECT_RECONNECT, 0);
2128                        break;
2129                }
2130        } else if (srp->opcode == SRP_LOGIN_REQ && vscsi->state == CONNECTED) {
2131                rc = ibmvscsis_srp_login(vscsi, cmd, crq);
2132        } else {
2133                ibmvscsis_free_cmd_resources(vscsi, cmd);
2134                dev_err(&vscsi->dev, "Invalid state %d to handle srp cmd\n",
2135                        vscsi->state);
2136                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2137        }
2138}
2139
2140/**
2141 * ibmvscsis_ping_response() - Respond to a ping request
2142 * @vscsi:      Pointer to our adapter structure
2143 *
2144 * Let the client know that the server is alive and waiting on
2145 * its native I/O stack.
2146 * If any error occurs while queuing the ping
2147 * response, the client is either not accepting or not receiving
2148 * interrupts.  Disconnect with an error.
2149 *
2150 * EXECUTION ENVIRONMENT:
2151 *      Interrupt, interrupt lock held
2152 */
2153static long ibmvscsis_ping_response(struct scsi_info *vscsi)
2154{
2155        struct viosrp_crq *crq;
2156        u64 buffer[2] = { 0, 0 };
2157        long rc;
2158
2159        crq = (struct viosrp_crq *)&buffer;
2160        crq->valid = VALID_CMD_RESP_EL;
2161        crq->format = (u8)MESSAGE_IN_CRQ;
2162        crq->status = PING_RESPONSE;
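        /*
         * The 16-byte CRQ element is assembled in buffer[]; its two 8-byte
         * halves are passed to h_send_crq() as the high and low words.
         */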
2163
2164        rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]),
2165                        cpu_to_be64(buffer[MSG_LOW]));
2166
2167        switch (rc) {
2168        case H_SUCCESS:
2169                break;
2170        case H_CLOSED:
2171                vscsi->flags |= CLIENT_FAILED;  /* fall through */
2172        case H_DROPPED:
2173                vscsi->flags |= RESPONSE_Q_DOWN;  /* fall through */
2174        case H_REMOTE_PARM:
2175                dev_err(&vscsi->dev, "ping_response: h_send_crq failed, rc %ld\n",
2176                        rc);
2177                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2178                break;
2179        default:
2180                dev_err(&vscsi->dev, "ping_response: h_send_crq returned unknown rc %ld\n",
2181                        rc);
2182                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
2183                break;
2184        }
2185
2186        return rc;
2187}
2188
2189/**
2190 * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message
2191 * @vscsi:      Pointer to our adapter structure
2192 *
2193 * Must be called with interrupt lock held.
2194 */
2195static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi)
2196{
2197        long rc = ADAPT_SUCCESS;
2198
2199        switch (vscsi->state) {
2200        case NO_QUEUE:
2201        case ERR_DISCONNECT:
2202        case ERR_DISCONNECT_RECONNECT:
2203        case ERR_DISCONNECTED:
2204        case UNCONFIGURING:
2205        case UNDEFINED:
2206                rc = ERROR;
2207                break;
2208
2209        case WAIT_CONNECTION:
2210                vscsi->state = CONNECTED;
2211                break;
2212
2213        case WAIT_IDLE:
2214        case SRP_PROCESSING:
2215        case CONNECTED:
2216        case WAIT_ENABLED:
2217        case PART_UP_WAIT_ENAB:
2218        default:
2219                rc = ERROR;
2220                dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n",
2221                        vscsi->state);
2222                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2223                break;
2224        }
2225
2226        return rc;
2227}
2228
2229/**
2230 * ibmvscsis_handle_init_msg() - Respond to an Init Message
2231 * @vscsi:      Pointer to our adapter structure
2232 *
2233 * Must be called with interrupt lock held.
2234 */
2235static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
2236{
2237        long rc = ADAPT_SUCCESS;
2238
2239        switch (vscsi->state) {
2240        case WAIT_ENABLED:
2241                vscsi->state = PART_UP_WAIT_ENAB;
2242                break;
2243
2244        case WAIT_CONNECTION:
2245                rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
2246                switch (rc) {
2247                case H_SUCCESS:
2248                        vscsi->state = CONNECTED;
2249                        break;
2250
2251                case H_PARAMETER:
2252                        dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
2253                                rc);
2254                        ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
2255                        break;
2256
2257                case H_DROPPED:
2258                        dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
2259                                rc);
2260                        rc = ERROR;
2261                        ibmvscsis_post_disconnect(vscsi,
2262                                                  ERR_DISCONNECT_RECONNECT, 0);
2263                        break;
2264
2265                case H_CLOSED:
2266                        pr_warn("init_msg: failed to send, rc %ld\n", rc);
2267                        rc = 0;
2268                        break;
2269                }
2270                break;
2271
2272        case UNDEFINED:
2273                rc = ERROR;
2274                break;
2275
2276        case UNCONFIGURING:
2277                break;
2278
2279        case PART_UP_WAIT_ENAB:
2280        case CONNECTED:
2281        case SRP_PROCESSING:
2282        case WAIT_IDLE:
2283        case NO_QUEUE:
2284        case ERR_DISCONNECT:
2285        case ERR_DISCONNECT_RECONNECT:
2286        case ERR_DISCONNECTED:
2287        default:
2288                rc = ERROR;
2289                dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n",
2290                        vscsi->state);
2291                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2292                break;
2293        }
2294
2295        return rc;
2296}
2297
2298/**
2299 * ibmvscsis_init_msg() - Respond to an init message
2300 * @vscsi:      Pointer to our adapter structure
2301 * @crq:        Pointer to CRQ element containing the Init Message
2302 *
2303 * EXECUTION ENVIRONMENT:
2304 *      Interrupt, interrupt lock held
2305 */
2306static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
2307{
2308        long rc = ADAPT_SUCCESS;
2309
2310        pr_debug("init_msg: state 0x%hx\n", vscsi->state);
2311
2312        rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
2313                      (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
2314                      0);
2315        if (rc == H_SUCCESS) {
2316                vscsi->client_data.partition_number =
2317                        be64_to_cpu(*(u64 *)vscsi->map_buf);
2318                pr_debug("init_msg, part num %d\n",
2319                         vscsi->client_data.partition_number);
2320        } else {
2321                pr_debug("init_msg h_vioctl rc %ld\n", rc);
2322                rc = ADAPT_SUCCESS;
2323        }
2324
2325        if (crq->format == INIT_MSG) {
2326                rc = ibmvscsis_handle_init_msg(vscsi);
2327        } else if (crq->format == INIT_COMPLETE_MSG) {
2328                rc = ibmvscsis_handle_init_compl_msg(vscsi);
2329        } else {
2330                rc = ERROR;
2331                dev_err(&vscsi->dev, "init_msg: invalid format %d\n",
2332                        (uint)crq->format);
2333                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2334        }
2335
2336        return rc;
2337}
2338
2339/**
2340 * ibmvscsis_parse_command() - Parse an element taken from the cmd rsp queue.
2341 * @vscsi:      Pointer to our adapter structure
2342 * @crq:        Pointer to CRQ element containing the SRP request
2343 *
2344 * This function will return success if the command queue element is valid
2345 * and the srp iu or MAD request it pointed to was also valid.  That does
2346 * not mean that an error was not returned to the client.
2347 *
2348 * EXECUTION ENVIRONMENT:
2349 *      Interrupt, intr lock held
2350 */
2351static long ibmvscsis_parse_command(struct scsi_info *vscsi,
2352                                    struct viosrp_crq *crq)
2353{
2354        long rc = ADAPT_SUCCESS;
2355
2356        switch (crq->valid) {
2357        case VALID_CMD_RESP_EL:
2358                switch (crq->format) {
2359                case OS400_FORMAT:
2360                case AIX_FORMAT:
2361                case LINUX_FORMAT:
2362                case MAD_FORMAT:
2363                        if (vscsi->flags & PROCESSING_MAD) {
2364                                rc = ERROR;
2365                                dev_err(&vscsi->dev, "parse_command: already processing mad\n");
2366                                ibmvscsis_post_disconnect(vscsi,
2367                                                       ERR_DISCONNECT_RECONNECT,
2368                                                       0);
2369                        } else {
2370                                vscsi->flags |= PROCESSING_MAD;
2371                                rc = ibmvscsis_mad(vscsi, crq);
2372                        }
2373                        break;
2374
2375                case SRP_FORMAT:
2376                        ibmvscsis_srp_cmd(vscsi, crq);
2377                        break;
2378
2379                case MESSAGE_IN_CRQ:
2380                        if (crq->status == PING)
2381                                ibmvscsis_ping_response(vscsi);
2382                        break;
2383
2384                default:
2385                        dev_err(&vscsi->dev, "parse_command: invalid format %d\n",
2386                                (uint)crq->format);
2387                        ibmvscsis_post_disconnect(vscsi,
2388                                                  ERR_DISCONNECT_RECONNECT, 0);
2389                        break;
2390                }
2391                break;
2392
2393        case VALID_TRANS_EVENT:
2394                rc =  ibmvscsis_trans_event(vscsi, crq);
2395                break;
2396
2397        case VALID_INIT_MSG:
2398                rc = ibmvscsis_init_msg(vscsi, crq);
2399                break;
2400
2401        default:
2402                dev_err(&vscsi->dev, "parse_command: invalid valid field %d\n",
2403                        (uint)crq->valid);
2404                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2405                break;
2406        }
2407
2408        /*
2409         * Return only what the interrupt handler cares
2410         * about. Most errors we keep right on trucking.
2411         */
2412        rc = vscsi->flags & SCHEDULE_DISCONNECT;
2413
2414        return rc;
2415}
2416
2417static int read_dma_window(struct scsi_info *vscsi)
2418{
2419        struct vio_dev *vdev = vscsi->dma_dev;
2420        const __be32 *dma_window;
2421        const __be32 *prop;
2422
2423        /* TODO Using of_parse_dma_window would be better, but it doesn't give
2424         * a way to read multiple windows without already knowing the size of
2425         * a window or the number of windows.
2426         */
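        /*
         * "ibm,my-dma-window" is expected to describe two windows, each as
         * { liobn, address, size }, with the address and size fields
         * #dma-address-cells and #dma-size-cells wide respectively.  The
         * code below reads the first (local) liobn, skips over that
         * window's address and size, and then reads the second (remote)
         * liobn.
         */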
2427        dma_window = (const __be32 *)vio_get_attribute(vdev,
2428                                                       "ibm,my-dma-window",
2429                                                       NULL);
2430        if (!dma_window) {
2431                pr_err("Couldn't find ibm,my-dma-window property\n");
2432                return -1;
2433        }
2434
2435        vscsi->dds.window[LOCAL].liobn = be32_to_cpu(*dma_window);
2436        dma_window++;
2437
2438        prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells",
2439                                                 NULL);
2440        if (!prop) {
2441                pr_warn("Couldn't find ibm,#dma-address-cells property\n");
2442                dma_window++;
2443        } else {
2444                dma_window += be32_to_cpu(*prop);
2445        }
2446
2447        prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells",
2448                                                 NULL);
2449        if (!prop) {
2450                pr_warn("Couldn't find ibm,#dma-size-cells property\n");
2451                dma_window++;
2452        } else {
2453                dma_window += be32_to_cpu(*prop);
2454        }
2455
2456        /* dma_window should point to the second window now */
2457        vscsi->dds.window[REMOTE].liobn = be32_to_cpu(*dma_window);
2458
2459        return 0;
2460}
2461
2462static struct ibmvscsis_tport *ibmvscsis_lookup_port(const char *name)
2463{
2464        struct ibmvscsis_tport *tport = NULL;
2465        struct vio_dev *vdev;
2466        struct scsi_info *vscsi;
2467
2468        spin_lock_bh(&ibmvscsis_dev_lock);
2469        list_for_each_entry(vscsi, &ibmvscsis_dev_list, list) {
2470                vdev = vscsi->dma_dev;
2471                if (!strcmp(dev_name(&vdev->dev), name)) {
2472                        tport = &vscsi->tport;
2473                        break;
2474                }
2475        }
2476        spin_unlock_bh(&ibmvscsis_dev_lock);
2477
2478        return tport;
2479}
2480
2481/**
2482 * ibmvscsis_parse_cmd() - Parse SRP Command
2483 * @vscsi:      Pointer to our adapter structure
2484 * @cmd:        Pointer to command element with SRP command
2485 *
2486 * Parse the srp command; if it is valid then submit it to tcm.
2487 * Note: The return code does not reflect the status of the SCSI CDB.
2488 *
2489 * EXECUTION ENVIRONMENT:
2490 *      Process level
2491 */
2492static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
2493                                struct ibmvscsis_cmd *cmd)
2494{
2495        struct iu_entry *iue = cmd->iue;
2496        struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf;
2497        struct ibmvscsis_nexus *nexus;
2498        u64 data_len = 0;
2499        enum dma_data_direction dir;
2500        int attr = 0;
2501        int rc = 0;
2502
2503        nexus = vscsi->tport.ibmv_nexus;
2504        /*
2505         * additional length in bytes.  Note that the SRP spec says that
2506         * additional length is in 4-byte words, but technically the
2507         * additional length field is only the upper 6 bits of the byte.
2508         * The lower 2 bits are reserved.  If the lower 2 bits are 0 (as
2509         * all reserved fields should be), then interpreting the byte as
2510         * an int will yield the length in bytes.
2511         */
2512        if (srp->add_cdb_len & 0x03) {
2513                dev_err(&vscsi->dev, "parse_cmd: reserved bits set in IU\n");
2514                spin_lock_bh(&vscsi->intr_lock);
2515                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2516                ibmvscsis_free_cmd_resources(vscsi, cmd);
2517                spin_unlock_bh(&vscsi->intr_lock);
2518                return;
2519        }
2520
2521        if (srp_get_desc_table(srp, &dir, &data_len)) {
2522                dev_err(&vscsi->dev, "0x%llx: parsing SRP descriptor table failed.\n",
2523                        srp->tag);
2524                goto fail;
2526        }
2527
2528        cmd->rsp.sol_not = srp->sol_not;
2529
2530        switch (srp->task_attr) {
2531        case SRP_SIMPLE_TASK:
2532                attr = TCM_SIMPLE_TAG;
2533                break;
2534        case SRP_ORDERED_TASK:
2535                attr = TCM_ORDERED_TAG;
2536                break;
2537        case SRP_HEAD_TASK:
2538                attr = TCM_HEAD_TAG;
2539                break;
2540        case SRP_ACA_TASK:
2541                attr = TCM_ACA_TAG;
2542                break;
2543        default:
2544                dev_err(&vscsi->dev, "Invalid task attribute %d\n",
2545                        srp->task_attr);
2546                goto fail;
2547        }
2548
2549        cmd->se_cmd.tag = be64_to_cpu(srp->tag);
2550
2551        spin_lock_bh(&vscsi->intr_lock);
2552        list_add_tail(&cmd->list, &vscsi->active_q);
2553        spin_unlock_bh(&vscsi->intr_lock);
2554
2555        srp->lun.scsi_lun[0] &= 0x3f;
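        /*
         * Mask off the SAM address-method bits in the first LUN byte so
         * that scsilun_to_int() below yields the bare LUN number.
         */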
2556
2557        rc = target_submit_cmd(&cmd->se_cmd, nexus->se_sess, srp->cdb,
2558                               cmd->sense_buf, scsilun_to_int(&srp->lun),
2559                               data_len, attr, dir, 0);
2560        if (rc) {
2561                dev_err(&vscsi->dev, "target_submit_cmd failed, rc %d\n", rc);
2562                goto fail;
2563        }
2564        return;
2565
2566fail:
2567        spin_lock_bh(&vscsi->intr_lock);
2568        ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2569        spin_unlock_bh(&vscsi->intr_lock);
2570}
2571
2572/**
2573 * ibmvscsis_parse_task() - Parse SRP Task Management Request
2574 * @vscsi:      Pointer to our adapter structure
2575 * @cmd:        Pointer to command element with SRP task management request
2576 *
2577 * Parse the srp task management request; if it is valid then submit it to tcm.
2578 * Note: The return code does not reflect the status of the task management
2579 * request.
2580 *
2581 * EXECUTION ENVIRONMENT:
2582 *      Process level
2583 */
2584static void ibmvscsis_parse_task(struct scsi_info *vscsi,
2585                                 struct ibmvscsis_cmd *cmd)
2586{
2587        struct iu_entry *iue = cmd->iue;
2588        struct srp_tsk_mgmt *srp_tsk = &vio_iu(iue)->srp.tsk_mgmt;
2589        int tcm_type;
2590        u64 tag_to_abort = 0;
2591        int rc = 0;
2592        struct ibmvscsis_nexus *nexus;
2593
2594        nexus = vscsi->tport.ibmv_nexus;
2595
2596        cmd->rsp.sol_not = srp_tsk->sol_not;
2597
2598        switch (srp_tsk->tsk_mgmt_func) {
2599        case SRP_TSK_ABORT_TASK:
2600                tcm_type = TMR_ABORT_TASK;
2601                tag_to_abort = be64_to_cpu(srp_tsk->task_tag);
2602                break;
2603        case SRP_TSK_ABORT_TASK_SET:
2604                tcm_type = TMR_ABORT_TASK_SET;
2605                break;
2606        case SRP_TSK_CLEAR_TASK_SET:
2607                tcm_type = TMR_CLEAR_TASK_SET;
2608                break;
2609        case SRP_TSK_LUN_RESET:
2610                tcm_type = TMR_LUN_RESET;
2611                break;
2612        case SRP_TSK_CLEAR_ACA:
2613                tcm_type = TMR_CLEAR_ACA;
2614                break;
2615        default:
2616                dev_err(&vscsi->dev, "unknown task mgmt func %d\n",
2617                        srp_tsk->tsk_mgmt_func);
2618                cmd->se_cmd.se_tmr_req->response =
2619                        TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
2620                rc = -1;
2621                break;
2622        }
2623
2624        if (!rc) {
2625                cmd->se_cmd.tag = be64_to_cpu(srp_tsk->tag);
2626
2627                spin_lock_bh(&vscsi->intr_lock);
2628                list_add_tail(&cmd->list, &vscsi->active_q);
2629                spin_unlock_bh(&vscsi->intr_lock);
2630
2631                srp_tsk->lun.scsi_lun[0] &= 0x3f;
2632
2633                pr_debug("calling submit_tmr, func %d\n",
2634                         srp_tsk->tsk_mgmt_func);
2635                rc = target_submit_tmr(&cmd->se_cmd, nexus->se_sess, NULL,
2636                                       scsilun_to_int(&srp_tsk->lun), srp_tsk,
2637                                       tcm_type, GFP_KERNEL, tag_to_abort, 0);
2638                if (rc) {
2639                        dev_err(&vscsi->dev, "target_submit_tmr failed, rc %d\n",
2640                                rc);
2641                        cmd->se_cmd.se_tmr_req->response =
2642                                TMR_FUNCTION_REJECTED;
2643                }
2644        }
2645
2646        if (rc)
2647                transport_send_check_condition_and_sense(&cmd->se_cmd, 0, 0);
2648}
2649
2650static void ibmvscsis_scheduler(struct work_struct *work)
2651{
2652        struct ibmvscsis_cmd *cmd = container_of(work, struct ibmvscsis_cmd,
2653                                                 work);
2654        struct scsi_info *vscsi = cmd->adapter;
2655
2656        spin_lock_bh(&vscsi->intr_lock);
2657
2658        /* Remove from schedule_q */
2659        list_del(&cmd->list);
2660
2661        /* Don't submit cmd if we're disconnecting */
2662        if (vscsi->flags & (SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED)) {
2663                ibmvscsis_free_cmd_resources(vscsi, cmd);
2664
2665                /* ibmvscsis_disconnect might be waiting for us */
2666                if (list_empty(&vscsi->active_q) &&
2667                    list_empty(&vscsi->schedule_q) &&
2668                    (vscsi->flags & WAIT_FOR_IDLE)) {
2669                        vscsi->flags &= ~WAIT_FOR_IDLE;
2670                        complete(&vscsi->wait_idle);
2671                }
2672
2673                spin_unlock_bh(&vscsi->intr_lock);
2674                return;
2675        }
2676
2677        spin_unlock_bh(&vscsi->intr_lock);
2678
2679        switch (cmd->type) {
2680        case SCSI_CDB:
2681                ibmvscsis_parse_cmd(vscsi, cmd);
2682                break;
2683        case TASK_MANAGEMENT:
2684                ibmvscsis_parse_task(vscsi, cmd);
2685                break;
2686        default:
2687                dev_err(&vscsi->dev, "scheduler, invalid cmd type %d\n",
2688                        cmd->type);
2689                spin_lock_bh(&vscsi->intr_lock);
2690                ibmvscsis_free_cmd_resources(vscsi, cmd);
2691                spin_unlock_bh(&vscsi->intr_lock);
2692                break;
2693        }
2694}
2695
2696static int ibmvscsis_alloc_cmds(struct scsi_info *vscsi, int num)
2697{
2698        struct ibmvscsis_cmd *cmd;
2699        int i;
2700
2701        INIT_LIST_HEAD(&vscsi->free_cmd);
2702        vscsi->cmd_pool = kcalloc(num, sizeof(struct ibmvscsis_cmd),
2703                                  GFP_KERNEL);
2704        if (!vscsi->cmd_pool)
2705                return -ENOMEM;
2706
2707        for (i = 0, cmd = (struct ibmvscsis_cmd *)vscsi->cmd_pool; i < num;
2708             i++, cmd++) {
2709                cmd->adapter = vscsi;
2710                INIT_WORK(&cmd->work, ibmvscsis_scheduler);
2711                list_add_tail(&cmd->list, &vscsi->free_cmd);
2712        }
2713
2714        return 0;
2715}
2716
2717static void ibmvscsis_free_cmds(struct scsi_info *vscsi)
2718{
2719        kfree(vscsi->cmd_pool);
2720        vscsi->cmd_pool = NULL;
2721        INIT_LIST_HEAD(&vscsi->free_cmd);
2722}
2723
2724/**
2725 * ibmvscsis_service_wait_q() - Service Waiting Queue
2726 * @timer:      Pointer to timer which has expired
2727 *
2728 * This routine is called when the timer pops to service the waiting
2729 * queue. Elements on the queue have completed, their responses have been
2730 * copied to the client, but the client's response queue was full so
2731 * the queue message could not be sent. The routine grabs the proper locks
2732 * and calls ibmvscsis_send_messages().
2733 *
2734 * EXECUTION ENVIRONMENT:
2735 *      called at interrupt level
2736 */
2737static enum hrtimer_restart ibmvscsis_service_wait_q(struct hrtimer *timer)
2738{
2739        struct timer_cb *p_timer = container_of(timer, struct timer_cb, timer);
2740        struct scsi_info *vscsi = container_of(p_timer, struct scsi_info,
2741                                               rsp_q_timer);
2742
2743        spin_lock_bh(&vscsi->intr_lock);
2744        p_timer->timer_pops += 1;
2745        p_timer->started = false;
2746        ibmvscsis_send_messages(vscsi);
2747        spin_unlock_bh(&vscsi->intr_lock);
2748
2749        return HRTIMER_NORESTART;
2750}
2751
2752static long ibmvscsis_alloctimer(struct scsi_info *vscsi)
2753{
2754        struct timer_cb *p_timer;
2755
2756        p_timer = &vscsi->rsp_q_timer;
2757        hrtimer_init(&p_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2758
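        /*
         * The timer is armed by srp_snd_msg_failed() when the client's
         * response queue is full; ibmvscsis_service_wait_q() retries the
         * waiting responses and does not restart the timer itself.
         */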
2759        p_timer->timer.function = ibmvscsis_service_wait_q;
2760        p_timer->started = false;
2761        p_timer->timer_pops = 0;
2762
2763        return ADAPT_SUCCESS;
2764}
2765
2766static void ibmvscsis_freetimer(struct scsi_info *vscsi)
2767{
2768        struct timer_cb *p_timer;
2769
2770        p_timer = &vscsi->rsp_q_timer;
2771
2772        (void)hrtimer_cancel(&p_timer->timer);
2773
2774        p_timer->started = false;
2775        p_timer->timer_pops = 0;
2776}
2777
2778static irqreturn_t ibmvscsis_interrupt(int dummy, void *data)
2779{
2780        struct scsi_info *vscsi = data;
2781
2782        vio_disable_interrupts(vscsi->dma_dev);
2783        tasklet_schedule(&vscsi->work_task);
2784
2785        return IRQ_HANDLED;
2786}
2787
2788/**
2789 * ibmvscsis_check_q() - Helper function to check for a valid init message
2790 * @vscsi:      Pointer to our adapter structure
2791 *
2792 * Checks if an init message was queued by the initiator
2793 * while the timing window was open.  This function is called from
2794 * probe after the CRQ is created and interrupts are enabled.
2795 * It would only be used by adapters that wait for some event before
2796 * completing the init handshake with the client.  For ibmvscsi, this
2797 * event is waiting for the port to be enabled.
2798 *
2799 * EXECUTION ENVIRONMENT:
2800 *      Process level only, interrupt lock held
2801 */
2802static long ibmvscsis_check_q(struct scsi_info *vscsi)
2803{
2804        uint format;
2805        long rc;
2806
2807        rc = ibmvscsis_check_init_msg(vscsi, &format);
2808        if (rc)
2809                ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2810        else if (format == UNUSED_FORMAT)
2811                vscsi->state = WAIT_ENABLED;
2812        else
2813                vscsi->state = PART_UP_WAIT_ENAB;
2814
2815        return rc;
2816}
2817
2818/**
2819 * ibmvscsis_enable_change_state() - Set new state based on enabled status
2820 * @vscsi:      Pointer to our adapter structure
2821 *
2822 * This function determines our new state now that we are enabled.  This
2823 * may involve sending an Init Complete message to the client.
2824 *
2825 * Must be called with interrupt lock held.
2826 */
2827static long ibmvscsis_enable_change_state(struct scsi_info *vscsi)
2828{
2829        long rc = ADAPT_SUCCESS;
2830
2831handle_state_change:
2832        switch (vscsi->state) {
2833        case WAIT_ENABLED:
2834                rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
2835                switch (rc) {
2836                case H_SUCCESS:
2837                case H_DROPPED:
2838                case H_CLOSED:
2839                        vscsi->state = WAIT_CONNECTION;
2840                        rc = ADAPT_SUCCESS;
2841                        break;
2842
2843                case H_PARAMETER:
2844                        break;
2845
2846                case H_HARDWARE:
2847                        break;
2848
2849                default:
2850                        vscsi->state = UNDEFINED;
2851                        rc = H_HARDWARE;
2852                        break;
2853                }
2854                break;
2855        case PART_UP_WAIT_ENAB:
2856                rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
2857                switch (rc) {
2858                case H_SUCCESS:
2859                        vscsi->state = CONNECTED;
2860                        rc = ADAPT_SUCCESS;
2861                        break;
2862
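                /*
                 * The client dropped or closed the queue; fall back to
                 * WAIT_ENABLED and resend the INIT message via the
                 * handle_state_change label above.
                 */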
2863                case H_DROPPED:
2864                case H_CLOSED:
2865                        vscsi->state = WAIT_ENABLED;
2866                        goto handle_state_change;
2867
2868                case H_PARAMETER:
2869                        break;
2870
2871                case H_HARDWARE:
2872                        break;
2873
2874                default:
2875                        rc = H_HARDWARE;
2876                        break;
2877                }
2878                break;
2879
2880        case WAIT_CONNECTION:
2881        case WAIT_IDLE:
2882        case SRP_PROCESSING:
2883        case CONNECTED:
2884                rc = ADAPT_SUCCESS;
2885                break;
2886                /* should not be able to get here */
2887        case UNCONFIGURING:
2888                rc = ERROR;
2889                vscsi->state = UNDEFINED;
2890                break;
2891
2892                /* driver should never allow this to happen */
2893        case ERR_DISCONNECT:
2894        case ERR_DISCONNECT_RECONNECT:
2895        default:
2896                dev_err(&vscsi->dev, "in invalid state %d during enable_change_state\n",
2897                        vscsi->state);
2898                rc = ADAPT_SUCCESS;
2899                break;
2900        }
2901
2902        return rc;
2903}
2904
2905/**
2906 * ibmvscsis_create_command_q() - Create Command Queue
2907 * @vscsi:      Pointer to our adapter structure
2908 * @num_cmds:   Currently unused.  In the future, may be used to determine
2909 *              the size of the CRQ.
2910 *
2911 * Allocates a page for the command queue, maps it for DMA to obtain an
2912 * ioba, and registers the command/response queue with the hypervisor.
2913 *
2914 * EXECUTION ENVIRONMENT:
2915 *      Process level only
2916 */
2917static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds)
2918{
2919        long rc = 0;
2920        int pages;
2921        struct vio_dev *vdev = vscsi->dma_dev;
2922
2923        /* We might support multiple pages in the future, but just 1 for now */
2924        pages = 1;
2925
2926        vscsi->cmd_q.size = pages;
2927
2928        vscsi->cmd_q.base_addr =
2929                (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
2930        if (!vscsi->cmd_q.base_addr)
2931                return -ENOMEM;
2932
2933        vscsi->cmd_q.mask = ((uint)pages * CRQ_PER_PAGE) - 1;
2934
2935        vscsi->cmd_q.crq_token = dma_map_single(&vdev->dev,
2936                                                vscsi->cmd_q.base_addr,
2937                                                PAGE_SIZE, DMA_BIDIRECTIONAL);
2938        if (dma_mapping_error(&vdev->dev, vscsi->cmd_q.crq_token)) {
2939                free_page((unsigned long)vscsi->cmd_q.base_addr);
2940                return -ENOMEM;
2941        }
2942
2943        rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, PAGE_SIZE);
2944        if (rc) {
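                /*
                 * H_CLOSED means the client has not yet registered its end
                 * of the CRQ; treat this as success and wait in WAIT_ENABLED
                 * for the client to connect.
                 */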
2945                if (rc == H_CLOSED) {
2946                        vscsi->state = WAIT_ENABLED;
2947                        rc = 0;
2948                } else {
2949                        dma_unmap_single(&vdev->dev, vscsi->cmd_q.crq_token,
2950                                         PAGE_SIZE, DMA_BIDIRECTIONAL);
2951                        free_page((unsigned long)vscsi->cmd_q.base_addr);
2952                        rc = -ENODEV;
2953                }
2954        } else {
2955                vscsi->state = WAIT_ENABLED;
2956        }
2957
2958        return rc;
2959}
2960
2961/**
2962 * ibmvscsis_destroy_command_q() - Destroy Command Queue
2963 * @vscsi:      Pointer to our adapter structure
2964 *
2965 * Unmaps the DMA mapping for the command queue and frees its page.
2966 *
2967 * EXECUTION ENVIRONMENT:
2968 *      Process level only
2969 */
2970static void ibmvscsis_destroy_command_q(struct scsi_info *vscsi)
2971{
2972        dma_unmap_single(&vscsi->dma_dev->dev, vscsi->cmd_q.crq_token,
2973                         PAGE_SIZE, DMA_BIDIRECTIONAL);
2974        free_page((unsigned long)vscsi->cmd_q.base_addr);
2975        vscsi->cmd_q.base_addr = NULL;
2976        vscsi->state = NO_QUEUE;
2977}
2978
2979static u8 ibmvscsis_fast_fail(struct scsi_info *vscsi,
2980                              struct ibmvscsis_cmd *cmd)
2981{
2982        struct iu_entry *iue = cmd->iue;
2983        struct se_cmd *se_cmd = &cmd->se_cmd;
2984        struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf;
2985        struct scsi_sense_hdr sshdr;
2986        u8 rc = se_cmd->scsi_status;
2987
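        /*
         * If fast_fail is enabled and a READ/WRITE command failed with a
         * HARDWARE ERROR sense key without a partial transfer, report
         * NO SENSE instead and flag the command as a fast fail.
         */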
2988        if (vscsi->fast_fail && (READ_CMD(srp->cdb) || WRITE_CMD(srp->cdb)))
2989                if (scsi_normalize_sense(se_cmd->sense_buffer,
2990                                         se_cmd->scsi_sense_length, &sshdr))
2991                        if (sshdr.sense_key == HARDWARE_ERROR &&
2992                            (se_cmd->residual_count == 0 ||
2993                             se_cmd->residual_count == se_cmd->data_length)) {
2994                                rc = NO_SENSE;
2995                                cmd->flags |= CMD_FAST_FAIL;
2996                        }
2997
2998        return rc;
2999}
3000
3001/**
3002 * srp_build_response() - Build an SRP response buffer
3003 * @vscsi:      Pointer to our adapter structure
3004 * @cmd:        Pointer to command for which to send the response
3005 * @len_p:      Where to return the length of the IU response sent.  This
3006 *              is needed to construct the CRQ response.
3007 *
3008 * Build the SRP response buffer and copy it to the client's memory space.
3009 */
3010static long srp_build_response(struct scsi_info *vscsi,
3011                               struct ibmvscsis_cmd *cmd, uint *len_p)
3012{
3013        struct iu_entry *iue = cmd->iue;
3014        struct se_cmd *se_cmd = &cmd->se_cmd;
3015        struct srp_rsp *rsp;
3016        uint len;
3017        u32 rsp_code;
3018        char *data;
3019        u32 *tsk_status;
3020        long rc = ADAPT_SUCCESS;
3021
3022        spin_lock_bh(&vscsi->intr_lock);
3023
3024        rsp = &vio_iu(iue)->srp.rsp;
3025        len = sizeof(*rsp);
3026        memset(rsp, 0, len);
3027        data = rsp->data;
3028
3029        rsp->opcode = SRP_RSP;
3030
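        /*
         * req_lim_delta returns request-limit credit to the client: while in
         * SRP_PROCESSING with credit accumulated, return just that credit;
         * otherwise return one (for this request) plus any accumulated
         * credit.
         */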
3031        if (vscsi->credit > 0 && vscsi->state == SRP_PROCESSING)
3032                rsp->req_lim_delta = cpu_to_be32(vscsi->credit);
3033        else
3034                rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
3035        rsp->tag = cmd->rsp.tag;
3036        rsp->flags = 0;
3037
3038        if (cmd->type == SCSI_CDB) {
3039                rsp->status = ibmvscsis_fast_fail(vscsi, cmd);
3040                if (rsp->status) {
3041                        pr_debug("build_resp: cmd %p, scsi status %d\n", cmd,
3042                                 (int)rsp->status);
3043                        ibmvscsis_determine_resid(se_cmd, rsp);
3044                        if (se_cmd->scsi_sense_length && se_cmd->sense_buffer) {
3045                                rsp->sense_data_len =
3046                                        cpu_to_be32(se_cmd->scsi_sense_length);
3047                                rsp->flags |= SRP_RSP_FLAG_SNSVALID;
3048                                len += se_cmd->scsi_sense_length;
3049                                memcpy(data, se_cmd->sense_buffer,
3050                                       se_cmd->scsi_sense_length);
3051                        }
3052                        rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
3053                                UCSOLNT_RESP_SHIFT;
3054                } else if (cmd->flags & CMD_FAST_FAIL) {
3055                        pr_debug("build_resp: cmd %p, fast fail\n", cmd);
3056                        rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
3057                                UCSOLNT_RESP_SHIFT;
3058                } else {
3059                        rsp->sol_not = (cmd->rsp.sol_not & SCSOLNT) >>
3060                                SCSOLNT_RESP_SHIFT;
3061                }
3062        } else {
3063                /* this is task management */
3064                rsp->status = 0;
3065                rsp->resp_data_len = cpu_to_be32(4);
3066                rsp->flags |= SRP_RSP_FLAG_RSPVALID;
3067
3068                switch (se_cmd->se_tmr_req->response) {
3069                case TMR_FUNCTION_COMPLETE:
3070                case TMR_TASK_DOES_NOT_EXIST:
3071                        rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_COMPLETE;
3072                        rsp->sol_not = (cmd->rsp.sol_not & SCSOLNT) >>
3073                                SCSOLNT_RESP_SHIFT;
3074                        break;
3075                case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
3076                case TMR_LUN_DOES_NOT_EXIST:
3077                        rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_NOT_SUPPORTED;
3078                        rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
3079                                UCSOLNT_RESP_SHIFT;
3080                        break;
3081                case TMR_FUNCTION_FAILED:
3082                case TMR_FUNCTION_REJECTED:
3083                default:
3084                        rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_FAILED;
3085                        rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
3086                                UCSOLNT_RESP_SHIFT;
3087                        break;
3088                }
3089
3090                tsk_status = (u32 *)data;
3091                *tsk_status = cpu_to_be32(rsp_code);
3092                data = (char *)(tsk_status + 1);
3093                len += 4;
3094        }
3095
3096        dma_wmb();
3097        rc = h_copy_rdma(len, vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma,
3098                         vscsi->dds.window[REMOTE].liobn,
3099                         be64_to_cpu(iue->remote_token));
3100
3101        switch (rc) {
3102        case H_SUCCESS:
3103                vscsi->credit = 0;
3104                *len_p = len;
3105                break;
3106        case H_PERMISSION:
3107                if (connection_broken(vscsi))
3108                        vscsi->flags |= RESPONSE_Q_DOWN | CLIENT_FAILED;
3109
3110                dev_err(&vscsi->dev, "build_response: error copying to client, rc %ld, flags 0x%x, state 0x%hx\n",
3111                        rc, vscsi->flags, vscsi->state);
3112                break;
3113        case H_SOURCE_PARM:
3114        case H_DEST_PARM:
3115        default:
3116                dev_err(&vscsi->dev, "build_response: error copying to client, rc %ld\n",
3117                        rc);
3118                break;
3119        }
3120
3121        spin_unlock_bh(&vscsi->intr_lock);
3122
3123        return rc;
3124}
3125
3126static int ibmvscsis_rdma(struct ibmvscsis_cmd *cmd, struct scatterlist *sg,
3127                          int nsg, struct srp_direct_buf *md, int nmd,
3128                          enum dma_data_direction dir, unsigned int bytes)
3129{
3130        struct iu_entry *iue = cmd->iue;
3131        struct srp_target *target = iue->target;
3132        struct scsi_info *vscsi = target->ldata;
3133        struct scatterlist *sgp;
3134        dma_addr_t client_ioba, server_ioba;
3135        ulong buf_len;
3136        ulong client_len, server_len;
3137        int md_idx;
3138        long tx_len;
3139        long rc = 0;
3140
3141        if (bytes == 0)
3142                return 0;
3143
3144        sgp = sg;
3145        client_len = 0;
3146        server_len = 0;
3147        md_idx = 0;
3148        tx_len = bytes;
3149
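        /*
         * Walk the client's memory descriptors and the local scatter/gather
         * list in parallel, copying at most min(client segment, local
         * segment, max_vdma_size) bytes per h_copy_rdma call until all
         * requested bytes have been transferred.
         */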
3150        do {
3151                if (client_len == 0) {
3152                        if (md_idx >= nmd) {
3153                                dev_err(&vscsi->dev, "rdma: ran out of client memory descriptors\n");
3154                                rc = -EIO;
3155                                break;
3156                        }
3157                        client_ioba = be64_to_cpu(md[md_idx].va);
3158                        client_len = be32_to_cpu(md[md_idx].len);
3159                }
3160                if (server_len == 0) {
3161                        if (!sgp) {
3162                                dev_err(&vscsi->dev, "rdma: ran out of scatter/gather list\n");
3163                                rc = -EIO;
3164                                break;
3165                        }
3166                        server_ioba = sg_dma_address(sgp);
3167                        server_len = sg_dma_len(sgp);
3168                }
3169
3170                buf_len = tx_len;
3171
3172                if (buf_len > client_len)
3173                        buf_len = client_len;
3174
3175                if (buf_len > server_len)
3176                        buf_len = server_len;
3177
3178                if (buf_len > max_vdma_size)
3179                        buf_len = max_vdma_size;
3180
3181                if (dir == DMA_TO_DEVICE) {
3182                        /* read from client */
3183                        rc = h_copy_rdma(buf_len,
3184                                         vscsi->dds.window[REMOTE].liobn,
3185                                         client_ioba,
3186                                         vscsi->dds.window[LOCAL].liobn,
3187                                         server_ioba);
3188                } else {
3189                        /* The h_copy_rdma will cause phyp, running in another
3190                         * partition, to read memory, so we need to make sure
3191                         * the data has been written out, hence these syncs.
3192                         */
3193                        /* ensure that everything is in memory */
3194                        isync();
3195                        /* ensure that memory has been made visible */
3196                        dma_wmb();
3197                        rc = h_copy_rdma(buf_len,
3198                                         vscsi->dds.window[LOCAL].liobn,
3199                                         server_ioba,
3200                                         vscsi->dds.window[REMOTE].liobn,
3201                                         client_ioba);
3202                }
3203                switch (rc) {
3204                case H_SUCCESS:
3205                        break;
3206                case H_PERMISSION:
3207                case H_SOURCE_PARM:
3208                case H_DEST_PARM:
3209                        if (connection_broken(vscsi)) {
3210                                spin_lock_bh(&vscsi->intr_lock);
3211                                vscsi->flags |=
3212                                        (RESPONSE_Q_DOWN | CLIENT_FAILED);
3213                                spin_unlock_bh(&vscsi->intr_lock);
3214                        }
3215                        dev_err(&vscsi->dev, "rdma: h_copy_rdma failed, rc %ld\n",
3216                                rc);
3217                        break;
3218
3219                default:
3220                        dev_err(&vscsi->dev, "rdma: unknown error %ld from h_copy_rdma\n",
3221                                rc);
3222                        break;
3223                }
3224
3225                if (!rc) {
3226                        tx_len -= buf_len;
3227                        if (tx_len) {
3228                                client_len -= buf_len;
3229                                if (client_len == 0)
3230                                        md_idx++;
3231                                else
3232                                        client_ioba += buf_len;
3233
3234                                server_len -= buf_len;
3235                                if (server_len == 0)
3236                                        sgp = sg_next(sgp);
3237                                else
3238                                        server_ioba += buf_len;
3239                        } else {
3240                                break;
3241                        }
3242                }
3243        } while (!rc);
3244
3245        return rc;
3246}
3247
3248/**
3249 * ibmvscsis_handle_crq() - Handle CRQ
3250 * @data:       Pointer to our adapter structure
3251 *
3252 * Read the command elements from the command queue and copy the payloads
3253 * associated with the command elements to local memory and execute the
3254 * SRP requests.
3255 *
3256 * Note: this is an edge-triggered interrupt. It cannot be shared.
3257 */
3258static void ibmvscsis_handle_crq(unsigned long data)
3259{
3260        struct scsi_info *vscsi = (struct scsi_info *)data;
3261        struct viosrp_crq *crq;
3262        long rc;
3263        bool ack = true;
3264        volatile u8 valid;
3265
3266        spin_lock_bh(&vscsi->intr_lock);
3267
3268        pr_debug("got interrupt\n");
3269
3270        /*
3271         * If we are waiting for all pending commands to complete because
3272         * we received a transport event, anything in the command queue is
3273         * for a new connection, so do nothing.
3274         */
3275        if (TARGET_STOP(vscsi)) {
3276                vio_enable_interrupts(vscsi->dma_dev);
3277
3278                pr_debug("handle_crq, don't process: flags 0x%x, state 0x%hx\n",
3279                         vscsi->flags, vscsi->state);
3280                spin_unlock_bh(&vscsi->intr_lock);
3281                return;
3282        }
3283
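        /*
         * If a disconnect has been scheduled, treat it like an error so that
         * new SRP commands are not parsed; only transport events are
         * serviced in the loop below.
         */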
3284        rc = vscsi->flags & SCHEDULE_DISCONNECT;
3285        crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
3286        valid = crq->valid;
3287        dma_rmb();
3288
3289        while (valid) {
3290                /*
3291                 * These are edge-triggered interrupts. After dropping out of
3292                 * the while loop, the code must check for work since an
3293                 * interrupt could be lost, and an element be left on the queue,
3294                 * hence the label.
3295                 */
3296cmd_work:
3297                vscsi->cmd_q.index =
3298                        (vscsi->cmd_q.index + 1) & vscsi->cmd_q.mask;
3299
3300                if (!rc) {
3301                        rc = ibmvscsis_parse_command(vscsi, crq);
3302                } else {
3303                        if ((uint)crq->valid == VALID_TRANS_EVENT) {
3304                                /*
3305                                 * must service the transport layer events even
3306                                 * in an error state, don't break out until all
3307                                 * the consecutive transport events have been
3308                                 * processed
3309                                 */
3310                                rc = ibmvscsis_trans_event(vscsi, crq);
3311                        } else if (vscsi->flags & TRANS_EVENT) {
3312                                /*
3313                                 * if a transport event has occurred, leave
3314                                 * everything but transport events on the queue
3315                                 *
3316                                 * need to decrement the queue index so we can
3317                                 * look at the element again
3318                                 */
3319                                if (vscsi->cmd_q.index)
3320                                        vscsi->cmd_q.index -= 1;
3321                                else
3322                                        /*
3323                                         * the index is at 0, so it just wrapped;
3324                                         * point it at the last element in the queue
3325                                         */
3326                                        vscsi->cmd_q.index = vscsi->cmd_q.mask;
3327                                break;
3328                        }
3329                }
3330
3331                crq->valid = INVALIDATE_CMD_RESP_EL;
3332
3333                crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
3334                valid = crq->valid;
3335                dma_rmb();
3336        }
3337
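        /*
         * Re-enable interrupts, then check the queue one more time; an
         * element could have been queued (and the edge-triggered interrupt
         * lost) while the queue was being drained.
         */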
3338        if (!rc) {
3339                if (ack) {
3340                        vio_enable_interrupts(vscsi->dma_dev);
3341                        ack = false;
3342                        pr_debug("handle_crq, reenabling interrupts\n");
3343                }
3344                valid = crq->valid;
3345                dma_rmb();
3346                if (valid)
3347                        goto cmd_work;
3348        } else {
3349                pr_debug("handle_crq, error: flags 0x%x, state 0x%hx, crq index 0x%x\n",
3350                         vscsi->flags, vscsi->state, vscsi->cmd_q.index);
3351        }
3352
3353        pr_debug("Leaving handle_crq: schedule_q empty %d, flags 0x%x, state 0x%hx\n",
3354                 (int)list_empty(&vscsi->schedule_q), vscsi->flags,
3355                 vscsi->state);
3356
3357        spin_unlock_bh(&vscsi->intr_lock);
3358}
3359
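/**
 * ibmvscsis_probe() - Probe routine for a vio device
 * @vdev:       Pointer to the vio device being probed
 * @id:         Matching entry in ibmvscsis_device_table
 *
 * Allocates and initializes the adapter: reads the DMA window, allocates
 * commands, sets up the response queue timer, command/response queue,
 * work queue and interrupt handler, and queries the client partition.
 */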
3360static int ibmvscsis_probe(struct vio_dev *vdev,
3361                           const struct vio_device_id *id)
3362{
3363        struct scsi_info *vscsi;
3364        int rc = 0;
3365        long hrc = 0;
3366        char wq_name[24];
3367
3368        vscsi = kzalloc(sizeof(*vscsi), GFP_KERNEL);
3369        if (!vscsi) {
3370                rc = -ENOMEM;
3371                pr_err("probe: allocation of adapter failed\n");
3372                return rc;
3373        }
3374
3375        vscsi->dma_dev = vdev;
3376        vscsi->dev = vdev->dev;
3377        INIT_LIST_HEAD(&vscsi->schedule_q);
3378        INIT_LIST_HEAD(&vscsi->waiting_rsp);
3379        INIT_LIST_HEAD(&vscsi->active_q);
3380
3381        snprintf(vscsi->tport.tport_name, 256, "%s", dev_name(&vdev->dev));
3382
3383        pr_debug("probe tport_name: %s\n", vscsi->tport.tport_name);
3384
3385        rc = read_dma_window(vscsi);
3386        if (rc)
3387                goto free_adapter;
3388        pr_debug("Probe: liobn 0x%x, riobn 0x%x\n",
3389                 vscsi->dds.window[LOCAL].liobn,
3390                 vscsi->dds.window[REMOTE].liobn);
3391
3392        strcpy(vscsi->eye, "VSCSI ");
3393        strncat(vscsi->eye, vdev->name, MAX_EYE);
3394
3395        vscsi->dds.unit_id = vdev->unit_address;
3396
3397        spin_lock_bh(&ibmvscsis_dev_lock);
3398        list_add_tail(&vscsi->list, &ibmvscsis_dev_list);
3399        spin_unlock_bh(&ibmvscsis_dev_lock);
3400
3401        /*
3402         * TBD: How do we determine # of cmds to request?  Do we know how
3403         * many "children" we have?
3404         */
3405        vscsi->request_limit = INITIAL_SRP_LIMIT;
3406        rc = srp_target_alloc(&vscsi->target, &vdev->dev, vscsi->request_limit,
3407                              SRP_MAX_IU_LEN);
3408        if (rc)
3409                goto rem_list;
3410
3411        vscsi->target.ldata = vscsi;
3412
3413        rc = ibmvscsis_alloc_cmds(vscsi, vscsi->request_limit);
3414        if (rc) {
3415                dev_err(&vscsi->dev, "alloc_cmds failed, rc %d, num %d\n",
3416                        rc, vscsi->request_limit);
3417                goto free_target;
3418        }
3419
3420        /*
3421         * Note: the lock is used when freeing the timer, so it must be
3422         * initialized first so that the error-unwind ordering is correct.
3423         */
3424        spin_lock_init(&vscsi->intr_lock);
3425
3426        rc = ibmvscsis_alloctimer(vscsi);
3427        if (rc) {
3428                dev_err(&vscsi->dev, "probe: alloctimer failed, rc %d\n", rc);
3429                goto free_cmds;
3430        }
3431
3432        rc = ibmvscsis_create_command_q(vscsi, 256);
3433        if (rc) {
3434                dev_err(&vscsi->dev, "probe: create_command_q failed, rc %d\n",
3435                        rc);
3436                goto free_timer;
3437        }
3438
3439        vscsi->map_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
3440        if (!vscsi->map_buf) {
3441                rc = -ENOMEM;
3442                dev_err(&vscsi->dev, "probe: allocating cmd buffer failed\n");
3443                goto destroy_queue;
3444        }
3445
3446        vscsi->map_ioba = dma_map_single(&vdev->dev, vscsi->map_buf, PAGE_SIZE,
3447                                         DMA_BIDIRECTIONAL);
3448        if (dma_mapping_error(&vdev->dev, vscsi->map_ioba)) {
3449                rc = -ENOMEM;
3450                dev_err(&vscsi->dev, "probe: error mapping command buffer\n");
3451                goto free_buf;
3452        }
3453
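        /*
         * Query the hypervisor for partner (client) partition information;
         * on success the client's partition number is read from map_buf.
         */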
3454        hrc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
3455                       (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
3456                       0);
3457        if (hrc == H_SUCCESS)
3458                vscsi->client_data.partition_number =
3459                        be64_to_cpu(*(u64 *)vscsi->map_buf);
3460        /*
3461         * We expect the VIOCTL to fail if we're configured as "any
3462         * client can connect" and the client isn't activated yet.
3463         * We'll make the call again when the client sends an init msg.
3464         */
3465        pr_debug("probe hrc %ld, client partition num %d\n",
3466                 hrc, vscsi->client_data.partition_number);
3467
3468        tasklet_init(&vscsi->work_task, ibmvscsis_handle_crq,
3469                     (unsigned long)vscsi);
3470
3471        init_completion(&vscsi->wait_idle);
3472
3473        snprintf(wq_name, 24, "ibmvscsis%s", dev_name(&vdev->dev));
3474        vscsi->work_q = create_workqueue(wq_name);
3475        if (!vscsi->work_q) {
3476                rc = -ENOMEM;
3477                dev_err(&vscsi->dev, "create_workqueue failed\n");
3478                goto unmap_buf;
3479        }
3480
3481        rc = request_irq(vdev->irq, ibmvscsis_interrupt, 0, "ibmvscsis", vscsi);
3482        if (rc) {
3483                rc = -EPERM;
3484                dev_err(&vscsi->dev, "probe: request_irq failed, rc %d\n", rc);
3485                goto destroy_WQ;
3486        }
3487
3488        spin_lock_bh(&vscsi->intr_lock);
3489        rc = vio_enable_interrupts(vdev);
3490        if (rc) {
3491                dev_err(&vscsi->dev, "enabling interrupts failed, rc %d\n", rc);
3492                rc = -ENODEV;
3493                spin_unlock_bh(&vscsi->intr_lock);
3494                goto free_irq;
3495        }
3496
3497        if (ibmvscsis_check_q(vscsi)) {
3498                rc = ERROR;
3499                dev_err(&vscsi->dev, "probe: check_q failed, rc %d\n", rc);
3500                spin_unlock_bh(&vscsi->intr_lock);
3501                goto disable_interrupt;
3502        }
3503        spin_unlock_bh(&vscsi->intr_lock);
3504
3505        dev_set_drvdata(&vdev->dev, vscsi);
3506
3507        return 0;
3508
3509disable_interrupt:
3510        vio_disable_interrupts(vdev);
3511free_irq:
3512        free_irq(vdev->irq, vscsi);
3513destroy_WQ:
3514        destroy_workqueue(vscsi->work_q);
3515unmap_buf:
3516        dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE,
3517                         DMA_BIDIRECTIONAL);
3518free_buf:
3519        kfree(vscsi->map_buf);
3520destroy_queue:
3521        tasklet_kill(&vscsi->work_task);
3522        ibmvscsis_unregister_command_q(vscsi);
3523        ibmvscsis_destroy_command_q(vscsi);
3524free_timer:
3525        ibmvscsis_freetimer(vscsi);
3526free_cmds:
3527        ibmvscsis_free_cmds(vscsi);
3528free_target:
3529        srp_target_free(&vscsi->target);
3530rem_list:
3531        spin_lock_bh(&ibmvscsis_dev_lock);
3532        list_del(&vscsi->list);
3533        spin_unlock_bh(&ibmvscsis_dev_lock);
3534free_adapter:
3535        kfree(vscsi);
3536
3537        return rc;
3538}
3539
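/**
 * ibmvscsis_remove() - Remove routine for a vio device
 * @vdev:       Pointer to the vio device being removed
 *
 * Tears down the resources set up by ibmvscsis_probe() in reverse order.
 */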
3540static int ibmvscsis_remove(struct vio_dev *vdev)
3541{
3542        struct scsi_info *vscsi = dev_get_drvdata(&vdev->dev);
3543
3544        pr_debug("remove (%s)\n", dev_name(&vscsi->dma_dev->dev));
3545
3546        /*
3547         * TBD: Need to handle if there are commands on the waiting_rsp q
3548         *      Actually, can there still be cmds outstanding to tcm?
3549         */
3550
3551        vio_disable_interrupts(vdev);
3552        free_irq(vdev->irq, vscsi);
3553        destroy_workqueue(vscsi->work_q);
3554        dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE,
3555                         DMA_BIDIRECTIONAL);
3556        kfree(vscsi->map_buf);
3557        tasklet_kill(&vscsi->work_task);
3558        ibmvscsis_unregister_command_q(vscsi);
3559        ibmvscsis_destroy_command_q(vscsi);
3560        ibmvscsis_freetimer(vscsi);
3561        ibmvscsis_free_cmds(vscsi);
3562        srp_target_free(&vscsi->target);
3563        spin_lock_bh(&ibmvscsis_dev_lock);
3564        list_del(&vscsi->list);
3565        spin_unlock_bh(&ibmvscsis_dev_lock);
3566        kfree(vscsi);
3567
3568        return 0;
3569}
3570
3571static ssize_t system_id_show(struct device *dev,
3572                              struct device_attribute *attr, char *buf)
3573{
3574        return snprintf(buf, PAGE_SIZE, "%s\n", system_id);
3575}
3576
3577static ssize_t partition_number_show(struct device *dev,
3578                                     struct device_attribute *attr, char *buf)
3579{
3580        return snprintf(buf, PAGE_SIZE, "%x\n", partition_number);
3581}
3582
3583static ssize_t unit_address_show(struct device *dev,
3584                                 struct device_attribute *attr, char *buf)
3585{
3586        struct scsi_info *vscsi = container_of(dev, struct scsi_info, dev);
3587
3588        return snprintf(buf, PAGE_SIZE, "%x\n", vscsi->dma_dev->unit_address);
3589}
3590
3591static int ibmvscsis_get_system_info(void)
3592{
3593        struct device_node *rootdn, *vdevdn;
3594        const char *id, *model, *name;
3595        const uint *num;
3596
3597        rootdn = of_find_node_by_path("/");
3598        if (!rootdn)
3599                return -ENOENT;
3600
3601        model = of_get_property(rootdn, "model", NULL);
3602        id = of_get_property(rootdn, "system-id", NULL);
3603        if (model && id)
3604                snprintf(system_id, sizeof(system_id), "%s-%s", model, id);
3605
3606        name = of_get_property(rootdn, "ibm,partition-name", NULL);
3607        if (name)
3608                strscpy(partition_name, name, sizeof(partition_name));
3609
3610        num = of_get_property(rootdn, "ibm,partition-no", NULL);
3611        if (num)
3612                partition_number = *num;
3613
3614        of_node_put(rootdn);
3615
3616        vdevdn = of_find_node_by_path("/vdevice");
3617        if (vdevdn) {
3618                const uint *mvds;
3619
3620                mvds = of_get_property(vdevdn, "ibm,max-virtual-dma-size",
3621                                       NULL);
3622                if (mvds)
3623                        max_vdma_size = *mvds;
3624                of_node_put(vdevdn);
3625        }
3626
3627        return 0;
3628}
3629
3630static char *ibmvscsis_get_fabric_name(void)
3631{
3632        return "ibmvscsis";
3633}
3634
3635static char *ibmvscsis_get_fabric_wwn(struct se_portal_group *se_tpg)
3636{
3637        struct ibmvscsis_tport *tport =
3638                container_of(se_tpg, struct ibmvscsis_tport, se_tpg);
3639
3640        return tport->tport_name;
3641}
3642
3643static u16 ibmvscsis_get_tag(struct se_portal_group *se_tpg)
3644{
3645        struct ibmvscsis_tport *tport =
3646                container_of(se_tpg, struct ibmvscsis_tport, se_tpg);
3647
3648        return tport->tport_tpgt;
3649}
3650
3651static u32 ibmvscsis_get_default_depth(struct se_portal_group *se_tpg)
3652{
3653        return 1;
3654}
3655
3656static int ibmvscsis_check_true(struct se_portal_group *se_tpg)
3657{
3658        return 1;
3659}
3660
3661static int ibmvscsis_check_false(struct se_portal_group *se_tpg)
3662{
3663        return 0;
3664}
3665
3666static u32 ibmvscsis_tpg_get_inst_index(struct se_portal_group *se_tpg)
3667{
3668        return 1;
3669}
3670
3671static int ibmvscsis_check_stop_free(struct se_cmd *se_cmd)
3672{
3673        return target_put_sess_cmd(se_cmd);
3674}
3675
3676static void ibmvscsis_release_cmd(struct se_cmd *se_cmd)
3677{
3678        struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3679                                                 se_cmd);
3680        struct scsi_info *vscsi = cmd->adapter;
3681
3682        spin_lock_bh(&vscsi->intr_lock);
3683        /* Remove from active_q */
3684        list_move_tail(&cmd->list, &vscsi->waiting_rsp);
3685        ibmvscsis_send_messages(vscsi);
3686        spin_unlock_bh(&vscsi->intr_lock);
3687}
3688
3689static u32 ibmvscsis_sess_get_index(struct se_session *se_sess)
3690{
3691        return 0;
3692}
3693
3694static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
3695{
3696        struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3697                                                 se_cmd);
3698        struct iu_entry *iue = cmd->iue;
3699        int rc;
3700
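        /*
         * Pull the WRITE data from the client's memory into the local
         * scatter/gather list via RDMA before telling TCM to execute the
         * command.
         */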
3701        rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma,
3702                               1, 1);
3703        if (rc) {
3704                pr_err("srp_transfer_data() failed: %d\n", rc);
3705                return -EAGAIN;
3706        }
3707        /*
3708         * We now tell TCM to add this WRITE CDB directly into the TCM storage
3709         * object execution queue.
3710         */
3711        target_execute_cmd(se_cmd);
3712        return 0;
3713}
3714
3715static int ibmvscsis_write_pending_status(struct se_cmd *se_cmd)
3716{
3717        return 0;
3718}
3719
3720static void ibmvscsis_set_default_node_attrs(struct se_node_acl *nacl)
3721{
3722}
3723
3724static int ibmvscsis_get_cmd_state(struct se_cmd *se_cmd)
3725{
3726        return 0;
3727}
3728
3729static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd)
3730{
3731        struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3732                                                 se_cmd);
3733        struct iu_entry *iue = cmd->iue;
3734        struct scsi_info *vscsi = cmd->adapter;
3735        char *sd;
3736        uint len = 0;
3737        int rc;
3738
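        /*
         * Push the READ data to the client via RDMA; if the transfer fails,
         * synthesize sense data before building the SRP response below.
         */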
3739        rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, 1,
3740                               1);
3741        if (rc) {
3742                pr_err("srp_transfer_data failed: %d\n", rc);
3743                sd = se_cmd->sense_buffer;
3744                se_cmd->scsi_sense_length = 18;
3745                memset(se_cmd->sense_buffer, 0, se_cmd->scsi_sense_length);
3746                /* Logical Unit Communication Time-out asc/ascq = 0x0801 */
3747                scsi_build_sense_buffer(0, se_cmd->sense_buffer, MEDIUM_ERROR,
3748                                        0x08, 0x01);
3749        }
3750
3751        srp_build_response(vscsi, cmd, &len);
3752        cmd->rsp.format = SRP_FORMAT;
3753        cmd->rsp.len = len;
3754
3755        return 0;
3756}
3757
3758static int ibmvscsis_queue_status(struct se_cmd *se_cmd)
3759{
3760        struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3761                                                 se_cmd);
3762        struct scsi_info *vscsi = cmd->adapter;
3763        uint len;
3764
3765        pr_debug("queue_status %p\n", se_cmd);
3766
3767        srp_build_response(vscsi, cmd, &len);
3768        cmd->rsp.format = SRP_FORMAT;
3769        cmd->rsp.len = len;
3770
3771        return 0;
3772}
3773
3774static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd)
3775{
3776        struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3777                                                 se_cmd);
3778        struct scsi_info *vscsi = cmd->adapter;
3779        uint len;
3780
3781        pr_debug("queue_tm_rsp %p, status %d\n",
3782                 se_cmd, (int)se_cmd->se_tmr_req->response);
3783
3784        srp_build_response(vscsi, cmd, &len);
3785        cmd->rsp.format = SRP_FORMAT;
3786        cmd->rsp.len = len;
3787}
3788
3789static void ibmvscsis_aborted_task(struct se_cmd *se_cmd)
3790{
3791        /* TBD: What (if anything) should we do here? */
3792        pr_debug("ibmvscsis_aborted_task %p\n", se_cmd);
3793}
3794
3795static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf,
3796                                           struct config_group *group,
3797                                           const char *name)
3798{
3799        struct ibmvscsis_tport *tport;
3800
3801        tport = ibmvscsis_lookup_port(name);
3802        if (tport) {
3803                tport->tport_proto_id = SCSI_PROTOCOL_SRP;
3804                pr_debug("make_tport(%s), pointer:%p, tport_id:%x\n",
3805                         name, tport, tport->tport_proto_id);
3806                return &tport->tport_wwn;
3807        }
3808
3809        return ERR_PTR(-EINVAL);
3810}
3811
3812static void ibmvscsis_drop_tport(struct se_wwn *wwn)
3813{
3814        struct ibmvscsis_tport *tport = container_of(wwn,
3815                                                     struct ibmvscsis_tport,
3816                                                     tport_wwn);
3817
3818        pr_debug("drop_tport(%s)\n",
3819                 config_item_name(&tport->tport_wwn.wwn_group.cg_item));
3820}
3821
3822static struct se_portal_group *ibmvscsis_make_tpg(struct se_wwn *wwn,
3823                                                  struct config_group *group,
3824                                                  const char *name)
3825{
3826        struct ibmvscsis_tport *tport =
3827                container_of(wwn, struct ibmvscsis_tport, tport_wwn);
3828        int rc;
3829
3830        tport->releasing = false;
3831
3832        rc = core_tpg_register(&tport->tport_wwn, &tport->se_tpg,
3833                               tport->tport_proto_id);
3834        if (rc)
3835                return ERR_PTR(rc);
3836
3837        return &tport->se_tpg;
3838}
3839
3840static void ibmvscsis_drop_tpg(struct se_portal_group *se_tpg)
3841{
3842        struct ibmvscsis_tport *tport = container_of(se_tpg,
3843                                                     struct ibmvscsis_tport,
3844                                                     se_tpg);
3845
3846        tport->releasing = true;
3847        tport->enabled = false;
3848
3849        /*
3850         * Release the virtual I_T Nexus for this ibmvscsis TPG
3851         */
3852        ibmvscsis_drop_nexus(tport);
3853        /*
3854         * Deregister the se_tpg from TCM..
3855         */
3856        core_tpg_deregister(se_tpg);
3857}
3858
3859static ssize_t ibmvscsis_wwn_version_show(struct config_item *item,
3860                                          char *page)
3861{
3862        return scnprintf(page, PAGE_SIZE, "%s\n", IBMVSCSIS_VERSION);
3863}
3864CONFIGFS_ATTR_RO(ibmvscsis_wwn_, version);
3865
3866static struct configfs_attribute *ibmvscsis_wwn_attrs[] = {
3867        &ibmvscsis_wwn_attr_version,
3868        NULL,
3869};
3870
3871static ssize_t ibmvscsis_tpg_enable_show(struct config_item *item,
3872                                         char *page)
3873{
3874        struct se_portal_group *se_tpg = to_tpg(item);
3875        struct ibmvscsis_tport *tport = container_of(se_tpg,
3876                                                     struct ibmvscsis_tport,
3877                                                     se_tpg);
3878
3879        return snprintf(page, PAGE_SIZE, "%d\n", (tport->enabled) ? 1 : 0);
3880}
3881
3882static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
3883                                          const char *page, size_t count)
3884{
3885        struct se_portal_group *se_tpg = to_tpg(item);
3886        struct ibmvscsis_tport *tport = container_of(se_tpg,
3887                                                     struct ibmvscsis_tport,
3888                                                     se_tpg);
3889        struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
3890        unsigned long tmp;
3891        int rc;
3892        long lrc;
3893
3894        rc = kstrtoul(page, 0, &tmp);
3895        if (rc < 0) {
3896                pr_err("Unable to extract srpt_tpg_store_enable\n");
3897                return -EINVAL;
3898        }
3899
3900        if ((tmp != 0) && (tmp != 1)) {
3901                pr_err("Illegal value for srpt_tpg_store_enable\n");
3902                return -EINVAL;
3903        }
3904
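        /*
         * Enabling the tpg kicks off the init handshake with the client via
         * ibmvscsis_enable_change_state().
         */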
3905        if (tmp) {
3906                tport->enabled = true;
3907                spin_lock_bh(&vscsi->intr_lock);
3908                lrc = ibmvscsis_enable_change_state(vscsi);
3909                if (lrc)
3910                        pr_err("enable_change_state failed, rc %ld state %d\n",
3911                               lrc, vscsi->state);
3912                spin_unlock_bh(&vscsi->intr_lock);
3913        } else {
3914                tport->enabled = false;
3915        }
3916
3917        pr_debug("tpg_enable_store, state %d\n", vscsi->state);
3918
3919        return count;
3920}
3921CONFIGFS_ATTR(ibmvscsis_tpg_, enable);
3922
3923static struct configfs_attribute *ibmvscsis_tpg_attrs[] = {
3924        &ibmvscsis_tpg_attr_enable,
3925        NULL,
3926};
3927
3928static const struct target_core_fabric_ops ibmvscsis_ops = {
3929        .module                         = THIS_MODULE,
3930        .name                           = "ibmvscsis",
3931        .get_fabric_name                = ibmvscsis_get_fabric_name,
3932        .tpg_get_wwn                    = ibmvscsis_get_fabric_wwn,
3933        .tpg_get_tag                    = ibmvscsis_get_tag,
3934        .tpg_get_default_depth          = ibmvscsis_get_default_depth,
3935        .tpg_check_demo_mode            = ibmvscsis_check_true,
3936        .tpg_check_demo_mode_cache      = ibmvscsis_check_true,
3937        .tpg_check_demo_mode_write_protect = ibmvscsis_check_false,
3938        .tpg_check_prod_mode_write_protect = ibmvscsis_check_false,
3939        .tpg_get_inst_index             = ibmvscsis_tpg_get_inst_index,
3940        .check_stop_free                = ibmvscsis_check_stop_free,
3941        .release_cmd                    = ibmvscsis_release_cmd,
3942        .sess_get_index                 = ibmvscsis_sess_get_index,
3943        .write_pending                  = ibmvscsis_write_pending,
3944        .write_pending_status           = ibmvscsis_write_pending_status,
3945        .set_default_node_attributes    = ibmvscsis_set_default_node_attrs,
3946        .get_cmd_state                  = ibmvscsis_get_cmd_state,
3947        .queue_data_in                  = ibmvscsis_queue_data_in,
3948        .queue_status                   = ibmvscsis_queue_status,
3949        .queue_tm_rsp                   = ibmvscsis_queue_tm_rsp,
3950        .aborted_task                   = ibmvscsis_aborted_task,
3951        /*
3952         * Setup function pointers for logic in target_core_fabric_configfs.c
3953         */
3954        .fabric_make_wwn                = ibmvscsis_make_tport,
3955        .fabric_drop_wwn                = ibmvscsis_drop_tport,
3956        .fabric_make_tpg                = ibmvscsis_make_tpg,
3957        .fabric_drop_tpg                = ibmvscsis_drop_tpg,
3958
3959        .tfc_wwn_attrs                  = ibmvscsis_wwn_attrs,
3960        .tfc_tpg_base_attrs             = ibmvscsis_tpg_attrs,
3961};
3962
3963static void ibmvscsis_dev_release(struct device *dev) {}
3964
3965static struct class_attribute ibmvscsis_class_attrs[] = {
3966        __ATTR_NULL,
3967};
3968
3969static struct device_attribute dev_attr_system_id =
3970        __ATTR(system_id, S_IRUGO, system_id_show, NULL);
3971
3972static struct device_attribute dev_attr_partition_number =
3973        __ATTR(partition_number, S_IRUGO, partition_number_show, NULL);
3974
3975static struct device_attribute dev_attr_unit_address =
3976        __ATTR(unit_address, S_IRUGO, unit_address_show, NULL);
3977
3978static struct attribute *ibmvscsis_dev_attrs[] = {
3979        &dev_attr_system_id.attr,
3980        &dev_attr_partition_number.attr,
3981        &dev_attr_unit_address.attr,
        NULL,
3982};
3983ATTRIBUTE_GROUPS(ibmvscsis_dev);
3984
3985static struct class ibmvscsis_class = {
3986        .name           = "ibmvscsis",
3987        .dev_release    = ibmvscsis_dev_release,
3988        .class_attrs    = ibmvscsis_class_attrs,
3989        .dev_groups     = ibmvscsis_dev_groups,
3990};
3991
3992static struct vio_device_id ibmvscsis_device_table[] = {
3993        { "v-scsi-host", "IBM,v-scsi-host" },
3994        { "", "" }
3995};
3996MODULE_DEVICE_TABLE(vio, ibmvscsis_device_table);
3997
3998static struct vio_driver ibmvscsis_driver = {
3999        .name = "ibmvscsis",
4000        .id_table = ibmvscsis_device_table,
4001        .probe = ibmvscsis_probe,
4002        .remove = ibmvscsis_remove,
4003};
4004
4005/*
4006 * ibmvscsis_init() - Kernel Module initialization
4007 *
4008 * Note: vio_register_driver() registers callback functions, and at least one
4009 * of those callback functions calls TCM - Linux IO Target Subsystem, thus
4010 * the SCSI Target template must be registered before vio_register_driver()
4011 * is called.
4012 */
4013static int __init ibmvscsis_init(void)
4014{
4015        int rc = 0;
4016
4017        rc = ibmvscsis_get_system_info();
4018        if (rc) {
4019                pr_err("rc %d from get_system_info\n", rc);
4020                goto out;
4021        }
4022
4023        rc = class_register(&ibmvscsis_class);
4024        if (rc) {
4025                pr_err("failed class register\n");
4026                goto out;
4027        }
4028
4029        rc = target_register_template(&ibmvscsis_ops);
4030        if (rc) {
4031                pr_err("rc %d from target_register_template\n", rc);
4032                goto unregister_class;
4033        }
4034
4035        rc = vio_register_driver(&ibmvscsis_driver);
4036        if (rc) {
4037                pr_err("rc %d from vio_register_driver\n", rc);
4038                goto unregister_target;
4039        }
4040
4041        return 0;
4042
4043unregister_target:
4044        target_unregister_template(&ibmvscsis_ops);
4045unregister_class:
4046        class_unregister(&ibmvscsis_class);
4047out:
4048        return rc;
4049}
4050
4051static void __exit ibmvscsis_exit(void)
4052{
4053        pr_info("Unregister IBM virtual SCSI host driver\n");
4054        vio_unregister_driver(&ibmvscsis_driver);
4055        target_unregister_template(&ibmvscsis_ops);
4056        class_unregister(&ibmvscsis_class);
4057}
4058
4059MODULE_DESCRIPTION("IBMVSCSIS fabric driver");
4060MODULE_AUTHOR("Bryant G. Ly and Michael Cyr");
4061MODULE_LICENSE("GPL");
4062MODULE_VERSION(IBMVSCSIS_VERSION);
4063module_init(ibmvscsis_init);
4064module_exit(ibmvscsis_exit);
4065