linux/drivers/scsi/ibmvscsi/ibmvscsi.c
   1/* ------------------------------------------------------------
   2 * ibmvscsi.c
   3 * (C) Copyright IBM Corporation 1994, 2004
   4 * Authors: Colin DeVilbiss (devilbis@us.ibm.com)
   5 *          Santiago Leon (santil@us.ibm.com)
   6 *          Dave Boutcher (sleddog@us.ibm.com)
   7 *
   8 * This program is free software; you can redistribute it and/or modify
   9 * it under the terms of the GNU General Public License as published by
  10 * the Free Software Foundation; either version 2 of the License, or
  11 * (at your option) any later version.
  12 *
  13 * This program is distributed in the hope that it will be useful,
  14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  16 * GNU General Public License for more details.
  17 *
  18 * You should have received a copy of the GNU General Public License
  19 * along with this program; if not, write to the Free Software
  20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
  21 * USA
  22 *
  23 * ------------------------------------------------------------
  24 * Emulation of a SCSI host adapter for Virtual I/O devices
  25 *
  26 * This driver supports the SCSI adapter implemented by the IBM
  27 * Power5 firmware.  That SCSI adapter is not a physical adapter,
  28 * but allows Linux SCSI peripheral drivers to directly
  29 * access devices in another logical partition on the physical system.
  30 *
  31 * The virtual adapter(s) are present in the open firmware device
  32 * tree just like real adapters.
  33 *
  34 * One of the capabilities provided on these systems is the ability
  35 * to DMA between partitions.  The architecture states that for VSCSI,
  36 * the server side is allowed to DMA to and from the client.  The client
  37 * is never trusted to DMA to or from the server directly.
  38 *
  39 * Messages are sent between partitions on a "Command/Response Queue" 
   40 * (CRQ), which is just a buffer of 16 byte entries in the receiver's
   41 * memory.  Senders cannot access the buffer directly, but send messages by
  42 * making a hypervisor call and passing in the 16 bytes.  The hypervisor
  43 * puts the message in the next 16 byte space in round-robin fashion,
  44 * turns on the high order bit of the message (the valid bit), and 
  45 * generates an interrupt to the receiver (if interrupts are turned on.) 
   46 * The receiver just turns off the valid bit when it has copied out
  47 * the message.
  48 *
  49 * The VSCSI client builds a SCSI Remote Protocol (SRP) Information Unit
  50 * (IU) (as defined in the T10 standard available at www.t10.org), gets 
  51 * a DMA address for the message, and sends it to the server as the
  52 * payload of a CRQ message.  The server DMAs the SRP IU and processes it,
  53 * including doing any additional data transfers.  When it is done, it
  54 * DMAs the SRP response back to the same address as the request came from,
  55 * and sends a CRQ message back to inform the client that the request has
  56 * completed.
  57 *
  58 * TODO: This is currently pretty tied to the IBM pSeries hypervisor
  59 * interfaces.  It would be really nice to abstract this above an RDMA
  60 * layer.
  61 */
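/* ------------------------------------------------------------
 * Illustrative sketch (not part of the driver): sending one CRQ entry
 * amounts to handing the hypervisor the 16 bytes as two 64-bit words.
 * VIOSRP_CRQ_CMD_RSP below sets the high-order "valid" bit; the calls
 * mirror those used later in this file, and "iu_dma_addr" is a
 * hypothetical DMA address of an SRP IU.
 *
 *	struct viosrp_crq my_crq;
 *	__be64 *words = (__be64 *)&my_crq;
 *
 *	memset(&my_crq, 0, sizeof(my_crq));
 *	my_crq.valid = VIOSRP_CRQ_CMD_RSP;
 *	my_crq.format = VIOSRP_SRP_FORMAT;
 *	my_crq.IU_data_ptr = cpu_to_be64(iu_dma_addr);
 *	plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address,
 *			   be64_to_cpu(words[0]), be64_to_cpu(words[1]));
 */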
  62
  63#include <linux/module.h>
  64#include <linux/moduleparam.h>
  65#include <linux/dma-mapping.h>
  66#include <linux/delay.h>
  67#include <linux/slab.h>
  68#include <linux/of.h>
  69#include <linux/pm.h>
  70#include <linux/kthread.h>
  71#include <asm/firmware.h>
  72#include <asm/vio.h>
  73#include <scsi/scsi.h>
  74#include <scsi/scsi_cmnd.h>
  75#include <scsi/scsi_host.h>
  76#include <scsi/scsi_device.h>
  77#include <scsi/scsi_transport_srp.h>
  78#include "ibmvscsi.h"
  79
  80/* The values below are somewhat arbitrary default values, but 
  81 * OS/400 will use 3 busses (disks, CDs, tapes, I think.)
  82 * Note that there are 3 bits of channel value, 6 bits of id, and
  83 * 5 bits of LUN.
  84 */
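/* With that split, channel values can range 0-7 (3 bits), ids 0-63
 * (6 bits), and LUNs 0-31 (5 bits); max_id's default of 64 covers the
 * full 6-bit id space, and max_channel's default of 3 matches the three
 * busses noted above.
 */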
  85static int max_id = 64;
  86static int max_channel = 3;
  87static int init_timeout = 300;
  88static int login_timeout = 60;
  89static int info_timeout = 30;
  90static int abort_timeout = 60;
  91static int reset_timeout = 60;
  92static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
  93static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
  94static int fast_fail = 1;
  95static int client_reserve = 1;
  96static char partition_name[96] = "UNKNOWN";
  97static unsigned int partition_number = -1;
  98static LIST_HEAD(ibmvscsi_head);
  99static DEFINE_SPINLOCK(ibmvscsi_driver_lock);
 100
 101static struct scsi_transport_template *ibmvscsi_transport_template;
 102
 103#define IBMVSCSI_VERSION "1.5.9"
 104
 105MODULE_DESCRIPTION("IBM Virtual SCSI");
 106MODULE_AUTHOR("Dave Boutcher");
 107MODULE_LICENSE("GPL");
 108MODULE_VERSION(IBMVSCSI_VERSION);
 109
 110module_param_named(max_id, max_id, int, S_IRUGO | S_IWUSR);
 111MODULE_PARM_DESC(max_id, "Largest ID value for each channel [Default=64]");
 112module_param_named(max_channel, max_channel, int, S_IRUGO | S_IWUSR);
 113MODULE_PARM_DESC(max_channel, "Largest channel value [Default=3]");
 114module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR);
 115MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds");
 116module_param_named(max_requests, max_requests, int, S_IRUGO);
 117MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter");
 118module_param_named(fast_fail, fast_fail, int, S_IRUGO | S_IWUSR);
 119MODULE_PARM_DESC(fast_fail, "Enable fast fail. [Default=1]");
  120module_param_named(client_reserve, client_reserve, int, S_IRUGO);
 121MODULE_PARM_DESC(client_reserve, "Attempt client managed reserve/release");
 122
 123static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 124                                struct ibmvscsi_host_data *hostdata);
 125
 126/* ------------------------------------------------------------
 127 * Routines for managing the command/response queue
 128 */
 129/**
 130 * ibmvscsi_handle_event: - Interrupt handler for crq events
 131 * @irq:        number of irq to handle, not used
 132 * @dev_instance: ibmvscsi_host_data of host that received interrupt
 133 *
 134 * Disables interrupts and schedules srp_task
 135 * Always returns IRQ_HANDLED
 136 */
 137static irqreturn_t ibmvscsi_handle_event(int irq, void *dev_instance)
 138{
 139        struct ibmvscsi_host_data *hostdata =
 140            (struct ibmvscsi_host_data *)dev_instance;
 141        vio_disable_interrupts(to_vio_dev(hostdata->dev));
 142        tasklet_schedule(&hostdata->srp_task);
 143        return IRQ_HANDLED;
 144}
 145
 146/**
  147 * ibmvscsi_release_crq_queue: - Deallocates data and unregisters CRQ
  148 * @queue:      crq_queue to be released
  149 * @hostdata:   ibmvscsi_host_data of host
 150 *
 151 * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
 152 * the crq with the hypervisor.
 153 */
 154static void ibmvscsi_release_crq_queue(struct crq_queue *queue,
 155                                       struct ibmvscsi_host_data *hostdata,
 156                                       int max_requests)
 157{
 158        long rc = 0;
 159        struct vio_dev *vdev = to_vio_dev(hostdata->dev);
 160        free_irq(vdev->irq, (void *)hostdata);
 161        tasklet_kill(&hostdata->srp_task);
 162        do {
 163                if (rc)
 164                        msleep(100);
 165                rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
 166        } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
 167        dma_unmap_single(hostdata->dev,
 168                         queue->msg_token,
 169                         queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
 170        free_page((unsigned long)queue->msgs);
 171}
 172
 173/**
 174 * crq_queue_next_crq: - Returns the next entry in message queue
 175 * @queue:      crq_queue to use
 176 *
 177 * Returns pointer to next entry in queue, or NULL if there are no new
  178 * entries in the CRQ.
 179 */
 180static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
 181{
 182        struct viosrp_crq *crq;
 183        unsigned long flags;
 184
 185        spin_lock_irqsave(&queue->lock, flags);
 186        crq = &queue->msgs[queue->cur];
 187        if (crq->valid != VIOSRP_CRQ_FREE) {
 188                if (++queue->cur == queue->size)
 189                        queue->cur = 0;
 190
 191                /* Ensure the read of the valid bit occurs before reading any
 192                 * other bits of the CRQ entry
 193                 */
 194                rmb();
 195        } else
 196                crq = NULL;
 197        spin_unlock_irqrestore(&queue->lock, flags);
 198
 199        return crq;
 200}
 201
 202/**
 203 * ibmvscsi_send_crq: - Send a CRQ
 204 * @hostdata:   the adapter
 205 * @word1:      the first 64 bits of the data
 206 * @word2:      the second 64 bits of the data
 207 */
 208static int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata,
 209                             u64 word1, u64 word2)
 210{
 211        struct vio_dev *vdev = to_vio_dev(hostdata->dev);
 212
 213        /*
 214         * Ensure the command buffer is flushed to memory before handing it
 215         * over to the VIOS to prevent it from fetching any stale data.
 216         */
 217        mb();
 218        return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
 219}
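/* Typical call site (see ibmvscsi_send_srp_event() below): the 16-byte
 * CRQ entry is reinterpreted as two big-endian words before the hcall.
 *
 *	__be64 *crq_as_u64 = (__be64 *)&evt_struct->crq;
 *
 *	rc = ibmvscsi_send_crq(hostdata, be64_to_cpu(crq_as_u64[0]),
 *			       be64_to_cpu(crq_as_u64[1]));
 */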
 220
 221/**
 222 * ibmvscsi_task: - Process srps asynchronously
 223 * @data:       ibmvscsi_host_data of host
 224 */
 225static void ibmvscsi_task(void *data)
 226{
 227        struct ibmvscsi_host_data *hostdata = (struct ibmvscsi_host_data *)data;
 228        struct vio_dev *vdev = to_vio_dev(hostdata->dev);
 229        struct viosrp_crq *crq;
 230        int done = 0;
 231
 232        while (!done) {
 233                /* Pull all the valid messages off the CRQ */
 234                while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
 235                        ibmvscsi_handle_crq(crq, hostdata);
 236                        crq->valid = VIOSRP_CRQ_FREE;
 237                        wmb();
 238                }
 239
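                /* Close the race between the last poll and re-enabling
                 * interrupts: an entry that arrived in that window raises
                 * no new interrupt, so the queue must be checked once more
                 * after interrupts are back on.
                 */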
 240                vio_enable_interrupts(vdev);
 241                crq = crq_queue_next_crq(&hostdata->queue);
 242                if (crq != NULL) {
 243                        vio_disable_interrupts(vdev);
 244                        ibmvscsi_handle_crq(crq, hostdata);
 245                        crq->valid = VIOSRP_CRQ_FREE;
 246                        wmb();
 247                } else {
 248                        done = 1;
 249                }
 250        }
 251}
 252
 253static void gather_partition_info(void)
 254{
 255        const char *ppartition_name;
 256        const __be32 *p_number_ptr;
 257
 258        /* Retrieve information about this partition */
 259        if (!of_root)
 260                return;
 261
 262        of_node_get(of_root);
 263
 264        ppartition_name = of_get_property(of_root, "ibm,partition-name", NULL);
 265        if (ppartition_name)
 266                strlcpy(partition_name, ppartition_name,
 267                                sizeof(partition_name));
 268        p_number_ptr = of_get_property(of_root, "ibm,partition-no", NULL);
 269        if (p_number_ptr)
 270                partition_number = of_read_number(p_number_ptr, 1);
 271        of_node_put(of_root);
 272}
 273
 274static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
 275{
 276        memset(&hostdata->madapter_info, 0x00,
 277                        sizeof(hostdata->madapter_info));
 278
 279        dev_info(hostdata->dev, "SRP_VERSION: %s\n", SRP_VERSION);
 280        strcpy(hostdata->madapter_info.srp_version, SRP_VERSION);
 281
 282        strncpy(hostdata->madapter_info.partition_name, partition_name,
 283                        sizeof(hostdata->madapter_info.partition_name));
 284
 285        hostdata->madapter_info.partition_number =
 286                                        cpu_to_be32(partition_number);
 287
 288        hostdata->madapter_info.mad_version = cpu_to_be32(SRP_MAD_VERSION_1);
 289        hostdata->madapter_info.os_type = cpu_to_be32(SRP_MAD_OS_LINUX);
 290}
 291
 292/**
  293 * ibmvscsi_reset_crq_queue: - resets a crq after a failure
  294 * @queue:      crq_queue to be reset
 295 * @hostdata:   ibmvscsi_host_data of host
 296 *
 297 */
 298static int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
 299                                    struct ibmvscsi_host_data *hostdata)
 300{
 301        int rc = 0;
 302        struct vio_dev *vdev = to_vio_dev(hostdata->dev);
 303
 304        /* Close the CRQ */
 305        do {
 306                if (rc)
 307                        msleep(100);
 308                rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
 309        } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
 310
 311        /* Clean out the queue */
 312        memset(queue->msgs, 0x00, PAGE_SIZE);
 313        queue->cur = 0;
 314
 315        set_adapter_info(hostdata);
 316
 317        /* And re-open it again */
 318        rc = plpar_hcall_norets(H_REG_CRQ,
 319                                vdev->unit_address,
 320                                queue->msg_token, PAGE_SIZE);
 321        if (rc == H_CLOSED) {
 322                /* Adapter is good, but other end is not ready */
 323                dev_warn(hostdata->dev, "Partner adapter not ready\n");
 324        } else if (rc != 0) {
 325                dev_warn(hostdata->dev, "couldn't register crq--rc 0x%x\n", rc);
 326        }
 327        return rc;
 328}
 329
 330/**
  331 * ibmvscsi_init_crq_queue: - Initializes and registers CRQ with hypervisor
 332 * @queue:      crq_queue to initialize and register
 333 * @hostdata:   ibmvscsi_host_data of host
 334 *
 335 * Allocates a page for messages, maps it for dma, and registers
 336 * the crq with the hypervisor.
 337 * Returns zero on success.
 338 */
 339static int ibmvscsi_init_crq_queue(struct crq_queue *queue,
 340                                   struct ibmvscsi_host_data *hostdata,
 341                                   int max_requests)
 342{
 343        int rc;
 344        int retrc;
 345        struct vio_dev *vdev = to_vio_dev(hostdata->dev);
 346
 347        queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
 348
 349        if (!queue->msgs)
 350                goto malloc_failed;
 351        queue->size = PAGE_SIZE / sizeof(*queue->msgs);
 352
 353        queue->msg_token = dma_map_single(hostdata->dev, queue->msgs,
 354                                          queue->size * sizeof(*queue->msgs),
 355                                          DMA_BIDIRECTIONAL);
 356
 357        if (dma_mapping_error(hostdata->dev, queue->msg_token))
 358                goto map_failed;
 359
 360        gather_partition_info();
 361        set_adapter_info(hostdata);
 362
 363        retrc = rc = plpar_hcall_norets(H_REG_CRQ,
 364                                vdev->unit_address,
 365                                queue->msg_token, PAGE_SIZE);
 366        if (rc == H_RESOURCE)
 367                /* maybe kexecing and resource is busy. try a reset */
 368                rc = ibmvscsi_reset_crq_queue(queue,
 369                                              hostdata);
 370
 371        if (rc == H_CLOSED) {
 372                /* Adapter is good, but other end is not ready */
 373                dev_warn(hostdata->dev, "Partner adapter not ready\n");
 374                retrc = 0;
 375        } else if (rc != 0) {
 376                dev_warn(hostdata->dev, "Error %d opening adapter\n", rc);
 377                goto reg_crq_failed;
 378        }
 379
 380        queue->cur = 0;
 381        spin_lock_init(&queue->lock);
 382
 383        tasklet_init(&hostdata->srp_task, (void *)ibmvscsi_task,
 384                     (unsigned long)hostdata);
 385
 386        if (request_irq(vdev->irq,
 387                        ibmvscsi_handle_event,
 388                        0, "ibmvscsi", (void *)hostdata) != 0) {
 389                dev_err(hostdata->dev, "couldn't register irq 0x%x\n",
 390                        vdev->irq);
 391                goto req_irq_failed;
 392        }
 393
 394        rc = vio_enable_interrupts(vdev);
 395        if (rc != 0) {
 396                dev_err(hostdata->dev, "Error %d enabling interrupts!!!\n", rc);
 397                goto req_irq_failed;
 398        }
 399
 400        return retrc;
 401
 402      req_irq_failed:
 403        tasklet_kill(&hostdata->srp_task);
 404        rc = 0;
 405        do {
 406                if (rc)
 407                        msleep(100);
 408                rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
 409        } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
 410      reg_crq_failed:
 411        dma_unmap_single(hostdata->dev,
 412                         queue->msg_token,
 413                         queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
 414      map_failed:
 415        free_page((unsigned long)queue->msgs);
 416      malloc_failed:
 417        return -1;
 418}
 419
 420/**
  421 * ibmvscsi_reenable_crq_queue: - reenables a crq after a failure
  422 * @queue:      crq_queue to be reenabled
 423 * @hostdata:   ibmvscsi_host_data of host
 424 *
 425 */
 426static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
 427                                       struct ibmvscsi_host_data *hostdata)
 428{
 429        int rc = 0;
 430        struct vio_dev *vdev = to_vio_dev(hostdata->dev);
 431
 432        set_adapter_info(hostdata);
 433
 434        /* Re-enable the CRQ */
 435        do {
 436                if (rc)
 437                        msleep(100);
 438                rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
 439        } while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
 440
 441        if (rc)
 442                dev_err(hostdata->dev, "Error %d enabling adapter\n", rc);
 443        return rc;
 444}
 445
 446/* ------------------------------------------------------------
 447 * Routines for the event pool and event structs
 448 */
 449/**
 450 * initialize_event_pool: - Allocates and initializes the event pool for a host
 451 * @pool:       event_pool to be initialized
 452 * @size:       Number of events in pool
 453 * @hostdata:   ibmvscsi_host_data who owns the event pool
 454 *
 455 * Returns zero on success.
 456*/
 457static int initialize_event_pool(struct event_pool *pool,
 458                                 int size, struct ibmvscsi_host_data *hostdata)
 459{
 460        int i;
 461
 462        pool->size = size;
 463        pool->next = 0;
 464        pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
 465        if (!pool->events)
 466                return -ENOMEM;
 467
 468        pool->iu_storage =
 469            dma_alloc_coherent(hostdata->dev,
 470                               pool->size * sizeof(*pool->iu_storage),
 471                               &pool->iu_token, 0);
 472        if (!pool->iu_storage) {
 473                kfree(pool->events);
 474                return -ENOMEM;
 475        }
 476
 477        for (i = 0; i < pool->size; ++i) {
 478                struct srp_event_struct *evt = &pool->events[i];
 479                memset(&evt->crq, 0x00, sizeof(evt->crq));
 480                atomic_set(&evt->free, 1);
 481                evt->crq.valid = VIOSRP_CRQ_CMD_RSP;
 482                evt->crq.IU_length = cpu_to_be16(sizeof(*evt->xfer_iu));
 483                evt->crq.IU_data_ptr = cpu_to_be64(pool->iu_token +
 484                        sizeof(*evt->xfer_iu) * i);
 485                evt->xfer_iu = pool->iu_storage + i;
 486                evt->hostdata = hostdata;
 487                evt->ext_list = NULL;
 488                evt->ext_list_token = 0;
 489        }
 490
 491        return 0;
 492}
 493
 494/**
 495 * release_event_pool: - Frees memory of an event pool of a host
 496 * @pool:       event_pool to be released
  497 * @hostdata:   ibmvscsi_host_data that owns the event pool
  498 *
  499 * Warns if any events are still in use when the pool is released.
 500*/
 501static void release_event_pool(struct event_pool *pool,
 502                               struct ibmvscsi_host_data *hostdata)
 503{
 504        int i, in_use = 0;
 505        for (i = 0; i < pool->size; ++i) {
 506                if (atomic_read(&pool->events[i].free) != 1)
 507                        ++in_use;
 508                if (pool->events[i].ext_list) {
 509                        dma_free_coherent(hostdata->dev,
 510                                  SG_ALL * sizeof(struct srp_direct_buf),
 511                                  pool->events[i].ext_list,
 512                                  pool->events[i].ext_list_token);
 513                }
 514        }
 515        if (in_use)
 516                dev_warn(hostdata->dev, "releasing event pool with %d "
 517                         "events still in use?\n", in_use);
 518        kfree(pool->events);
 519        dma_free_coherent(hostdata->dev,
 520                          pool->size * sizeof(*pool->iu_storage),
 521                          pool->iu_storage, pool->iu_token);
 522}
 523
 524/**
 525 * valid_event_struct: - Determines if event is valid.
 526 * @pool:       event_pool that contains the event
 527 * @evt:        srp_event_struct to be checked for validity
 528 *
 529 * Returns zero if event is invalid, one otherwise.
 530*/
 531static int valid_event_struct(struct event_pool *pool,
 532                                struct srp_event_struct *evt)
 533{
 534        int index = evt - pool->events;
 535        if (index < 0 || index >= pool->size)   /* outside of bounds */
 536                return 0;
 537        if (evt != pool->events + index)        /* unaligned */
 538                return 0;
 539        return 1;
 540}
 541
 542/**
  543 * free_event_struct: - Changes status of event to "free"
 544 * @pool:       event_pool that contains the event
 545 * @evt:        srp_event_struct to be modified
 546 *
 547*/
 548static void free_event_struct(struct event_pool *pool,
 549                                       struct srp_event_struct *evt)
 550{
 551        if (!valid_event_struct(pool, evt)) {
 552                dev_err(evt->hostdata->dev, "Freeing invalid event_struct %p "
 553                        "(not in pool %p)\n", evt, pool->events);
 554                return;
 555        }
 556        if (atomic_inc_return(&evt->free) != 1) {
 557                dev_err(evt->hostdata->dev, "Freeing event_struct %p "
 558                        "which is not in use!\n", evt);
 559                return;
 560        }
 561}
 562
 563/**
  564 * get_event_struct: - Gets the next free event in pool
 565 * @pool:       event_pool that contains the events to be searched
 566 *
 567 * Returns the next event in "free" state, and NULL if none are free.
 568 * Note that no synchronization is done here, we assume the host_lock
  569 * will synchronize things.
 570*/
 571static struct srp_event_struct *get_event_struct(struct event_pool *pool)
 572{
 573        int i;
 574        int poolsize = pool->size;
 575        int offset = pool->next;
 576
 577        for (i = 0; i < poolsize; i++) {
 578                offset = (offset + 1) % poolsize;
 579                if (!atomic_dec_if_positive(&pool->events[offset].free)) {
 580                        pool->next = offset;
 581                        return &pool->events[offset];
 582                }
 583        }
 584
 585        printk(KERN_ERR "ibmvscsi: found no event struct in pool!\n");
 586        return NULL;
 587}
 588
 589/**
 590 * init_event_struct: Initialize fields in an event struct that are always 
 591 *                    required.
  592 * @evt_struct: The event
 593 * @done:       Routine to call when the event is responded to
 594 * @format:     SRP or MAD format
 595 * @timeout:    timeout value set in the CRQ
 596 */
 597static void init_event_struct(struct srp_event_struct *evt_struct,
 598                              void (*done) (struct srp_event_struct *),
 599                              u8 format,
 600                              int timeout)
 601{
 602        evt_struct->cmnd = NULL;
 603        evt_struct->cmnd_done = NULL;
 604        evt_struct->sync_srp = NULL;
 605        evt_struct->crq.format = format;
 606        evt_struct->crq.timeout = cpu_to_be16(timeout);
 607        evt_struct->done = done;
 608}
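/* A sketch of the event lifecycle as the senders below use it
 * ("my_done" is a hypothetical completion callback):
 *
 *	struct srp_event_struct *evt = get_event_struct(&hostdata->pool);
 *
 *	if (!evt)
 *		return SCSI_MLQUEUE_HOST_BUSY;
 *	init_event_struct(evt, my_done, VIOSRP_SRP_FORMAT, timeout);
 *	(fill in evt->iu, then, holding host_lock:)
 *	rc = ibmvscsi_send_srp_event(evt, hostdata, timeout * 2);
 *
 * On failure the struct is returned via free_event_struct().
 */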
 609
 610/* ------------------------------------------------------------
 611 * Routines for receiving SCSI responses from the hosting partition
 612 */
 613
 614/**
 615 * set_srp_direction: Set the fields in the srp related to data
 616 *     direction and number of buffers based on the direction in
 617 *     the scsi_cmnd and the number of buffers
 618 */
 619static void set_srp_direction(struct scsi_cmnd *cmd,
 620                              struct srp_cmd *srp_cmd, 
 621                              int numbuf)
 622{
 623        u8 fmt;
 624
 625        if (numbuf == 0)
 626                return;
 627        
 628        if (numbuf == 1)
 629                fmt = SRP_DATA_DESC_DIRECT;
 630        else {
 631                fmt = SRP_DATA_DESC_INDIRECT;
 632                numbuf = min(numbuf, MAX_INDIRECT_BUFS);
 633
 634                if (cmd->sc_data_direction == DMA_TO_DEVICE)
 635                        srp_cmd->data_out_desc_cnt = numbuf;
 636                else
 637                        srp_cmd->data_in_desc_cnt = numbuf;
 638        }
 639
 640        if (cmd->sc_data_direction == DMA_TO_DEVICE)
 641                srp_cmd->buf_fmt = fmt << 4;
 642        else
 643                srp_cmd->buf_fmt = fmt;
 644}
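/* buf_fmt carries the data-out format in the high nibble and the data-in
 * format in the low nibble.  For example, a DMA_TO_DEVICE command with
 * three mapped segments leaves here with
 *
 *	srp_cmd->buf_fmt = SRP_DATA_DESC_INDIRECT << 4;
 *	srp_cmd->data_out_desc_cnt = 3;
 *
 * while a single-segment DMA_FROM_DEVICE command gets just
 * buf_fmt = SRP_DATA_DESC_DIRECT and no descriptor count.
 */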
 645
 646/**
  647 * unmap_cmd_data: - Unmap data pointed to by srp_cmd based on the format
 648 * @cmd:        srp_cmd whose additional_data member will be unmapped
 649 * @dev:        device for which the memory is mapped
 650 *
 651*/
 652static void unmap_cmd_data(struct srp_cmd *cmd,
 653                           struct srp_event_struct *evt_struct,
 654                           struct device *dev)
 655{
 656        u8 out_fmt, in_fmt;
 657
 658        out_fmt = cmd->buf_fmt >> 4;
 659        in_fmt = cmd->buf_fmt & ((1U << 4) - 1);
 660
 661        if (out_fmt == SRP_NO_DATA_DESC && in_fmt == SRP_NO_DATA_DESC)
 662                return;
 663
 664        if (evt_struct->cmnd)
 665                scsi_dma_unmap(evt_struct->cmnd);
 666}
 667
  668static u64 map_sg_list(struct scsi_cmnd *cmd, int nseg,
  669                       struct srp_direct_buf *md)
 670{
 671        int i;
 672        struct scatterlist *sg;
 673        u64 total_length = 0;
 674
 675        scsi_for_each_sg(cmd, sg, nseg, i) {
 676                struct srp_direct_buf *descr = md + i;
 677                descr->va = cpu_to_be64(sg_dma_address(sg));
 678                descr->len = cpu_to_be32(sg_dma_len(sg));
 679                descr->key = 0;
 680                total_length += sg_dma_len(sg);
 681        }
 682        return total_length;
 683}
 684
 685/**
 686 * map_sg_data: - Maps dma for a scatterlist and initializes descriptor fields
 687 * @cmd:        struct scsi_cmnd with the scatterlist
 688 * @srp_cmd:    srp_cmd that contains the memory descriptor
 689 * @dev:        device for which to map dma memory
 690 *
 691 * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd.
 692 * Returns 1 on success.
 693*/
 694static int map_sg_data(struct scsi_cmnd *cmd,
 695                       struct srp_event_struct *evt_struct,
 696                       struct srp_cmd *srp_cmd, struct device *dev)
 697{
 698
 699        int sg_mapped;
 700        u64 total_length = 0;
 701        struct srp_direct_buf *data =
 702                (struct srp_direct_buf *) srp_cmd->add_data;
 703        struct srp_indirect_buf *indirect =
 704                (struct srp_indirect_buf *) data;
 705
 706        sg_mapped = scsi_dma_map(cmd);
 707        if (!sg_mapped)
 708                return 1;
 709        else if (sg_mapped < 0)
 710                return 0;
 711
 712        set_srp_direction(cmd, srp_cmd, sg_mapped);
 713
 714        /* special case; we can use a single direct descriptor */
 715        if (sg_mapped == 1) {
 716                map_sg_list(cmd, sg_mapped, data);
 717                return 1;
 718        }
 719
 720        indirect->table_desc.va = 0;
 721        indirect->table_desc.len = cpu_to_be32(sg_mapped *
 722                                               sizeof(struct srp_direct_buf));
 723        indirect->table_desc.key = 0;
 724
 725        if (sg_mapped <= MAX_INDIRECT_BUFS) {
 726                total_length = map_sg_list(cmd, sg_mapped,
 727                                           &indirect->desc_list[0]);
 728                indirect->len = cpu_to_be32(total_length);
 729                return 1;
 730        }
 731
 732        /* get indirect table */
 733        if (!evt_struct->ext_list) {
 734                evt_struct->ext_list = (struct srp_direct_buf *)
 735                        dma_alloc_coherent(dev,
 736                                           SG_ALL * sizeof(struct srp_direct_buf),
 737                                           &evt_struct->ext_list_token, 0);
 738                if (!evt_struct->ext_list) {
 739                        if (!firmware_has_feature(FW_FEATURE_CMO))
 740                                sdev_printk(KERN_ERR, cmd->device,
 741                                            "Can't allocate memory "
 742                                            "for indirect table\n");
 743                        scsi_dma_unmap(cmd);
 744                        return 0;
 745                }
 746        }
 747
 748        total_length = map_sg_list(cmd, sg_mapped, evt_struct->ext_list);
 749
 750        indirect->len = cpu_to_be32(total_length);
 751        indirect->table_desc.va = cpu_to_be64(evt_struct->ext_list_token);
 752        indirect->table_desc.len = cpu_to_be32(sg_mapped *
 753                                               sizeof(indirect->desc_list[0]));
 754        memcpy(indirect->desc_list, evt_struct->ext_list,
 755               MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf));
 756        return 1;
 757}
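/* To summarize the strategy above: one mapped segment uses a single
 * direct descriptor; up to MAX_INDIRECT_BUFS segments fit in the
 * indirect descriptor list held inline in the IU; anything larger spills
 * into the separately DMA-mapped ext_list, whose bus address is placed
 * in indirect->table_desc.va.
 */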
 758
 759/**
 760 * map_data_for_srp_cmd: - Calls functions to map data for srp cmds
 761 * @cmd:        struct scsi_cmnd with the memory to be mapped
 762 * @srp_cmd:    srp_cmd that contains the memory descriptor
 763 * @dev:        dma device for which to map dma memory
 764 *
  765 * Called by ibmvscsi_queuecommand() when converting scsi cmds to srp cmds
 766 * Returns 1 on success.
 767*/
 768static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
 769                                struct srp_event_struct *evt_struct,
 770                                struct srp_cmd *srp_cmd, struct device *dev)
 771{
 772        switch (cmd->sc_data_direction) {
 773        case DMA_FROM_DEVICE:
 774        case DMA_TO_DEVICE:
 775                break;
 776        case DMA_NONE:
 777                return 1;
 778        case DMA_BIDIRECTIONAL:
 779                sdev_printk(KERN_ERR, cmd->device,
 780                            "Can't map DMA_BIDIRECTIONAL to read/write\n");
 781                return 0;
 782        default:
 783                sdev_printk(KERN_ERR, cmd->device,
 784                            "Unknown data direction 0x%02x; can't map!\n",
 785                            cmd->sc_data_direction);
 786                return 0;
 787        }
 788
 789        return map_sg_data(cmd, evt_struct, srp_cmd, dev);
 790}
 791
 792/**
 793 * purge_requests: Our virtual adapter just shut down.  purge any sent requests
 794 * @hostdata:    the adapter
 795 */
 796static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
 797{
 798        struct srp_event_struct *evt;
 799        unsigned long flags;
 800
 801        spin_lock_irqsave(hostdata->host->host_lock, flags);
 802        while (!list_empty(&hostdata->sent)) {
 803                evt = list_first_entry(&hostdata->sent, struct srp_event_struct, list);
 804                list_del(&evt->list);
 805                del_timer(&evt->timer);
 806
 807                spin_unlock_irqrestore(hostdata->host->host_lock, flags);
 808                if (evt->cmnd) {
 809                        evt->cmnd->result = (error_code << 16);
 810                        unmap_cmd_data(&evt->iu.srp.cmd, evt,
 811                                       evt->hostdata->dev);
 812                        if (evt->cmnd_done)
 813                                evt->cmnd_done(evt->cmnd);
 814                } else if (evt->done && evt->crq.format != VIOSRP_MAD_FORMAT &&
 815                           evt->iu.srp.login_req.opcode != SRP_LOGIN_REQ)
 816                        evt->done(evt);
 817                free_event_struct(&evt->hostdata->pool, evt);
 818                spin_lock_irqsave(hostdata->host->host_lock, flags);
 819        }
 820        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
 821}
 822
 823/**
 824 * ibmvscsi_set_request_limit - Set the adapter request_limit in response to
 825 * an adapter failure, reset, or SRP Login. Done under host lock to prevent
 826 * race with SCSI command submission.
 827 * @hostdata:   adapter to adjust
 828 * @limit:      new request limit
 829 */
 830static void ibmvscsi_set_request_limit(struct ibmvscsi_host_data *hostdata, int limit)
 831{
 832        unsigned long flags;
 833
 834        spin_lock_irqsave(hostdata->host->host_lock, flags);
 835        atomic_set(&hostdata->request_limit, limit);
 836        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
 837}
 838
 839/**
 840 * ibmvscsi_reset_host - Reset the connection to the server
 841 * @hostdata:   struct ibmvscsi_host_data to reset
 842*/
 843static void ibmvscsi_reset_host(struct ibmvscsi_host_data *hostdata)
 844{
 845        scsi_block_requests(hostdata->host);
 846        ibmvscsi_set_request_limit(hostdata, 0);
 847
 848        purge_requests(hostdata, DID_ERROR);
 849        hostdata->action = IBMVSCSI_HOST_ACTION_RESET;
 850        wake_up(&hostdata->work_wait_q);
 851}
 852
 853/**
 854 * ibmvscsi_timeout - Internal command timeout handler
  855 * @t:          timer_list of the srp_event_struct that timed out
 856 *
 857 * Called when an internally generated command times out
 858*/
 859static void ibmvscsi_timeout(struct timer_list *t)
 860{
 861        struct srp_event_struct *evt_struct = from_timer(evt_struct, t, timer);
 862        struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
 863
 864        dev_err(hostdata->dev, "Command timed out (%x). Resetting connection\n",
 865                evt_struct->iu.srp.cmd.opcode);
 866
 867        ibmvscsi_reset_host(hostdata);
 868}
 869
 870
 871/* ------------------------------------------------------------
 872 * Routines for sending and receiving SRPs
 873 */
 874/**
  875 * ibmvscsi_send_srp_event: - Transforms event to u64 array and calls ibmvscsi_send_crq()
 876 * @evt_struct: evt_struct to be sent
 877 * @hostdata:   ibmvscsi_host_data of host
 878 * @timeout:    timeout in seconds - 0 means do not time command
 879 *
 880 * Returns the value returned from ibmvscsi_send_crq(). (Zero for success)
 881 * Note that this routine assumes that host_lock is held for synchronization
 882*/
 883static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
 884                                   struct ibmvscsi_host_data *hostdata,
 885                                   unsigned long timeout)
 886{
 887        __be64 *crq_as_u64 = (__be64 *)&evt_struct->crq;
 888        int request_status = 0;
 889        int rc;
 890        int srp_req = 0;
 891
 892        /* If we have exhausted our request limit, just fail this request,
 893         * unless it is for a reset or abort.
 894         * Note that there are rare cases involving driver generated requests 
 895         * (such as task management requests) that the mid layer may think we
 896         * can handle more requests (can_queue) when we actually can't
 897         */
 898        if (evt_struct->crq.format == VIOSRP_SRP_FORMAT) {
 899                srp_req = 1;
 900                request_status =
 901                        atomic_dec_if_positive(&hostdata->request_limit);
 902                /* If request limit was -1 when we started, it is now even
 903                 * less than that
 904                 */
 905                if (request_status < -1)
 906                        goto send_error;
  907                /* If request limit was 0 when we started, the adapter
  908                 * is in the process of performing a login with the
  909                 * server adapter, or we may simply have run out of
  910                 * requests.
  911                 */
 912                else if (request_status == -1 &&
 913                         evt_struct->iu.srp.login_req.opcode != SRP_LOGIN_REQ)
 914                        goto send_busy;
 915                /* Abort and reset calls should make it through.
 916                 * Nothing except abort and reset should use the last two
 917                 * slots unless we had two or less to begin with.
 918                 */
 919                else if (request_status < 2 &&
 920                         evt_struct->iu.srp.cmd.opcode != SRP_TSK_MGMT) {
 921                        /* In the case that we have less than two requests
 922                         * available, check the server limit as a combination
 923                         * of the request limit and the number of requests
 924                         * in-flight (the size of the send list).  If the
 925                         * server limit is greater than 2, return busy so
 926                         * that the last two are reserved for reset and abort.
 927                         */
 928                        int server_limit = request_status;
 929                        struct srp_event_struct *tmp_evt;
 930
 931                        list_for_each_entry(tmp_evt, &hostdata->sent, list) {
 932                                server_limit++;
 933                        }
 934
 935                        if (server_limit > 2)
 936                                goto send_busy;
 937                }
 938        }
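        /* Worked example: if request_limit was 2, request_status is now 1;
         * with three commands already on the sent list the server limit
         * computes to 1 + 3 = 4 > 2, so a plain SRP_CMD is bounced with
         * send_busy and the final slots stay free for abort/reset.
         */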
 939
 940        /* Copy the IU into the transfer area */
 941        *evt_struct->xfer_iu = evt_struct->iu;
 942        evt_struct->xfer_iu->srp.rsp.tag = (u64)evt_struct;
 943
 944        /* Add this to the sent list.  We need to do this 
 945         * before we actually send 
 946         * in case it comes back REALLY fast
 947         */
 948        list_add_tail(&evt_struct->list, &hostdata->sent);
 949
 950        timer_setup(&evt_struct->timer, ibmvscsi_timeout, 0);
 951        if (timeout) {
 952                evt_struct->timer.expires = jiffies + (timeout * HZ);
 953                add_timer(&evt_struct->timer);
 954        }
 955
 956        rc = ibmvscsi_send_crq(hostdata, be64_to_cpu(crq_as_u64[0]),
 957                               be64_to_cpu(crq_as_u64[1]));
 958        if (rc != 0) {
 959                list_del(&evt_struct->list);
 960                del_timer(&evt_struct->timer);
 961
 962                /* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
 963                 * Firmware will send a CRQ with a transport event (0xFF) to
 964                 * tell this client what has happened to the transport.  This
 965                 * will be handled in ibmvscsi_handle_crq()
 966                 */
 967                if (rc == H_CLOSED) {
 968                        dev_warn(hostdata->dev, "send warning. "
 969                                 "Receive queue closed, will retry.\n");
 970                        goto send_busy;
 971                }
 972                dev_err(hostdata->dev, "send error %d\n", rc);
 973                if (srp_req)
 974                        atomic_inc(&hostdata->request_limit);
 975                goto send_error;
 976        }
 977
 978        return 0;
 979
 980 send_busy:
 981        unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
 982
 983        free_event_struct(&hostdata->pool, evt_struct);
 984        if (srp_req && request_status != -1)
 985                atomic_inc(&hostdata->request_limit);
 986        return SCSI_MLQUEUE_HOST_BUSY;
 987
 988 send_error:
 989        unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
 990
 991        if (evt_struct->cmnd != NULL) {
 992                evt_struct->cmnd->result = DID_ERROR << 16;
 993                evt_struct->cmnd_done(evt_struct->cmnd);
 994        } else if (evt_struct->done)
 995                evt_struct->done(evt_struct);
 996
 997        free_event_struct(&hostdata->pool, evt_struct);
 998        return 0;
 999}
1000
1001/**
1002 * handle_cmd_rsp: -  Handle responses from commands
1003 * @evt_struct: srp_event_struct to be handled
1004 *
 1005 * Used as a callback when sending scsi cmds.
1006 * Gets called by ibmvscsi_handle_crq()
1007*/
1008static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
1009{
1010        struct srp_rsp *rsp = &evt_struct->xfer_iu->srp.rsp;
1011        struct scsi_cmnd *cmnd = evt_struct->cmnd;
1012
1013        if (unlikely(rsp->opcode != SRP_RSP)) {
1014                if (printk_ratelimit())
1015                        dev_warn(evt_struct->hostdata->dev,
1016                                 "bad SRP RSP type %#02x\n", rsp->opcode);
1017        }
1018        
1019        if (cmnd) {
1020                cmnd->result |= rsp->status;
1021                if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
1022                        memcpy(cmnd->sense_buffer,
1023                               rsp->data,
1024                               be32_to_cpu(rsp->sense_data_len));
1025                unmap_cmd_data(&evt_struct->iu.srp.cmd, 
1026                               evt_struct, 
1027                               evt_struct->hostdata->dev);
1028
1029                if (rsp->flags & SRP_RSP_FLAG_DOOVER)
1030                        scsi_set_resid(cmnd,
1031                                       be32_to_cpu(rsp->data_out_res_cnt));
1032                else if (rsp->flags & SRP_RSP_FLAG_DIOVER)
1033                        scsi_set_resid(cmnd, be32_to_cpu(rsp->data_in_res_cnt));
1034        }
1035
1036        if (evt_struct->cmnd_done)
1037                evt_struct->cmnd_done(cmnd);
1038}
1039
1040/**
1041 * lun_from_dev: - Returns the lun of the scsi device
1042 * @dev:        struct scsi_device
1043 *
1044*/
1045static inline u16 lun_from_dev(struct scsi_device *dev)
1046{
1047        return (0x2 << 14) | (dev->id << 8) | (dev->channel << 5) | dev->lun;
1048}
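/* Example: id 2, channel 1, lun 3 encodes as
 * (0x2 << 14) | (2 << 8) | (1 << 5) | 3 = 0x8000 | 0x200 | 0x20 | 0x3
 * = 0x8223, with the 0b10 in the top two bits selecting the SAM
 * logical-unit addressing method.
 */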
1049
1050/**
 1051 * ibmvscsi_queuecommand_lck: - The queuecommand function of the scsi template
 1052 * @cmnd:       struct scsi_cmnd to be executed
1053 * @done:       Callback function to be called when cmd is completed
1054*/
1055static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd,
1056                                 void (*done) (struct scsi_cmnd *))
1057{
1058        struct srp_cmd *srp_cmd;
1059        struct srp_event_struct *evt_struct;
1060        struct srp_indirect_buf *indirect;
1061        struct ibmvscsi_host_data *hostdata = shost_priv(cmnd->device->host);
1062        u16 lun = lun_from_dev(cmnd->device);
1063        u8 out_fmt, in_fmt;
1064
1065        cmnd->result = (DID_OK << 16);
1066        evt_struct = get_event_struct(&hostdata->pool);
1067        if (!evt_struct)
1068                return SCSI_MLQUEUE_HOST_BUSY;
1069
1070        /* Set up the actual SRP IU */
1071        srp_cmd = &evt_struct->iu.srp.cmd;
1072        memset(srp_cmd, 0x00, SRP_MAX_IU_LEN);
1073        srp_cmd->opcode = SRP_CMD;
1074        memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(srp_cmd->cdb));
1075        int_to_scsilun(lun, &srp_cmd->lun);
1076
1077        if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
1078                if (!firmware_has_feature(FW_FEATURE_CMO))
1079                        sdev_printk(KERN_ERR, cmnd->device,
1080                                    "couldn't convert cmd to srp_cmd\n");
1081                free_event_struct(&hostdata->pool, evt_struct);
1082                return SCSI_MLQUEUE_HOST_BUSY;
1083        }
1084
1085        init_event_struct(evt_struct,
1086                          handle_cmd_rsp,
1087                          VIOSRP_SRP_FORMAT,
1088                          cmnd->request->timeout/HZ);
1089
1090        evt_struct->cmnd = cmnd;
1091        evt_struct->cmnd_done = done;
1092
1093        /* Fix up dma address of the buffer itself */
1094        indirect = (struct srp_indirect_buf *) srp_cmd->add_data;
1095        out_fmt = srp_cmd->buf_fmt >> 4;
1096        in_fmt = srp_cmd->buf_fmt & ((1U << 4) - 1);
1097        if ((in_fmt == SRP_DATA_DESC_INDIRECT ||
1098             out_fmt == SRP_DATA_DESC_INDIRECT) &&
1099            indirect->table_desc.va == 0) {
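                /* The descriptor list lives inline in this IU, so its bus
                 * address is the IU's DMA address plus the offset of the
                 * additional data within srp_cmd plus the offset of
                 * desc_list within the indirect buffer.
                 */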
1100                indirect->table_desc.va =
1101                        cpu_to_be64(be64_to_cpu(evt_struct->crq.IU_data_ptr) +
1102                        offsetof(struct srp_cmd, add_data) +
1103                        offsetof(struct srp_indirect_buf, desc_list));
1104        }
1105
1106        return ibmvscsi_send_srp_event(evt_struct, hostdata, 0);
1107}
1108
1109static DEF_SCSI_QCMD(ibmvscsi_queuecommand)
1110
1111/* ------------------------------------------------------------
1112 * Routines for driver initialization
1113 */
1114
1115/**
1116 * map_persist_bufs: - Pre-map persistent data for adapter logins
1117 * @hostdata:   ibmvscsi_host_data of host
1118 *
1119 * Map the capabilities and adapter info DMA buffers to avoid runtime failures.
1120 * Return 1 on error, 0 on success.
1121 */
1122static int map_persist_bufs(struct ibmvscsi_host_data *hostdata)
1123{
1124
1125        hostdata->caps_addr = dma_map_single(hostdata->dev, &hostdata->caps,
1126                                             sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
1127
1128        if (dma_mapping_error(hostdata->dev, hostdata->caps_addr)) {
1129                dev_err(hostdata->dev, "Unable to map capabilities buffer!\n");
1130                return 1;
1131        }
1132
1133        hostdata->adapter_info_addr = dma_map_single(hostdata->dev,
1134                                                     &hostdata->madapter_info,
1135                                                     sizeof(hostdata->madapter_info),
1136                                                     DMA_BIDIRECTIONAL);
1137        if (dma_mapping_error(hostdata->dev, hostdata->adapter_info_addr)) {
1138                dev_err(hostdata->dev, "Unable to map adapter info buffer!\n");
1139                dma_unmap_single(hostdata->dev, hostdata->caps_addr,
1140                                 sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
1141                return 1;
1142        }
1143
1144        return 0;
1145}
1146
1147/**
1148 * unmap_persist_bufs: - Unmap persistent data needed for adapter logins
1149 * @hostdata:   ibmvscsi_host_data of host
1150 *
1151 * Unmap the capabilities and adapter info DMA buffers
1152 */
1153static void unmap_persist_bufs(struct ibmvscsi_host_data *hostdata)
1154{
1155        dma_unmap_single(hostdata->dev, hostdata->caps_addr,
1156                         sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
1157
1158        dma_unmap_single(hostdata->dev, hostdata->adapter_info_addr,
1159                         sizeof(hostdata->madapter_info), DMA_BIDIRECTIONAL);
1160}
1161
1162/**
1163 * login_rsp: - Handle response to SRP login request
1164 * @evt_struct: srp_event_struct with the response
1165 *
 1166 * Used as a "done" callback when sending srp_login. Gets called
1167 * by ibmvscsi_handle_crq()
1168*/
1169static void login_rsp(struct srp_event_struct *evt_struct)
1170{
1171        struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
1172        switch (evt_struct->xfer_iu->srp.login_rsp.opcode) {
1173        case SRP_LOGIN_RSP:     /* it worked! */
1174                break;
1175        case SRP_LOGIN_REJ:     /* refused! */
1176                dev_info(hostdata->dev, "SRP_LOGIN_REJ reason %u\n",
1177                         evt_struct->xfer_iu->srp.login_rej.reason);
1178                /* Login failed.  */
1179                ibmvscsi_set_request_limit(hostdata, -1);
1180                return;
1181        default:
1182                dev_err(hostdata->dev, "Invalid login response typecode 0x%02x!\n",
1183                        evt_struct->xfer_iu->srp.login_rsp.opcode);
1184                /* Login failed.  */
1185                ibmvscsi_set_request_limit(hostdata, -1);
1186                return;
1187        }
1188
1189        dev_info(hostdata->dev, "SRP_LOGIN succeeded\n");
1190        hostdata->client_migrated = 0;
1191
1192        /* Now we know what the real request-limit is.
1193         * This value is set rather than added to request_limit because
1194         * request_limit could have been set to -1 by this client.
1195         */
1196        ibmvscsi_set_request_limit(hostdata,
1197                   be32_to_cpu(evt_struct->xfer_iu->srp.login_rsp.req_lim_delta));
1198
1199        /* If we had any pending I/Os, kick them */
1200        hostdata->action = IBMVSCSI_HOST_ACTION_UNBLOCK;
1201        wake_up(&hostdata->work_wait_q);
1202}
1203
1204/**
1205 * send_srp_login: - Sends the srp login
1206 * @hostdata:   ibmvscsi_host_data of host
1207 *
1208 * Returns zero if successful.
1209*/
1210static int send_srp_login(struct ibmvscsi_host_data *hostdata)
1211{
1212        int rc;
1213        unsigned long flags;
1214        struct srp_login_req *login;
1215        struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool);
1216
1217        BUG_ON(!evt_struct);
1218        init_event_struct(evt_struct, login_rsp,
1219                          VIOSRP_SRP_FORMAT, login_timeout);
1220
1221        login = &evt_struct->iu.srp.login_req;
1222        memset(login, 0, sizeof(*login));
1223        login->opcode = SRP_LOGIN_REQ;
1224        login->req_it_iu_len = cpu_to_be32(sizeof(union srp_iu));
1225        login->req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
1226                                         SRP_BUF_FORMAT_INDIRECT);
1227
1228        /* Start out with a request limit of 0, since this is negotiated in
1229         * the login request we are just sending and login requests always
1230         * get sent by the driver regardless of request_limit.
1231         */
1232        ibmvscsi_set_request_limit(hostdata, 0);
1233
1234        spin_lock_irqsave(hostdata->host->host_lock, flags);
1235        rc = ibmvscsi_send_srp_event(evt_struct, hostdata, login_timeout * 2);
1236        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1237        dev_info(hostdata->dev, "sent SRP login\n");
1238        return rc;
 1239}
1240
1241/**
1242 * capabilities_rsp: - Handle response to MAD adapter capabilities request
1243 * @evt_struct: srp_event_struct with the response
1244 *
 1245 * Used as a "done" callback when sending the capabilities request.
1246 */
1247static void capabilities_rsp(struct srp_event_struct *evt_struct)
1248{
1249        struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
1250
1251        if (evt_struct->xfer_iu->mad.capabilities.common.status) {
1252                dev_err(hostdata->dev, "error 0x%X getting capabilities info\n",
1253                        evt_struct->xfer_iu->mad.capabilities.common.status);
1254        } else {
1255                if (hostdata->caps.migration.common.server_support !=
1256                    cpu_to_be16(SERVER_SUPPORTS_CAP))
1257                        dev_info(hostdata->dev, "Partition migration not supported\n");
1258
1259                if (client_reserve) {
1260                        if (hostdata->caps.reserve.common.server_support ==
1261                            cpu_to_be16(SERVER_SUPPORTS_CAP))
1262                                dev_info(hostdata->dev, "Client reserve enabled\n");
1263                        else
1264                                dev_info(hostdata->dev, "Client reserve not supported\n");
1265                }
1266        }
1267
1268        send_srp_login(hostdata);
1269}
1270
1271/**
1272 * send_mad_capabilities: - Sends the mad capabilities request
 1273 *      and stores the result so it can be retrieved with sysfs.
1274 * @hostdata:   ibmvscsi_host_data of host
1275 */
1276static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
1277{
1278        struct viosrp_capabilities *req;
1279        struct srp_event_struct *evt_struct;
1280        unsigned long flags;
1281        struct device_node *of_node = hostdata->dev->of_node;
1282        const char *location;
1283
1284        evt_struct = get_event_struct(&hostdata->pool);
1285        BUG_ON(!evt_struct);
1286
1287        init_event_struct(evt_struct, capabilities_rsp,
1288                          VIOSRP_MAD_FORMAT, info_timeout);
1289
1290        req = &evt_struct->iu.mad.capabilities;
1291        memset(req, 0, sizeof(*req));
1292
1293        hostdata->caps.flags = cpu_to_be32(CAP_LIST_SUPPORTED);
1294        if (hostdata->client_migrated)
1295                hostdata->caps.flags |= cpu_to_be32(CLIENT_MIGRATED);
1296
1297        strlcpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev),
1298                sizeof(hostdata->caps.name));
1299
1300        location = of_get_property(of_node, "ibm,loc-code", NULL);
1301        location = location ? location : dev_name(hostdata->dev);
1302        strlcpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc));
1303
1304        req->common.type = cpu_to_be32(VIOSRP_CAPABILITIES_TYPE);
1305        req->buffer = cpu_to_be64(hostdata->caps_addr);
1306
1307        hostdata->caps.migration.common.cap_type =
1308                                cpu_to_be32(MIGRATION_CAPABILITIES);
1309        hostdata->caps.migration.common.length =
1310                                cpu_to_be16(sizeof(hostdata->caps.migration));
1311        hostdata->caps.migration.common.server_support =
1312                                cpu_to_be16(SERVER_SUPPORTS_CAP);
1313        hostdata->caps.migration.ecl = cpu_to_be32(1);
1314
1315        if (client_reserve) {
1316                hostdata->caps.reserve.common.cap_type =
1317                                        cpu_to_be32(RESERVATION_CAPABILITIES);
1318                hostdata->caps.reserve.common.length =
1319                                cpu_to_be16(sizeof(hostdata->caps.reserve));
1320                hostdata->caps.reserve.common.server_support =
1321                                cpu_to_be16(SERVER_SUPPORTS_CAP);
1322                hostdata->caps.reserve.type =
1323                                cpu_to_be32(CLIENT_RESERVE_SCSI_2);
1324                req->common.length =
1325                                cpu_to_be16(sizeof(hostdata->caps));
1326        } else
1327                req->common.length = cpu_to_be16(sizeof(hostdata->caps) -
1328                                                sizeof(hostdata->caps.reserve));
1329
1330        spin_lock_irqsave(hostdata->host->host_lock, flags);
1331        if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
1332                dev_err(hostdata->dev, "couldn't send CAPABILITIES_REQ!\n");
1333        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
 1334}
1335
1336/**
1337 * fast_fail_rsp: - Handle response to MAD enable fast fail
1338 * @evt_struct: srp_event_struct with the response
1339 *
 1340 * Used as a "done" callback when sending enable fast fail. Gets called
1341 * by ibmvscsi_handle_crq()
1342 */
1343static void fast_fail_rsp(struct srp_event_struct *evt_struct)
1344{
1345        struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
1346        u16 status = be16_to_cpu(evt_struct->xfer_iu->mad.fast_fail.common.status);
1347
1348        if (status == VIOSRP_MAD_NOT_SUPPORTED)
1349                dev_err(hostdata->dev, "fast_fail not supported in server\n");
1350        else if (status == VIOSRP_MAD_FAILED)
1351                dev_err(hostdata->dev, "fast_fail request failed\n");
1352        else if (status != VIOSRP_MAD_SUCCESS)
1353                dev_err(hostdata->dev, "error 0x%X enabling fast_fail\n", status);
1354
1355        send_mad_capabilities(hostdata);
1356}
1357
1358/**
 1359 * enable_fast_fail - Enable fast fail, continuing host initialization
1360 * @hostdata:   ibmvscsi_host_data of host
1361 *
1362 * Returns zero if successful.
1363 */
1364static int enable_fast_fail(struct ibmvscsi_host_data *hostdata)
1365{
1366        int rc;
1367        unsigned long flags;
1368        struct viosrp_fast_fail *fast_fail_mad;
1369        struct srp_event_struct *evt_struct;
1370
1371        if (!fast_fail) {
1372                send_mad_capabilities(hostdata);
1373                return 0;
1374        }
1375
1376        evt_struct = get_event_struct(&hostdata->pool);
1377        BUG_ON(!evt_struct);
1378
1379        init_event_struct(evt_struct, fast_fail_rsp, VIOSRP_MAD_FORMAT, info_timeout);
1380
1381        fast_fail_mad = &evt_struct->iu.mad.fast_fail;
1382        memset(fast_fail_mad, 0, sizeof(*fast_fail_mad));
1383        fast_fail_mad->common.type = cpu_to_be32(VIOSRP_ENABLE_FAST_FAIL);
1384        fast_fail_mad->common.length = cpu_to_be16(sizeof(*fast_fail_mad));
1385
1386        spin_lock_irqsave(hostdata->host->host_lock, flags);
1387        rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
1388        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1389        return rc;
1390}
1391
1392/**
1393 * adapter_info_rsp: - Handle response to MAD adapter info request
1394 * @evt_struct: srp_event_struct with the response
1395 *
1396 * Used as a "done" callback when sending adapter_info. Gets called
1397 * by ibmvscsi_handle_crq()
1398 */
1399static void adapter_info_rsp(struct srp_event_struct *evt_struct)
1400{
1401        struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
1402
1403        if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
1404                dev_err(hostdata->dev, "error %d getting adapter info\n",
1405                        evt_struct->xfer_iu->mad.adapter_info.common.status);
1406        } else {
1407                dev_info(hostdata->dev, "host srp version: %s, "
1408                         "host partition %s (%d), OS %d, max io %u\n",
1409                         hostdata->madapter_info.srp_version,
1410                         hostdata->madapter_info.partition_name,
1411                         be32_to_cpu(hostdata->madapter_info.partition_number),
1412                         be32_to_cpu(hostdata->madapter_info.os_type),
1413                         be32_to_cpu(hostdata->madapter_info.port_max_txu[0]));
1414
1415        if (hostdata->madapter_info.port_max_txu[0])
1416                hostdata->host->max_sectors =
1417                                be32_to_cpu(hostdata->madapter_info.port_max_txu[0]) >> 9;
1418
1419                if (be32_to_cpu(hostdata->madapter_info.os_type) == SRP_MAD_OS_AIX &&
1420                    strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
1421                        dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
1422                                hostdata->madapter_info.srp_version);
1423                        dev_err(hostdata->dev, "limiting scatterlists to %d\n",
1424                                MAX_INDIRECT_BUFS);
1425                        hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
1426                }
1427
1428                if (be32_to_cpu(hostdata->madapter_info.os_type) == SRP_MAD_OS_AIX) {
1429                        enable_fast_fail(hostdata);
1430                        return;
1431                }
1432        }
1433
1434        send_srp_login(hostdata);
1435}
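
/*
 * Editor's note: port_max_txu[] is reported by the server in bytes, so
 * the ">> 9" above converts it to 512-byte sectors for the midlayer.
 * For example, a server advertising a 1 MiB maximum transfer
 * (1048576 bytes) yields max_sectors = 1048576 >> 9 = 2048.
 */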
1436
1437/**
1438 * send_mad_adapter_info: - Sends the mad adapter info request
1439 *      and stores the result so it can be retrieved with
1440 *      sysfs.  We COULD consider causing a failure if the
1441 *      returned SRP version doesn't match ours.
1442 * @hostdata:   ibmvscsi_host_data of host
1443 *
1444 * The response is handled asynchronously by adapter_info_rsp().
1445 */
1446static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
1447{
1448        struct viosrp_adapter_info *req;
1449        struct srp_event_struct *evt_struct;
1450        unsigned long flags;
1451
1452        evt_struct = get_event_struct(&hostdata->pool);
1453        BUG_ON(!evt_struct);
1454
1455        init_event_struct(evt_struct,
1456                          adapter_info_rsp,
1457                          VIOSRP_MAD_FORMAT,
1458                          info_timeout);
1459
1460        req = &evt_struct->iu.mad.adapter_info;
1461        memset(req, 0x00, sizeof(*req));
1462
1463        req->common.type = cpu_to_be32(VIOSRP_ADAPTER_INFO_TYPE);
1464        req->common.length = cpu_to_be16(sizeof(hostdata->madapter_info));
1465        req->buffer = cpu_to_be64(hostdata->adapter_info_addr);
1466
1467        spin_lock_irqsave(hostdata->host->host_lock, flags);
1468        if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
1469                dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
1470        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1471}
1472
1473/**
1474 * init_adapter: Start virtual adapter initialization sequence
1475 * @hostdata:   ibmvscsi_host_data of host
1476 */
1477static void init_adapter(struct ibmvscsi_host_data *hostdata)
1478{
1479        send_mad_adapter_info(hostdata);
1480}
1481
1482/**
1483 * sync_completion: Signal that a synchronous command has completed
1484 * @evt_struct: srp_event_struct that has completed
1485 * Note that after this call returns, the evt_struct is freed; the caller
1486 * waiting on this completion must not touch the evt_struct again.
1487 */
1488static void sync_completion(struct srp_event_struct *evt_struct)
1489{
1490        /* copy the response back */
1491        if (evt_struct->sync_srp)
1492                *evt_struct->sync_srp = *evt_struct->xfer_iu;
1493        
1494        complete(&evt_struct->comp);
1495}
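
/*
 * Editor's sketch, not part of the driver: sync_completion() is the
 * "done" half of a synchronous send.  The caller side of the pattern, as
 * used by the error handlers below, is roughly:
 *
 *      union viosrp_iu srp_rsp;
 *
 *      init_event_struct(evt, sync_completion, VIOSRP_SRP_FORMAT, timeout);
 *      evt->sync_srp = &srp_rsp;               // response copied here
 *      init_completion(&evt->comp);
 *      ibmvscsi_send_srp_event(evt, hostdata, timeout * 2);
 *      wait_for_completion(&evt->comp);
 *      // evt may already be freed; only srp_rsp is safe to inspect
 */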
1496
1497/**
1498 * ibmvscsi_eh_abort_handler: Abort a command...from scsi host template.
1499 * Send the abort over to the server and wait synchronously for the response.
1500 */
1501static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
1502{
1503        struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);
1504        struct srp_tsk_mgmt *tsk_mgmt;
1505        struct srp_event_struct *evt;
1506        struct srp_event_struct *tmp_evt, *found_evt;
1507        union viosrp_iu srp_rsp;
1508        int rsp_rc;
1509        unsigned long flags;
1510        u16 lun = lun_from_dev(cmd->device);
1511        unsigned long wait_switch = 0;
1512
1513        /* First, find this command in our sent list so we can figure
1514         * out the correct tag
1515         */
1516        spin_lock_irqsave(hostdata->host->host_lock, flags);
1517        wait_switch = jiffies + (init_timeout * HZ);
1518        do {
1519                found_evt = NULL;
1520                list_for_each_entry(tmp_evt, &hostdata->sent, list) {
1521                        if (tmp_evt->cmnd == cmd) {
1522                                found_evt = tmp_evt;
1523                                break;
1524                        }
1525                }
1526
1527                if (!found_evt) {
1528                        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1529                        return SUCCESS;
1530                }
1531
1532                evt = get_event_struct(&hostdata->pool);
1533                if (evt == NULL) {
1534                        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1535                        sdev_printk(KERN_ERR, cmd->device,
1536                                "failed to allocate abort event\n");
1537                        return FAILED;
1538                }
1539
1540                init_event_struct(evt,
1541                                  sync_completion,
1542                                  VIOSRP_SRP_FORMAT,
1543                                  abort_timeout);
1544
1545                tsk_mgmt = &evt->iu.srp.tsk_mgmt;
1546
1547                /* Set up an abort SRP command */
1548                memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
1549                tsk_mgmt->opcode = SRP_TSK_MGMT;
1550                int_to_scsilun(lun, &tsk_mgmt->lun);
1551                tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
1552                tsk_mgmt->task_tag = (u64) found_evt;
1553
1554                evt->sync_srp = &srp_rsp;
1555
1556                init_completion(&evt->comp);
1557                rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, abort_timeout * 2);
1558
1559                if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
1560                        break;
1561
1562                spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1563                msleep(10);
1564                spin_lock_irqsave(hostdata->host->host_lock, flags);
1565        } while (time_before(jiffies, wait_switch));
1566
1567        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1568
1569        if (rsp_rc != 0) {
1570                sdev_printk(KERN_ERR, cmd->device,
1571                            "failed to send abort() event. rc=%d\n", rsp_rc);
1572                return FAILED;
1573        }
1574
1575        sdev_printk(KERN_INFO, cmd->device,
1576                    "aborting command. lun 0x%llx, tag 0x%llx\n",
1577                    (((u64) lun) << 48), (u64) found_evt);
1578
1579        wait_for_completion(&evt->comp);
1580
1581        /* make sure we got a good response */
1582        if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
1583                if (printk_ratelimit())
1584                        sdev_printk(KERN_WARNING, cmd->device, "abort bad SRP RSP type %d\n",
1585                                    srp_rsp.srp.rsp.opcode);
1586                return FAILED;
1587        }
1588
1589        if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
1590                rsp_rc = *((int *)srp_rsp.srp.rsp.data);
1591        else
1592                rsp_rc = srp_rsp.srp.rsp.status;
1593
1594        if (rsp_rc) {
1595                if (printk_ratelimit())
1596                        sdev_printk(KERN_WARNING, cmd->device,
1597                                    "abort code %d for task tag 0x%llx\n",
1598                                    rsp_rc, tsk_mgmt->task_tag);
1599                return FAILED;
1600        }
1601
1602        /* Because we dropped the spinlock above, it's possible
1603         * The event is no longer in our list.  Make sure it didn't
1604         * complete while we were aborting
1605         */
1606        spin_lock_irqsave(hostdata->host->host_lock, flags);
1607        found_evt = NULL;
1608        list_for_each_entry(tmp_evt, &hostdata->sent, list) {
1609                if (tmp_evt->cmnd == cmd) {
1610                        found_evt = tmp_evt;
1611                        break;
1612                }
1613        }
1614
1615        if (found_evt == NULL) {
1616                spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1617                sdev_printk(KERN_INFO, cmd->device, "aborted task tag 0x%llx completed\n",
1618                            tsk_mgmt->task_tag);
1619                return SUCCESS;
1620        }
1621
1622        sdev_printk(KERN_INFO, cmd->device, "successfully aborted task tag 0x%llx\n",
1623                    tsk_mgmt->task_tag);
1624
1625        cmd->result = (DID_ABORT << 16);
1626        list_del(&found_evt->list);
1627        unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt,
1628                       found_evt->hostdata->dev);
1629        free_event_struct(&found_evt->hostdata->pool, found_evt);
1630        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1631        atomic_inc(&hostdata->request_limit);
1632        return SUCCESS;
1633}
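
/*
 * Editor's sketch, not part of the driver: the "lun 0x%llx" values
 * printed by the error handlers shift the LUN into the top 16 bits
 * because SRP carries an 8-byte SAM LUN, and for single-level LUN
 * addressing the identifier occupies the first two bytes.  A
 * hypothetical helper making the encoding explicit:
 */
static inline u64 srp_single_level_lun(u16 lun)
{
        /* bytes 0-1 of the 8-byte LUN become bits 63-48 of the u64 */
        return ((u64) lun) << 48;
}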
1634
1635/**
1636 * ibmvscsi_eh_device_reset_handler: Reset a single LUN...from scsi host
1637 * template.  Send the reset over to the server and wait synchronously for
1638 * the response.
1639 */
1640static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
1641{
1642        struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);
1643        struct srp_tsk_mgmt *tsk_mgmt;
1644        struct srp_event_struct *evt;
1645        struct srp_event_struct *tmp_evt, *pos;
1646        union viosrp_iu srp_rsp;
1647        int rsp_rc;
1648        unsigned long flags;
1649        u16 lun = lun_from_dev(cmd->device);
1650        unsigned long wait_switch = 0;
1651
1652        spin_lock_irqsave(hostdata->host->host_lock, flags);
1653        wait_switch = jiffies + (init_timeout * HZ);
1654        do {
1655                evt = get_event_struct(&hostdata->pool);
1656                if (evt == NULL) {
1657                        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1658                        sdev_printk(KERN_ERR, cmd->device,
1659                                "failed to allocate reset event\n");
1660                        return FAILED;
1661                }
1662
1663                init_event_struct(evt,
1664                                  sync_completion,
1665                                  VIOSRP_SRP_FORMAT,
1666                                  reset_timeout);
1667
1668                tsk_mgmt = &evt->iu.srp.tsk_mgmt;
1669
1670                /* Set up a lun reset SRP command */
1671                memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
1672                tsk_mgmt->opcode = SRP_TSK_MGMT;
1673                int_to_scsilun(lun, &tsk_mgmt->lun);
1674                tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;
1675
1676                evt->sync_srp = &srp_rsp;
1677
1678                init_completion(&evt->comp);
1679                rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, reset_timeout * 2);
1680
1681                if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
1682                        break;
1683
1684                spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1685                msleep(10);
1686                spin_lock_irqsave(hostdata->host->host_lock, flags);
1687        } while (time_before(jiffies, wait_switch));
1688
1689        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1690
1691        if (rsp_rc != 0) {
1692                sdev_printk(KERN_ERR, cmd->device,
1693                            "failed to send reset event. rc=%d\n", rsp_rc);
1694                return FAILED;
1695        }
1696
1697        sdev_printk(KERN_INFO, cmd->device, "resetting device. lun 0x%llx\n",
1698                    (((u64) lun) << 48));
1699
1700        wait_for_completion(&evt->comp);
1701
1702        /* make sure we got a good response */
1703        if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
1704                if (printk_ratelimit())
1705                        sdev_printk(KERN_WARNING, cmd->device, "reset bad SRP RSP type %d\n",
1706                                    srp_rsp.srp.rsp.opcode);
1707                return FAILED;
1708        }
1709
1710        if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
1711                rsp_rc = *((int *)srp_rsp.srp.rsp.data);
1712        else
1713                rsp_rc = srp_rsp.srp.rsp.status;
1714
1715        if (rsp_rc) {
1716                if (printk_ratelimit())
1717                        sdev_printk(KERN_WARNING, cmd->device,
1718                                    "reset code %d for task tag 0x%llx\n",
1719                                    rsp_rc, tsk_mgmt->task_tag);
1720                return FAILED;
1721        }
1722
1723        /* We need to find all commands for this LUN that have not yet been
1724         * responded to, and fail them with DID_RESET
1725         */
1726        spin_lock_irqsave(hostdata->host->host_lock, flags);
1727        list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
1728                if ((tmp_evt->cmnd) && (tmp_evt->cmnd->device == cmd->device)) {
1729                        /* the outer check guarantees cmnd is non-NULL */
1730                        tmp_evt->cmnd->result = (DID_RESET << 16);
1731                        list_del(&tmp_evt->list);
1732                        unmap_cmd_data(&tmp_evt->iu.srp.cmd, tmp_evt,
1733                                       tmp_evt->hostdata->dev);
1734                        free_event_struct(&tmp_evt->hostdata->pool,
1735                                                   tmp_evt);
1736                        atomic_inc(&hostdata->request_limit);
1737                        if (tmp_evt->cmnd_done)
1738                                tmp_evt->cmnd_done(tmp_evt->cmnd);
1739                        else if (tmp_evt->done)
1740                                tmp_evt->done(tmp_evt);
1741                }
1742        }
1743        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1744        return SUCCESS;
1745}
1746
1747/**
1748 * ibmvscsi_eh_host_reset_handler - Reset the connection to the server
1749 * @cmd:        struct scsi_cmnd having problems
1750 */
1751static int ibmvscsi_eh_host_reset_handler(struct scsi_cmnd *cmd)
1752{
1753        unsigned long wait_switch = 0;
1754        struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);
1755
1756        dev_err(hostdata->dev, "Resetting connection due to error recovery\n");
1757
1758        ibmvscsi_reset_host(hostdata);
1759
1760        for (wait_switch = jiffies + (init_timeout * HZ);
1761             time_before(jiffies, wait_switch) &&
1762                     atomic_read(&hostdata->request_limit) < 2;) {
1763
1764                msleep(10);
1765        }
1766
1767        if (atomic_read(&hostdata->request_limit) <= 0)
1768                return FAILED;
1769
1770        return SUCCESS;
1771}
1772
1773/**
1774 * ibmvscsi_handle_crq: - Handles and frees received events in the CRQ
1775 * @crq:        Command/Response queue
1776 * @hostdata:   ibmvscsi_host_data of host
1777 *
1778 */
1779static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
1780                                struct ibmvscsi_host_data *hostdata)
1781{
1782        long rc;
1783        unsigned long flags;
1784        /* The hypervisor copies our tag value here so no byteswapping */
1785        struct srp_event_struct *evt_struct =
1786                        (__force struct srp_event_struct *)crq->IU_data_ptr;
1787        switch (crq->valid) {
1788        case VIOSRP_CRQ_INIT_RSP:               /* initialization */
1789                switch (crq->format) {
1790                case VIOSRP_CRQ_INIT:   /* Initialization message */
1791                        dev_info(hostdata->dev, "partner initialized\n");
1792                        /* Send back a response */
1793                        rc = ibmvscsi_send_crq(hostdata, 0xC002000000000000LL, 0);
1794                        if (rc == 0) {
1795                                /* Now login */
1796                                init_adapter(hostdata);
1797                        } else {
1798                                dev_err(hostdata->dev, "Unable to send init rsp. rc=%ld\n", rc);
1799                        }
1800
1801                        break;
1802                case VIOSRP_CRQ_INIT_COMPLETE:  /* Initialization response */
1803                        dev_info(hostdata->dev, "partner initialization complete\n");
1804
1805                        /* Now login */
1806                        init_adapter(hostdata);
1807                        break;
1808                default:
1809                        dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format);
1810                }
1811                return;
1812        case VIOSRP_CRQ_XPORT_EVENT:    /* Hypervisor telling us the connection is closed */
1813                scsi_block_requests(hostdata->host);
1814                ibmvscsi_set_request_limit(hostdata, 0);
1815                if (crq->format == 0x06) {
1816                        /* We need to re-setup the interpartition connection */
1817                        dev_info(hostdata->dev, "Re-enabling adapter!\n");
1818                        hostdata->client_migrated = 1;
1819                        hostdata->action = IBMVSCSI_HOST_ACTION_REENABLE;
1820                        purge_requests(hostdata, DID_REQUEUE);
1821                        wake_up(&hostdata->work_wait_q);
1822                } else {
1823                        dev_err(hostdata->dev, "Virtual adapter failed rc %d!\n",
1824                                crq->format);
1825                        ibmvscsi_reset_host(hostdata);
1826                }
1827                return;
1828        case VIOSRP_CRQ_CMD_RSP:                /* real payload */
1829                break;
1830        default:
1831                dev_err(hostdata->dev, "got an invalid message type 0x%02x\n",
1832                        crq->valid);
1833                return;
1834        }
1835
1836        /* The only kind of payload CRQs we should get are responses to
1837         * things we send. Make sure this response is to something we
1838         * actually sent
1839         */
1840        if (!valid_event_struct(&hostdata->pool, evt_struct)) {
1841                dev_err(hostdata->dev, "returned correlation_token 0x%p is invalid!\n",
1842                       evt_struct);
1843                return;
1844        }
1845
1846        if (atomic_read(&evt_struct->free)) {
1847                dev_err(hostdata->dev, "received duplicate correlation_token 0x%p!\n",
1848                        evt_struct);
1849                return;
1850        }
1851
1852        if (crq->format == VIOSRP_SRP_FORMAT)
1853                atomic_add(be32_to_cpu(evt_struct->xfer_iu->srp.rsp.req_lim_delta),
1854                           &hostdata->request_limit);
1855
1856        del_timer(&evt_struct->timer);
1857
1858        if ((crq->status != VIOSRP_OK && crq->status != VIOSRP_OK2) && evt_struct->cmnd)
1859                evt_struct->cmnd->result = DID_ERROR << 16;
1860        if (evt_struct->done)
1861                evt_struct->done(evt_struct);
1862        else
1863                dev_err(hostdata->dev, "returned done() is NULL; not running it!\n");
1864
1865        /*
1866         * Lock the host_lock before messing with these structures, since we
1867         * are running in a task context
1868         */
1869        spin_lock_irqsave(evt_struct->hostdata->host->host_lock, flags);
1870        list_del(&evt_struct->list);
1871        free_event_struct(&evt_struct->hostdata->pool, evt_struct);
1872        spin_unlock_irqrestore(evt_struct->hostdata->host->host_lock, flags);
1873}
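
/*
 * Editor's note: the raw constants passed to ibmvscsi_send_crq() in this
 * file encode a CRQ header whose first byte is the message type and
 * whose second byte is the format handled above.  Hypothetical names for
 * the two initialization messages the driver uses:
 */
#define IBMVSCSI_CRQ_INIT_MSG   0xC001000000000000ULL   /* valid 0xC0, format 0x01 */
#define IBMVSCSI_CRQ_INIT_RSP   0xC002000000000000ULL   /* valid 0xC0, format 0x02 */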
1874
1875/**
1876 * ibmvscsi_slave_configure: Set the "allow_restart" flag for each disk.
1877 * @sdev:       struct scsi_device device to configure
1878 *
1879 * Enable allow_restart for a device if it is a disk.  Adjust the
1880 * queue_depth here also as is required by the documentation for
1881 * struct scsi_host_template.
1882 */
1883static int ibmvscsi_slave_configure(struct scsi_device *sdev)
1884{
1885        struct Scsi_Host *shost = sdev->host;
1886        unsigned long lock_flags = 0;
1887
1888        spin_lock_irqsave(shost->host_lock, lock_flags);
1889        if (sdev->type == TYPE_DISK) {
1890                sdev->allow_restart = 1;
1891                blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
1892        }
1893        spin_unlock_irqrestore(shost->host_lock, lock_flags);
1894        return 0;
1895}
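
/*
 * Editor's note: allow_restart permits the midlayer's error handling to
 * send a START UNIT to a disk reporting "not ready, initializing command
 * required", which a virtual disk can do after its server restarts; the
 * generous 120 second request timeout presumably tolerates the same
 * server-side recovery delays.
 */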
1896
1897/**
1898 * ibmvscsi_change_queue_depth - Change the device's queue depth
1899 * @sdev:       scsi device struct
1900 * @qdepth:     depth to set
1901 * The depth is capped at IBMVSCSI_MAX_CMDS_PER_LUN.
1902 *
1903 * Return value:
1904 *      actual depth set
1905 **/
1906static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
1907{
1908        if (qdepth > IBMVSCSI_MAX_CMDS_PER_LUN)
1909                qdepth = IBMVSCSI_MAX_CMDS_PER_LUN;
1910        return scsi_change_queue_depth(sdev, qdepth);
1911}
1912
1913/* ------------------------------------------------------------
1914 * sysfs attributes
1915 */
1916static ssize_t show_host_vhost_loc(struct device *dev,
1917                                   struct device_attribute *attr, char *buf)
1918{
1919        struct Scsi_Host *shost = class_to_shost(dev);
1920        struct ibmvscsi_host_data *hostdata = shost_priv(shost);
1921        int len;
1922
1923        len = snprintf(buf, sizeof(hostdata->caps.loc), "%s\n",
1924                       hostdata->caps.loc);
1925        return len;
1926}
1927
1928static struct device_attribute ibmvscsi_host_vhost_loc = {
1929        .attr = {
1930                 .name = "vhost_loc",
1931                 .mode = S_IRUGO,
1932                 },
1933        .show = show_host_vhost_loc,
1934};
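
/*
 * Editor's note: the open-coded struct device_attribute initializers in
 * this file predate consistent use of the DEVICE_ATTR() helper; the
 * attribute above could equivalently be written (modulo the variable
 * name, which DEVICE_ATTR fixes as dev_attr_vhost_loc) as:
 *
 *      static DEVICE_ATTR(vhost_loc, S_IRUGO, show_host_vhost_loc, NULL);
 */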
1935
1936static ssize_t show_host_vhost_name(struct device *dev,
1937                                    struct device_attribute *attr, char *buf)
1938{
1939        struct Scsi_Host *shost = class_to_shost(dev);
1940        struct ibmvscsi_host_data *hostdata = shost_priv(shost);
1941        int len;
1942
1943        len = snprintf(buf, sizeof(hostdata->caps.name), "%s\n",
1944                       hostdata->caps.name);
1945        return len;
1946}
1947
1948static struct device_attribute ibmvscsi_host_vhost_name = {
1949        .attr = {
1950                 .name = "vhost_name",
1951                 .mode = S_IRUGO,
1952                 },
1953        .show = show_host_vhost_name,
1954};
1955
1956static ssize_t show_host_srp_version(struct device *dev,
1957                                     struct device_attribute *attr, char *buf)
1958{
1959        struct Scsi_Host *shost = class_to_shost(dev);
1960        struct ibmvscsi_host_data *hostdata = shost_priv(shost);
1961        int len;
1962
1963        len = snprintf(buf, PAGE_SIZE, "%s\n",
1964                       hostdata->madapter_info.srp_version);
1965        return len;
1966}
1967
1968static struct device_attribute ibmvscsi_host_srp_version = {
1969        .attr = {
1970                 .name = "srp_version",
1971                 .mode = S_IRUGO,
1972                 },
1973        .show = show_host_srp_version,
1974};
1975
1976static ssize_t show_host_partition_name(struct device *dev,
1977                                        struct device_attribute *attr,
1978                                        char *buf)
1979{
1980        struct Scsi_Host *shost = class_to_shost(dev);
1981        struct ibmvscsi_host_data *hostdata = shost_priv(shost);
1982        int len;
1983
1984        len = snprintf(buf, PAGE_SIZE, "%s\n",
1985                       hostdata->madapter_info.partition_name);
1986        return len;
1987}
1988
1989static struct device_attribute ibmvscsi_host_partition_name = {
1990        .attr = {
1991                 .name = "partition_name",
1992                 .mode = S_IRUGO,
1993                 },
1994        .show = show_host_partition_name,
1995};
1996
1997static ssize_t show_host_partition_number(struct device *dev,
1998                                          struct device_attribute *attr,
1999                                          char *buf)
2000{
2001        struct Scsi_Host *shost = class_to_shost(dev);
2002        struct ibmvscsi_host_data *hostdata = shost_priv(shost);
2003        int len;
2004
2005        len = snprintf(buf, PAGE_SIZE, "%d\n",
2006                       be32_to_cpu(hostdata->madapter_info.partition_number));
2007        return len;
2008}
2009
2010static struct device_attribute ibmvscsi_host_partition_number = {
2011        .attr = {
2012                 .name = "partition_number",
2013                 .mode = S_IRUGO,
2014                 },
2015        .show = show_host_partition_number,
2016};
2017
2018static ssize_t show_host_mad_version(struct device *dev,
2019                                     struct device_attribute *attr, char *buf)
2020{
2021        struct Scsi_Host *shost = class_to_shost(dev);
2022        struct ibmvscsi_host_data *hostdata = shost_priv(shost);
2023        int len;
2024
2025        len = snprintf(buf, PAGE_SIZE, "%d\n",
2026                       be32_to_cpu(hostdata->madapter_info.mad_version));
2027        return len;
2028}
2029
2030static struct device_attribute ibmvscsi_host_mad_version = {
2031        .attr = {
2032                 .name = "mad_version",
2033                 .mode = S_IRUGO,
2034                 },
2035        .show = show_host_mad_version,
2036};
2037
2038static ssize_t show_host_os_type(struct device *dev,
2039                                 struct device_attribute *attr, char *buf)
2040{
2041        struct Scsi_Host *shost = class_to_shost(dev);
2042        struct ibmvscsi_host_data *hostdata = shost_priv(shost);
2043        int len;
2044
2045        len = snprintf(buf, PAGE_SIZE, "%d\n",
2046                       be32_to_cpu(hostdata->madapter_info.os_type));
2047        return len;
2048}
2049
2050static struct device_attribute ibmvscsi_host_os_type = {
2051        .attr = {
2052                 .name = "os_type",
2053                 .mode = S_IRUGO,
2054                 },
2055        .show = show_host_os_type,
2056};
2057
2058static ssize_t show_host_config(struct device *dev,
2059                                struct device_attribute *attr, char *buf)
2060{
2061        return 0;       /* no config data is exported; reads return empty */
2062}
2063
2064static struct device_attribute ibmvscsi_host_config = {
2065        .attr = {
2066                .name = "config",
2067                .mode = S_IRUGO,
2068                },
2069        .show = show_host_config,
2070};
2071
2072static int ibmvscsi_host_reset(struct Scsi_Host *shost, int reset_type)
2073{
2074        struct ibmvscsi_host_data *hostdata = shost_priv(shost);
2075
2076        dev_info(hostdata->dev, "Initiating adapter reset!\n");
2077        ibmvscsi_reset_host(hostdata);
2078
2079        return 0;
2080}
2081
2082static struct device_attribute *ibmvscsi_attrs[] = {
2083        &ibmvscsi_host_vhost_loc,
2084        &ibmvscsi_host_vhost_name,
2085        &ibmvscsi_host_srp_version,
2086        &ibmvscsi_host_partition_name,
2087        &ibmvscsi_host_partition_number,
2088        &ibmvscsi_host_mad_version,
2089        &ibmvscsi_host_os_type,
2090        &ibmvscsi_host_config,
2091        NULL
2092};
2093
2094/* ------------------------------------------------------------
2095 * SCSI driver registration
2096 */
2097static struct scsi_host_template driver_template = {
2098        .module = THIS_MODULE,
2099        .name = "IBM POWER Virtual SCSI Adapter " IBMVSCSI_VERSION,
2100        .proc_name = "ibmvscsi",
2101        .queuecommand = ibmvscsi_queuecommand,
2102        .eh_timed_out = srp_timed_out,
2103        .eh_abort_handler = ibmvscsi_eh_abort_handler,
2104        .eh_device_reset_handler = ibmvscsi_eh_device_reset_handler,
2105        .eh_host_reset_handler = ibmvscsi_eh_host_reset_handler,
2106        .slave_configure = ibmvscsi_slave_configure,
2107        .change_queue_depth = ibmvscsi_change_queue_depth,
2108        .host_reset = ibmvscsi_host_reset,
2109        .cmd_per_lun = IBMVSCSI_CMDS_PER_LUN_DEFAULT,
2110        .can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT,
2111        .this_id = -1,
2112        .sg_tablesize = SG_ALL,
2113        .use_clustering = ENABLE_CLUSTERING,
2114        .shost_attrs = ibmvscsi_attrs,
2115};
2116
2117/**
2118 * ibmvscsi_get_desired_dma - Calculate IO memory desired by the driver
2119 *
2120 * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
2121 *
2122 * Return value:
2123 *      Number of bytes of IO data the driver will need to perform well.
2124 */
2125static unsigned long ibmvscsi_get_desired_dma(struct vio_dev *vdev)
2126{
2127        /* iu_storage data allocated in initialize_event_pool */
2128        unsigned long desired_io = max_events * sizeof(union viosrp_iu);
2129
2130        /* add io space for sg data */
2131        desired_io += (IBMVSCSI_MAX_SECTORS_DEFAULT * 512 *
2132                             IBMVSCSI_CMDS_PER_LUN_DEFAULT);
2133
2134        return desired_io;
2135}
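
/*
 * Editor's note: with the defaults assumed from ibmvscsi.h
 * (IBMVSCSI_MAX_SECTORS_DEFAULT == 256, IBMVSCSI_CMDS_PER_LUN_DEFAULT
 * == 16), the scatter/gather term alone is 256 * 512 * 16 = 2 MiB, on
 * top of one union viosrp_iu per event for the IU storage.
 */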
2136
2137static void ibmvscsi_do_work(struct ibmvscsi_host_data *hostdata)
2138{
2139        unsigned long flags;
2140        int rc;
2141        char *action = "reset";
2142
2143        spin_lock_irqsave(hostdata->host->host_lock, flags);
2144        switch (hostdata->action) {
2145        case IBMVSCSI_HOST_ACTION_UNBLOCK:
2146                rc = 0;
2147                break;
2148        case IBMVSCSI_HOST_ACTION_RESET:
2149                spin_unlock_irqrestore(hostdata->host->host_lock, flags);
2150                rc = ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata);
2151                spin_lock_irqsave(hostdata->host->host_lock, flags);
2152                if (!rc)
2153                        rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0);
2154                vio_enable_interrupts(to_vio_dev(hostdata->dev));
2155                break;
2156        case IBMVSCSI_HOST_ACTION_REENABLE:
2157                action = "enable";
2158                spin_unlock_irqrestore(hostdata->host->host_lock, flags);
2159                rc = ibmvscsi_reenable_crq_queue(&hostdata->queue, hostdata);
2160                spin_lock_irqsave(hostdata->host->host_lock, flags);
2161                if (!rc)
2162                        rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0);
2163                break;
2164        case IBMVSCSI_HOST_ACTION_NONE:
2165        default:
2166                spin_unlock_irqrestore(hostdata->host->host_lock, flags);
2167                return;
2168        }
2169
2170        hostdata->action = IBMVSCSI_HOST_ACTION_NONE;
2171        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
2172
2173        if (rc) {
2174                ibmvscsi_set_request_limit(hostdata, -1);
2175                dev_err(hostdata->dev, "error after %s\n", action);
2176        }
2177
2178        scsi_unblock_requests(hostdata->host);
2179}
2180
2181static int __ibmvscsi_work_to_do(struct ibmvscsi_host_data *hostdata)
2182{
2183        if (kthread_should_stop())
2184                return 1;
2185        switch (hostdata->action) {
2186        case IBMVSCSI_HOST_ACTION_NONE:
2187                return 0;
2188        case IBMVSCSI_HOST_ACTION_RESET:
2189        case IBMVSCSI_HOST_ACTION_REENABLE:
2190        case IBMVSCSI_HOST_ACTION_UNBLOCK:
2191        default:
2192                break;
2193        }
2194
2195        return 1;
2196}
2197
2198static int ibmvscsi_work_to_do(struct ibmvscsi_host_data *hostdata)
2199{
2200        unsigned long flags;
2201        int rc;
2202
2203        spin_lock_irqsave(hostdata->host->host_lock, flags);
2204        rc = __ibmvscsi_work_to_do(hostdata);
2205        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
2206
2207        return rc;
2208}
2209
2210static int ibmvscsi_work(void *data)
2211{
2212        struct ibmvscsi_host_data *hostdata = data;
2213        int rc;
2214
2215        set_user_nice(current, MIN_NICE);
2216
2217        while (1) {
2218                rc = wait_event_interruptible(hostdata->work_wait_q,
2219                                              ibmvscsi_work_to_do(hostdata));
2220
2221                BUG_ON(rc);
2222
2223                if (kthread_should_stop())
2224                        break;
2225
2226                ibmvscsi_do_work(hostdata);
2227        }
2228
2229        return 0;
2230}
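
/*
 * Editor's sketch, not part of the driver: ibmvscsi_work() is the
 * standard kthread-plus-waitqueue idiom.  Stripped of driver specifics
 * (names illustrative), it reduces to:
 *
 *      // setup:
 *      task = kthread_run(worker_fn, data, "name_%d", id);
 *
 *      // worker_fn body:
 *      while (!kthread_should_stop()) {
 *              wait_event_interruptible(q, work_to_do());
 *              do_work();
 *      }
 *
 *      // teardown -- wakes the thread and waits for it to exit:
 *      kthread_stop(task);
 *
 * Note that work_to_do() must report true once kthread_should_stop()
 * does, as __ibmvscsi_work_to_do() above takes care to.
 */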
2231
2232/**
2233 * ibmvscsi_probe - Called by the vio bus code for each adapter
2234 */
2235static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
2236{
2237        struct ibmvscsi_host_data *hostdata;
2238        struct Scsi_Host *host;
2239        struct device *dev = &vdev->dev;
2240        struct srp_rport_identifiers ids;
2241        struct srp_rport *rport;
2242        unsigned long wait_switch = 0;
2243        int rc;
2244
2245        dev_set_drvdata(&vdev->dev, NULL);
2246
2247        host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
2248        if (!host) {
2249                dev_err(&vdev->dev, "couldn't allocate host data\n");
2250                goto scsi_host_alloc_failed;
2251        }
2252
2253        host->transportt = ibmvscsi_transport_template;
2254        hostdata = shost_priv(host);
2255        memset(hostdata, 0x00, sizeof(*hostdata));
2256        INIT_LIST_HEAD(&hostdata->sent);
2257        init_waitqueue_head(&hostdata->work_wait_q);
2258        hostdata->host = host;
2259        hostdata->dev = dev;
2260        ibmvscsi_set_request_limit(hostdata, -1);
2261        hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT;
2262
2263        if (map_persist_bufs(hostdata)) {
2264                dev_err(&vdev->dev, "couldn't map persistent buffers\n");
2265                goto persist_bufs_failed;
2266        }
2267
2268        hostdata->work_thread = kthread_run(ibmvscsi_work, hostdata, "%s_%d",
2269                                            "ibmvscsi", host->host_no);
2270
2271        if (IS_ERR(hostdata->work_thread)) {
2272                dev_err(&vdev->dev, "couldn't initialize kthread. rc=%ld\n",
2273                        PTR_ERR(hostdata->work_thread));
2274                goto init_crq_failed;
2275        }
2276
2277        rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_events);
2278        if (rc != 0 && rc != H_RESOURCE) {
2279                dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
2280                goto kill_kthread;
2281        }
2282        if (initialize_event_pool(&hostdata->pool, max_events, hostdata) != 0) {
2283                dev_err(&vdev->dev, "couldn't initialize event pool\n");
2284                goto init_pool_failed;
2285        }
2286
2287        host->max_lun = IBMVSCSI_MAX_LUN;
2288        host->max_id = max_id;
2289        host->max_channel = max_channel;
2290        host->max_cmd_len = 16;
2291
2292        dev_info(dev,
2293                 "Maximum ID: %d Maximum LUN: %llu Maximum Channel: %d\n",
2294                 host->max_id, host->max_lun, host->max_channel);
2295
2296        if (scsi_add_host(hostdata->host, hostdata->dev))
2297                goto add_host_failed;
2298
2299        /* we don't have a proper target_port_id so let's use the fake one */
2300        memcpy(ids.port_id, hostdata->madapter_info.partition_name,
2301               sizeof(ids.port_id));
2302        ids.roles = SRP_RPORT_ROLE_TARGET;
2303        rport = srp_rport_add(host, &ids);
2304        if (IS_ERR(rport))
2305                goto add_srp_port_failed;
2306
2307        /* Try to send an initialization message.  Note that this is allowed
2308         * to fail if the other end is not active.  In that case we don't
2309         * want to scan.
2310         */
2311        if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0
2312            || rc == H_RESOURCE) {
2313                /*
2314                 * Wait around max init_timeout secs for the adapter to finish
2315                 * initializing. When we are done initializing, we will have a
2316                 * valid request_limit.  We don't want Linux scanning before
2317                 * we are ready.
2318                 */
2319                for (wait_switch = jiffies + (init_timeout * HZ);
2320                     time_before(jiffies, wait_switch) &&
2321                     atomic_read(&hostdata->request_limit) < 2;) {
2322
2323                        msleep(10);
2324                }
2325
2326                /* if we now have a valid request_limit, initiate a scan */
2327                if (atomic_read(&hostdata->request_limit) > 0)
2328                        scsi_scan_host(host);
2329        }
2330
2331        dev_set_drvdata(&vdev->dev, hostdata);
2332        spin_lock(&ibmvscsi_driver_lock);
2333        list_add_tail(&hostdata->host_list, &ibmvscsi_head);
2334        spin_unlock(&ibmvscsi_driver_lock);
2335        return 0;
2336
2337      add_srp_port_failed:
2338        scsi_remove_host(hostdata->host);
2339      add_host_failed:
2340        release_event_pool(&hostdata->pool, hostdata);
2341      init_pool_failed:
2342        ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_events);
2343      kill_kthread:
2344        kthread_stop(hostdata->work_thread);
2345      init_crq_failed:
2346        unmap_persist_bufs(hostdata);
2347      persist_bufs_failed:
2348        scsi_host_put(host);
2349      scsi_host_alloc_failed:
2350        return -1;
2351}
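
/*
 * Editor's note: the label ladder above is the usual kernel goto-unwind
 * idiom -- each failure path jumps to the label that releases everything
 * acquired so far, so teardown runs in exact reverse order of setup.
 * ibmvscsi_remove() below walks the same steps for a fully probed
 * adapter.
 */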
2352
2353static int ibmvscsi_remove(struct vio_dev *vdev)
2354{
2355        struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
2356
2357        srp_remove_host(hostdata->host);
2358        scsi_remove_host(hostdata->host);
2359
2360        purge_requests(hostdata, DID_ERROR);
2361        release_event_pool(&hostdata->pool, hostdata);
2362
2363        ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
2364                                        max_events);
2365
2366        kthread_stop(hostdata->work_thread);
2367        unmap_persist_bufs(hostdata);
2368
2369        spin_lock(&ibmvscsi_driver_lock);
2370        list_del(&hostdata->host_list);
2371        spin_unlock(&ibmvscsi_driver_lock);
2372
2373        scsi_host_put(hostdata->host);
2374
2375        return 0;
2376}
2377
2378/**
2379 * ibmvscsi_resume: Resume from suspend
2380 * @dev:        device struct
2381 *
2382 * We may have lost an interrupt across suspend/resume, so kick the
2383 * interrupt handler
2384 */
2385static int ibmvscsi_resume(struct device *dev)
2386{
2387        struct ibmvscsi_host_data *hostdata = dev_get_drvdata(dev);
2388        vio_disable_interrupts(to_vio_dev(hostdata->dev));
2389        tasklet_schedule(&hostdata->srp_task);
2390
2391        return 0;
2392}
2393
2394/**
2395 * ibmvscsi_device_table: Used by vio.c to match devices in the device tree we 
2396 * support.
2397 */
2398static const struct vio_device_id ibmvscsi_device_table[] = {
2399        {"vscsi", "IBM,v-scsi"},
2400        { "", "" }
2401};
2402MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table);
2403
2404static const struct dev_pm_ops ibmvscsi_pm_ops = {
2405        .resume = ibmvscsi_resume
2406};
2407
2408static struct vio_driver ibmvscsi_driver = {
2409        .id_table = ibmvscsi_device_table,
2410        .probe = ibmvscsi_probe,
2411        .remove = ibmvscsi_remove,
2412        .get_desired_dma = ibmvscsi_get_desired_dma,
2413        .name = "ibmvscsi",
2414        .pm = &ibmvscsi_pm_ops,
2415};
2416
2417static struct srp_function_template ibmvscsi_transport_functions = {
2418};
2419
2420int __init ibmvscsi_module_init(void)
2421{
2422        int ret;
2423
2424        /* Ensure we have two requests to do error recovery */
2425        driver_template.can_queue = max_requests;
2426        max_events = max_requests + 2;
2427
2428        if (!firmware_has_feature(FW_FEATURE_VIO))
2429                return -ENODEV;
2430
2431        ibmvscsi_transport_template =
2432                srp_attach_transport(&ibmvscsi_transport_functions);
2433        if (!ibmvscsi_transport_template)
2434                return -ENOMEM;
2435
2436        ret = vio_register_driver(&ibmvscsi_driver);
2437        if (ret)
2438                srp_release_transport(ibmvscsi_transport_template);
2439        return ret;
2440}
2441
2442void __exit ibmvscsi_module_exit(void)
2443{
2444        vio_unregister_driver(&ibmvscsi_driver);
2445        srp_release_transport(ibmvscsi_transport_template);
2446}
2447
2448module_init(ibmvscsi_module_init);
2449module_exit(ibmvscsi_module_exit);
2450