linux/drivers/scsi/ibmvscsi/ibmvscsi.c
/* ------------------------------------------------------------
 * ibmvscsi.c
 * (C) Copyright IBM Corporation 1994, 2004
 * Authors: Colin DeVilbiss (devilbis@us.ibm.com)
 *          Santiago Leon (santil@us.ibm.com)
 *          Dave Boutcher (sleddog@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 *
 * ------------------------------------------------------------
 * Emulation of a SCSI host adapter for Virtual I/O devices
 *
 * This driver supports the SCSI adapter implemented by the IBM
 * Power5 firmware.  That SCSI adapter is not a physical adapter,
 * but allows Linux SCSI peripheral drivers to directly
 * access devices in another logical partition on the physical system.
 *
 * The virtual adapter(s) are present in the open firmware device
 * tree just like real adapters.
 *
 * One of the capabilities provided on these systems is the ability
 * to DMA between partitions.  The architecture states that for VSCSI,
 * the server side is allowed to DMA to and from the client.  The client
 * is never trusted to DMA to or from the server directly.
 *
 * Messages are sent between partitions on a "Command/Response Queue"
 * (CRQ), which is just a buffer of 16 byte entries in the receiver's
 * memory.  Senders cannot access the buffer directly, but send messages by
 * making a hypervisor call and passing in the 16 bytes.  The hypervisor
 * puts the message in the next 16 byte space in round-robin fashion,
 * turns on the high order bit of the message (the valid bit), and
 * generates an interrupt to the receiver (if interrupts are turned on.)
 * The receiver just turns off the valid bit when they have copied out
 * the message.
 *
 * The VSCSI client builds a SCSI Remote Protocol (SRP) Information Unit
 * (IU) (as defined in the T10 standard available at www.t10.org), gets
 * a DMA address for the message, and sends it to the server as the
 * payload of a CRQ message.  The server DMAs the SRP IU and processes it,
 * including doing any additional data transfers.  When it is done, it
 * DMAs the SRP response back to the same address as the request came from,
 * and sends a CRQ message back to inform the client that the request has
 * completed.
 *
 * TODO: This is currently pretty tied to the IBM pSeries hypervisor
 * interfaces.  It would be really nice to abstract this above an RDMA
 * layer.
 */
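/*
 * A minimal sketch of the flow described above (illustrative only, not
 * driver code; H_REG_CRQ, H_SEND_CRQ and H_FREE_CRQ are the hcalls this
 * file issues through plpar_hcall_norets()):
 *
 *      H_REG_CRQ(unit_address, queue_dma_token, PAGE_SIZE)   register queue
 *      H_SEND_CRQ(unit_address, word1, word2)        send a 16-byte message
 *      ...interrupt: drain entries whose high-order valid bit (0x80) is
 *      set, then write crq->valid = 0x00 to hand each slot back...
 *      H_FREE_CRQ(unit_address)                                 on teardown
 */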

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/pm.h>
#include <linux/kthread.h>
#include <asm/firmware.h>
#include <asm/vio.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_srp.h>
#include "ibmvscsi.h"

/* The values below are somewhat arbitrary default values, but
 * OS/400 will use 3 busses (disks, CDs, tapes, I think.)
 * Note that there are 3 bits of channel value, 6 bits of id, and
 * 5 bits of LUN.
 */
static int max_id = 64;
static int max_channel = 3;
static int init_timeout = 300;
static int login_timeout = 60;
static int info_timeout = 30;
static int abort_timeout = 60;
static int reset_timeout = 60;
static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
static int fast_fail = 1;
static int client_reserve = 1;
static char partition_name[97] = "UNKNOWN";
static unsigned int partition_number = -1;

static struct scsi_transport_template *ibmvscsi_transport_template;

#define IBMVSCSI_VERSION "1.5.9"

MODULE_DESCRIPTION("IBM Virtual SCSI");
MODULE_AUTHOR("Dave Boutcher");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVSCSI_VERSION);

module_param_named(max_id, max_id, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_id, "Largest ID value for each channel");
module_param_named(max_channel, max_channel, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_channel, "Largest channel value");
module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds");
module_param_named(max_requests, max_requests, int, S_IRUGO);
MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter");
module_param_named(fast_fail, fast_fail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_fail, "Enable fast fail. [Default=1]");
module_param_named(client_reserve, client_reserve, int, S_IRUGO);
MODULE_PARM_DESC(client_reserve, "Attempt client managed reserve/release");

static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
                                struct ibmvscsi_host_data *hostdata);

/* ------------------------------------------------------------
 * Routines for managing the command/response queue
 */
/**
 * ibmvscsi_handle_event: - Interrupt handler for crq events
 * @irq:        number of irq to handle, not used
 * @dev_instance: ibmvscsi_host_data of host that received interrupt
 *
 * Disables interrupts and schedules srp_task
 * Always returns IRQ_HANDLED
 */
static irqreturn_t ibmvscsi_handle_event(int irq, void *dev_instance)
{
        struct ibmvscsi_host_data *hostdata =
            (struct ibmvscsi_host_data *)dev_instance;
        vio_disable_interrupts(to_vio_dev(hostdata->dev));
        tasklet_schedule(&hostdata->srp_task);
        return IRQ_HANDLED;
}

/**
 * ibmvscsi_release_crq_queue: - Deallocates data and unregisters CRQ
 * @queue:      crq_queue to release
 * @hostdata:   ibmvscsi_host_data of host
 * @max_requests: maximum requests (unused)
 *
 * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
 * the crq with the hypervisor.
 */
static void ibmvscsi_release_crq_queue(struct crq_queue *queue,
                                       struct ibmvscsi_host_data *hostdata,
                                       int max_requests)
{
        long rc = 0;
        struct vio_dev *vdev = to_vio_dev(hostdata->dev);
        free_irq(vdev->irq, (void *)hostdata);
        tasklet_kill(&hostdata->srp_task);
        do {
                if (rc)
                        msleep(100);
                rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
        } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
        dma_unmap_single(hostdata->dev,
                         queue->msg_token,
                         queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
        free_page((unsigned long)queue->msgs);
}

/**
 * crq_queue_next_crq: - Returns the next entry in message queue
 * @queue:      crq_queue to use
 *
 * Returns pointer to next entry in queue, or NULL if there are no new
 * entries in the CRQ.
 */
static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
{
        struct viosrp_crq *crq;
        unsigned long flags;

        spin_lock_irqsave(&queue->lock, flags);
        crq = &queue->msgs[queue->cur];
        if (crq->valid & 0x80) {
                if (++queue->cur == queue->size)
                        queue->cur = 0;
        } else
                crq = NULL;
        spin_unlock_irqrestore(&queue->lock, flags);

        return crq;
}

/**
 * ibmvscsi_send_crq: - Send a CRQ
 * @hostdata:   the adapter
 * @word1:      the first 64 bits of the data
 * @word2:      the second 64 bits of the data
 */
static int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata,
                             u64 word1, u64 word2)
{
        struct vio_dev *vdev = to_vio_dev(hostdata->dev);

        return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
}
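
/*
 * Illustrative sketch (not part of the driver): a viosrp_crq entry is 16
 * bytes, so a sender hands it to ibmvscsi_send_crq() as two big-endian
 * 64-bit words, exactly as ibmvscsi_send_srp_event() does further below.
 */
static inline int example_send_crq_entry(struct ibmvscsi_host_data *hostdata,
                                         struct viosrp_crq *crq)
{
        __be64 *word = (__be64 *)crq;

        return ibmvscsi_send_crq(hostdata, be64_to_cpu(word[0]),
                                 be64_to_cpu(word[1]));
}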

/**
 * ibmvscsi_task: - Process srps asynchronously
 * @data:       ibmvscsi_host_data of host
 */
static void ibmvscsi_task(void *data)
{
        struct ibmvscsi_host_data *hostdata = (struct ibmvscsi_host_data *)data;
        struct vio_dev *vdev = to_vio_dev(hostdata->dev);
        struct viosrp_crq *crq;
        int done = 0;

        while (!done) {
                /* Pull all the valid messages off the CRQ */
                while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
                        ibmvscsi_handle_crq(crq, hostdata);
                        crq->valid = 0x00;
                }

                vio_enable_interrupts(vdev);
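                /* Re-check the queue once interrupts are back on: an entry
                 * that arrived after the drain loop above but before
                 * vio_enable_interrupts() would otherwise sit unnoticed
                 * until the next interrupt.
                 */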
                crq = crq_queue_next_crq(&hostdata->queue);
                if (crq != NULL) {
                        vio_disable_interrupts(vdev);
                        ibmvscsi_handle_crq(crq, hostdata);
                        crq->valid = 0x00;
                } else {
                        done = 1;
                }
        }
}

static void gather_partition_info(void)
{
        struct device_node *rootdn;

        const char *ppartition_name;
        const __be32 *p_number_ptr;

        /* Retrieve information about this partition */
        rootdn = of_find_node_by_path("/");
        if (!rootdn) {
                return;
        }

        ppartition_name = of_get_property(rootdn, "ibm,partition-name", NULL);
        if (ppartition_name)
                strncpy(partition_name, ppartition_name,
                                sizeof(partition_name));
        p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL);
        if (p_number_ptr)
                partition_number = of_read_number(p_number_ptr, 1);
        of_node_put(rootdn);
}

static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
{
        memset(&hostdata->madapter_info, 0x00,
                        sizeof(hostdata->madapter_info));

        dev_info(hostdata->dev, "SRP_VERSION: %s\n", SRP_VERSION);
        strcpy(hostdata->madapter_info.srp_version, SRP_VERSION);

        strncpy(hostdata->madapter_info.partition_name, partition_name,
                        sizeof(hostdata->madapter_info.partition_name));

        hostdata->madapter_info.partition_number =
                                        cpu_to_be32(partition_number);

        hostdata->madapter_info.mad_version = cpu_to_be32(1);
        hostdata->madapter_info.os_type = cpu_to_be32(2);
}

/**
 * ibmvscsi_reset_crq_queue: - resets a crq after a failure
 * @queue:      crq_queue to reset
 * @hostdata:   ibmvscsi_host_data of host
 *
 */
static int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
                                    struct ibmvscsi_host_data *hostdata)
{
        int rc = 0;
        struct vio_dev *vdev = to_vio_dev(hostdata->dev);

        /* Close the CRQ */
        do {
                if (rc)
                        msleep(100);
                rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
        } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));

        /* Clean out the queue */
        memset(queue->msgs, 0x00, PAGE_SIZE);
        queue->cur = 0;

        set_adapter_info(hostdata);

        /* And re-open it again */
        rc = plpar_hcall_norets(H_REG_CRQ,
                                vdev->unit_address,
                                queue->msg_token, PAGE_SIZE);
        if (rc == 2) {
                /* Adapter is good, but other end is not ready */
                dev_warn(hostdata->dev, "Partner adapter not ready\n");
        } else if (rc != 0) {
                dev_warn(hostdata->dev, "couldn't register crq--rc 0x%x\n", rc);
        }
        return rc;
}

/**
 * ibmvscsi_init_crq_queue: - Initializes and registers CRQ with hypervisor
 * @queue:      crq_queue to initialize and register
 * @hostdata:   ibmvscsi_host_data of host
 * @max_requests: maximum requests (unused)
 *
 * Allocates a page for messages, maps it for dma, and registers
 * the crq with the hypervisor.
 * Returns zero on success.
 */
static int ibmvscsi_init_crq_queue(struct crq_queue *queue,
                                   struct ibmvscsi_host_data *hostdata,
                                   int max_requests)
{
        int rc;
        int retrc;
        struct vio_dev *vdev = to_vio_dev(hostdata->dev);

        queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);

        if (!queue->msgs)
                goto malloc_failed;
        queue->size = PAGE_SIZE / sizeof(*queue->msgs);

        queue->msg_token = dma_map_single(hostdata->dev, queue->msgs,
                                          queue->size * sizeof(*queue->msgs),
                                          DMA_BIDIRECTIONAL);

        if (dma_mapping_error(hostdata->dev, queue->msg_token))
                goto map_failed;

        gather_partition_info();
        set_adapter_info(hostdata);

        retrc = rc = plpar_hcall_norets(H_REG_CRQ,
                                vdev->unit_address,
                                queue->msg_token, PAGE_SIZE);
        if (rc == H_RESOURCE)
                /* maybe kexecing and resource is busy. try a reset */
                rc = ibmvscsi_reset_crq_queue(queue,
                                              hostdata);

        if (rc == 2) {
                /* Adapter is good, but other end is not ready */
                dev_warn(hostdata->dev, "Partner adapter not ready\n");
                retrc = 0;
        } else if (rc != 0) {
                dev_warn(hostdata->dev, "Error %d opening adapter\n", rc);
                goto reg_crq_failed;
        }

        queue->cur = 0;
        spin_lock_init(&queue->lock);

        tasklet_init(&hostdata->srp_task, (void *)ibmvscsi_task,
                     (unsigned long)hostdata);

        if (request_irq(vdev->irq,
                        ibmvscsi_handle_event,
                        0, "ibmvscsi", (void *)hostdata) != 0) {
                dev_err(hostdata->dev, "couldn't register irq 0x%x\n",
                        vdev->irq);
                goto req_irq_failed;
        }

        rc = vio_enable_interrupts(vdev);
        if (rc != 0) {
                dev_err(hostdata->dev, "Error %d enabling interrupts!!!\n", rc);
                goto req_irq_failed;
        }

        return retrc;

      req_irq_failed:
        tasklet_kill(&hostdata->srp_task);
        rc = 0;
        do {
                if (rc)
                        msleep(100);
                rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
        } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
      reg_crq_failed:
        dma_unmap_single(hostdata->dev,
                         queue->msg_token,
                         queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
      map_failed:
        free_page((unsigned long)queue->msgs);
      malloc_failed:
        return -1;
}

/**
 * ibmvscsi_reenable_crq_queue: - re-enables a crq after a failure
 * @queue:      crq_queue to re-enable
 * @hostdata:   ibmvscsi_host_data of host
 *
 */
static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
                                       struct ibmvscsi_host_data *hostdata)
{
        int rc = 0;
        struct vio_dev *vdev = to_vio_dev(hostdata->dev);

        /* Re-enable the CRQ */
        do {
                if (rc)
                        msleep(100);
                rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
        } while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));

        if (rc)
                dev_err(hostdata->dev, "Error %d enabling adapter\n", rc);
        return rc;
}

/* ------------------------------------------------------------
 * Routines for the event pool and event structs
 */
/**
 * initialize_event_pool: - Allocates and initializes the event pool for a host
 * @pool:       event_pool to be initialized
 * @size:       Number of events in pool
 * @hostdata:   ibmvscsi_host_data who owns the event pool
 *
 * Returns zero on success.
 */
static int initialize_event_pool(struct event_pool *pool,
                                 int size, struct ibmvscsi_host_data *hostdata)
{
        int i;

        pool->size = size;
        pool->next = 0;
        pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
        if (!pool->events)
                return -ENOMEM;

        pool->iu_storage =
            dma_alloc_coherent(hostdata->dev,
                               pool->size * sizeof(*pool->iu_storage),
                               &pool->iu_token, 0);
        if (!pool->iu_storage) {
                kfree(pool->events);
                return -ENOMEM;
        }

        for (i = 0; i < pool->size; ++i) {
                struct srp_event_struct *evt = &pool->events[i];
                memset(&evt->crq, 0x00, sizeof(evt->crq));
                atomic_set(&evt->free, 1);
                evt->crq.valid = 0x80;
                evt->crq.IU_length = cpu_to_be16(sizeof(*evt->xfer_iu));
                evt->crq.IU_data_ptr = cpu_to_be64(pool->iu_token +
                        sizeof(*evt->xfer_iu) * i);
                evt->xfer_iu = pool->iu_storage + i;
                evt->hostdata = hostdata;
                evt->ext_list = NULL;
                evt->ext_list_token = 0;
        }

        return 0;
}
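
/*
 * Pool layout note: event i's transfer IU lives at pool->iu_storage + i in
 * the coherent buffer, so its bus address is pool->iu_token plus
 * i * sizeof(union viosrp_iu); that is the value each event's
 * crq.IU_data_ptr is primed with above.
 */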

/**
 * release_event_pool: - Frees memory of an event pool of a host
 * @pool:       event_pool to be released
 * @hostdata:   ibmvscsi_host_data who owns the event pool
 */
static void release_event_pool(struct event_pool *pool,
                               struct ibmvscsi_host_data *hostdata)
{
        int i, in_use = 0;
        for (i = 0; i < pool->size; ++i) {
                if (atomic_read(&pool->events[i].free) != 1)
                        ++in_use;
                if (pool->events[i].ext_list) {
                        dma_free_coherent(hostdata->dev,
                                  SG_ALL * sizeof(struct srp_direct_buf),
                                  pool->events[i].ext_list,
                                  pool->events[i].ext_list_token);
                }
        }
        if (in_use)
                dev_warn(hostdata->dev, "releasing event pool with %d "
                         "events still in use?\n", in_use);
        kfree(pool->events);
        dma_free_coherent(hostdata->dev,
                          pool->size * sizeof(*pool->iu_storage),
                          pool->iu_storage, pool->iu_token);
}

/**
 * valid_event_struct: - Determines if event is valid.
 * @pool:       event_pool that contains the event
 * @evt:        srp_event_struct to be checked for validity
 *
 * Returns zero if event is invalid, one otherwise.
 */
static int valid_event_struct(struct event_pool *pool,
                                struct srp_event_struct *evt)
{
        int index = evt - pool->events;
        if (index < 0 || index >= pool->size)   /* outside of bounds */
                return 0;
        if (evt != pool->events + index)        /* unaligned */
                return 0;
        return 1;
}

/**
 * free_event_struct: - Changes status of event to "free"
 * @pool:       event_pool that contains the event
 * @evt:        srp_event_struct to be modified
 */
static void free_event_struct(struct event_pool *pool,
                                       struct srp_event_struct *evt)
{
        if (!valid_event_struct(pool, evt)) {
                dev_err(evt->hostdata->dev, "Freeing invalid event_struct %p "
                        "(not in pool %p)\n", evt, pool->events);
                return;
        }
        if (atomic_inc_return(&evt->free) != 1) {
                dev_err(evt->hostdata->dev, "Freeing event_struct %p "
                        "which is not in use!\n", evt);
                return;
        }
}

/**
 * get_event_struct: - Gets the next free event in pool
 * @pool:       event_pool that contains the events to be searched
 *
 * Returns the next event in "free" state, and NULL if none are free.
 * Note that no synchronization is done here, we assume the host_lock
 * will synchronize things.
 */
static struct srp_event_struct *get_event_struct(struct event_pool *pool)
{
        int i;
        int poolsize = pool->size;
        int offset = pool->next;

        for (i = 0; i < poolsize; i++) {
                offset = (offset + 1) % poolsize;
                if (!atomic_dec_if_positive(&pool->events[offset].free)) {
                        pool->next = offset;
                        return &pool->events[offset];
                }
        }

        printk(KERN_ERR "ibmvscsi: found no event struct in pool!\n");
        return NULL;
}

/**
 * init_event_struct: Initialize fields in an event struct that are always
 *                    required.
 * @evt_struct: The event
 * @done:       Routine to call when the event is responded to
 * @format:     SRP or MAD format
 * @timeout:    timeout value set in the CRQ
 */
static void init_event_struct(struct srp_event_struct *evt_struct,
                              void (*done) (struct srp_event_struct *),
                              u8 format,
                              int timeout)
{
        evt_struct->cmnd = NULL;
        evt_struct->cmnd_done = NULL;
        evt_struct->sync_srp = NULL;
        evt_struct->crq.format = format;
        evt_struct->crq.timeout = cpu_to_be16(timeout);
        evt_struct->done = done;
}
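
/*
 * Illustrative sketch (assumed usage, not driver code): the life cycle of a
 * pool event as used throughout this file.  Callers hold host_lock, fill in
 * the IU, and hand the event to ibmvscsi_send_srp_event(); failure paths
 * return it to the pool with free_event_struct().
 */
static inline void example_event_lifecycle(struct ibmvscsi_host_data *hostdata,
                                           void (*done)(struct srp_event_struct *))
{
        struct srp_event_struct *evt = get_event_struct(&hostdata->pool);

        if (!evt)
                return; /* pool exhausted; a real caller would report busy */

        init_event_struct(evt, done, VIOSRP_MAD_FORMAT, info_timeout);
        /* ... fill evt->iu here, then call ibmvscsi_send_srp_event() ... */
        free_event_struct(&hostdata->pool, evt);
}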

/* ------------------------------------------------------------
 * Routines for receiving SCSI responses from the hosting partition
 */

/**
 * set_srp_direction: Set the fields in the srp related to data
 *     direction and number of buffers based on the direction in
 *     the scsi_cmnd and the number of buffers
 */
static void set_srp_direction(struct scsi_cmnd *cmd,
                              struct srp_cmd *srp_cmd,
                              int numbuf)
{
        u8 fmt;

        if (numbuf == 0)
                return;

        if (numbuf == 1)
                fmt = SRP_DATA_DESC_DIRECT;
        else {
                fmt = SRP_DATA_DESC_INDIRECT;
                numbuf = min(numbuf, MAX_INDIRECT_BUFS);

                if (cmd->sc_data_direction == DMA_TO_DEVICE)
                        srp_cmd->data_out_desc_cnt = numbuf;
                else
                        srp_cmd->data_in_desc_cnt = numbuf;
        }

        if (cmd->sc_data_direction == DMA_TO_DEVICE)
                srp_cmd->buf_fmt = fmt << 4;
        else
                srp_cmd->buf_fmt = fmt;
}
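
/*
 * Worked example: a write (DMA_TO_DEVICE) with four mapped segments sets
 * buf_fmt = SRP_DATA_DESC_INDIRECT << 4 (the data-out format lives in the
 * high nibble) and data_out_desc_cnt = 4; a read would use the low nibble
 * and data_in_desc_cnt instead.
 */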

/**
 * unmap_cmd_data: - Unmap data pointed to by srp_cmd based on the format
 * @cmd:        srp_cmd whose additional_data member will be unmapped
 * @evt_struct: srp_event_struct with the command being unmapped
 * @dev:        device for which the memory is mapped
 */
static void unmap_cmd_data(struct srp_cmd *cmd,
                           struct srp_event_struct *evt_struct,
                           struct device *dev)
{
        u8 out_fmt, in_fmt;

        out_fmt = cmd->buf_fmt >> 4;
        in_fmt = cmd->buf_fmt & ((1U << 4) - 1);

        if (out_fmt == SRP_NO_DATA_DESC && in_fmt == SRP_NO_DATA_DESC)
                return;

        if (evt_struct->cmnd)
                scsi_dma_unmap(evt_struct->cmnd);
}

static u64 map_sg_list(struct scsi_cmnd *cmd, int nseg,
                       struct srp_direct_buf *md)
{
        int i;
        struct scatterlist *sg;
        u64 total_length = 0;

        scsi_for_each_sg(cmd, sg, nseg, i) {
                struct srp_direct_buf *descr = md + i;
                descr->va = cpu_to_be64(sg_dma_address(sg));
                descr->len = cpu_to_be32(sg_dma_len(sg));
                descr->key = 0;
                total_length += sg_dma_len(sg);
        }
        return total_length;
}

/**
 * map_sg_data: - Maps dma for a scatterlist and initializes descriptor fields
 * @cmd:        Scsi_Cmnd with the scatterlist
 * @evt_struct: srp_event_struct that owns the descriptor tables
 * @srp_cmd:    srp_cmd that contains the memory descriptor
 * @dev:        device for which to map dma memory
 *
 * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd.
 * Returns 1 on success.
 */
static int map_sg_data(struct scsi_cmnd *cmd,
                       struct srp_event_struct *evt_struct,
                       struct srp_cmd *srp_cmd, struct device *dev)
{

        int sg_mapped;
        u64 total_length = 0;
        struct srp_direct_buf *data =
                (struct srp_direct_buf *) srp_cmd->add_data;
        struct srp_indirect_buf *indirect =
                (struct srp_indirect_buf *) data;

        sg_mapped = scsi_dma_map(cmd);
        if (!sg_mapped)
                return 1;
        else if (sg_mapped < 0)
                return 0;

        set_srp_direction(cmd, srp_cmd, sg_mapped);

        /* special case; we can use a single direct descriptor */
        if (sg_mapped == 1) {
                map_sg_list(cmd, sg_mapped, data);
                return 1;
        }

        indirect->table_desc.va = 0;
        indirect->table_desc.len = cpu_to_be32(sg_mapped *
                                               sizeof(struct srp_direct_buf));
        indirect->table_desc.key = 0;

        if (sg_mapped <= MAX_INDIRECT_BUFS) {
                total_length = map_sg_list(cmd, sg_mapped,
                                           &indirect->desc_list[0]);
                indirect->len = cpu_to_be32(total_length);
                return 1;
        }

        /* get indirect table */
        if (!evt_struct->ext_list) {
                evt_struct->ext_list = (struct srp_direct_buf *)
                        dma_alloc_coherent(dev,
                                           SG_ALL * sizeof(struct srp_direct_buf),
                                           &evt_struct->ext_list_token, 0);
                if (!evt_struct->ext_list) {
                        if (!firmware_has_feature(FW_FEATURE_CMO))
                                sdev_printk(KERN_ERR, cmd->device,
                                            "Can't allocate memory "
                                            "for indirect table\n");
                        scsi_dma_unmap(cmd);
                        return 0;
                }
        }

        total_length = map_sg_list(cmd, sg_mapped, evt_struct->ext_list);

        indirect->len = cpu_to_be32(total_length);
        indirect->table_desc.va = cpu_to_be64(evt_struct->ext_list_token);
        indirect->table_desc.len = cpu_to_be32(sg_mapped *
                                               sizeof(indirect->desc_list[0]));
        memcpy(indirect->desc_list, evt_struct->ext_list,
               MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf));
        return 1;
}
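
/*
 * Resulting layout (sketch): srp_cmd->add_data holds a srp_indirect_buf
 * whose table_desc addresses the descriptor table (the inline desc_list
 * for up to MAX_INDIRECT_BUFS segments, or the separately allocated
 * ext_list beyond that) and whose len is the total transfer length.
 */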

/**
 * map_data_for_srp_cmd: - Calls functions to map data for srp cmds
 * @cmd:        struct scsi_cmnd with the memory to be mapped
 * @evt_struct: srp_event_struct that owns the descriptor tables
 * @srp_cmd:    srp_cmd that contains the memory descriptor
 * @dev:        dma device for which to map dma memory
 *
 * Called by ibmvscsi_queuecommand_lck() when converting scsi cmds to srp cmds.
 * Returns 1 on success.
 */
static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
                                struct srp_event_struct *evt_struct,
                                struct srp_cmd *srp_cmd, struct device *dev)
{
        switch (cmd->sc_data_direction) {
        case DMA_FROM_DEVICE:
        case DMA_TO_DEVICE:
                break;
        case DMA_NONE:
                return 1;
        case DMA_BIDIRECTIONAL:
                sdev_printk(KERN_ERR, cmd->device,
                            "Can't map DMA_BIDIRECTIONAL to read/write\n");
                return 0;
        default:
                sdev_printk(KERN_ERR, cmd->device,
                            "Unknown data direction 0x%02x; can't map!\n",
                            cmd->sc_data_direction);
                return 0;
        }

        return map_sg_data(cmd, evt_struct, srp_cmd, dev);
}

/**
 * purge_requests: Our virtual adapter just shut down.  Purge any sent requests.
 * @hostdata:    the adapter
 * @error_code:  error code to set on the purged commands
 */
static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
{
        struct srp_event_struct *evt;
        unsigned long flags;

        spin_lock_irqsave(hostdata->host->host_lock, flags);
        while (!list_empty(&hostdata->sent)) {
                evt = list_first_entry(&hostdata->sent, struct srp_event_struct, list);
                list_del(&evt->list);
                del_timer(&evt->timer);

                spin_unlock_irqrestore(hostdata->host->host_lock, flags);
                if (evt->cmnd) {
                        evt->cmnd->result = (error_code << 16);
                        unmap_cmd_data(&evt->iu.srp.cmd, evt,
                                       evt->hostdata->dev);
                        if (evt->cmnd_done)
                                evt->cmnd_done(evt->cmnd);
                } else if (evt->done)
                        evt->done(evt);
                free_event_struct(&evt->hostdata->pool, evt);
                spin_lock_irqsave(hostdata->host->host_lock, flags);
        }
        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
}

/**
 * ibmvscsi_reset_host - Reset the connection to the server
 * @hostdata:   struct ibmvscsi_host_data to reset
 */
static void ibmvscsi_reset_host(struct ibmvscsi_host_data *hostdata)
{
        scsi_block_requests(hostdata->host);
        atomic_set(&hostdata->request_limit, 0);

        purge_requests(hostdata, DID_ERROR);
        hostdata->reset_crq = 1;
        wake_up(&hostdata->work_wait_q);
}

/**
 * ibmvscsi_timeout - Internal command timeout handler
 * @evt_struct: struct srp_event_struct that timed out
 *
 * Called when an internally generated command times out
 */
static void ibmvscsi_timeout(struct srp_event_struct *evt_struct)
{
        struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;

        dev_err(hostdata->dev, "Command timed out (%x). Resetting connection\n",
                evt_struct->iu.srp.cmd.opcode);

        ibmvscsi_reset_host(hostdata);
}


/* ------------------------------------------------------------
 * Routines for sending and receiving SRPs
 */
/**
 * ibmvscsi_send_srp_event: - Transforms event to u64 array and calls send_crq()
 * @evt_struct: evt_struct to be sent
 * @hostdata:   ibmvscsi_host_data of host
 * @timeout:    timeout in seconds - 0 means do not time command
 *
 * Returns the value returned from ibmvscsi_send_crq(). (Zero for success)
 * Note that this routine assumes that host_lock is held for synchronization
 */
static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
                                   struct ibmvscsi_host_data *hostdata,
                                   unsigned long timeout)
{
        __be64 *crq_as_u64 = (__be64 *)&evt_struct->crq;
        int request_status = 0;
        int rc;
        int srp_req = 0;

        /* If we have exhausted our request limit, just fail this request,
         * unless it is for a reset or abort.
         * Note that there are rare cases involving driver generated requests
         * (such as task management requests) that the mid layer may think we
         * can handle more requests (can_queue) when we actually can't
         */
        if (evt_struct->crq.format == VIOSRP_SRP_FORMAT) {
                srp_req = 1;
                request_status =
                        atomic_dec_if_positive(&hostdata->request_limit);
                /* If request limit was -1 when we started, it is now even
                 * less than that
                 */
                if (request_status < -1)
                        goto send_error;
                /* If request limit was 0 when we started, the adapter is in
                 * the process of performing a login with the server, or we
                 * may simply have run out of requests.
                 */
                else if (request_status == -1 &&
                         evt_struct->iu.srp.login_req.opcode != SRP_LOGIN_REQ)
                        goto send_busy;
                /* Abort and reset calls should make it through.
                 * Nothing except abort and reset should use the last two
                 * slots unless we had two or less to begin with.
                 */
                else if (request_status < 2 &&
                         evt_struct->iu.srp.cmd.opcode != SRP_TSK_MGMT) {
                        /* In the case that we have less than two requests
                         * available, check the server limit as a combination
                         * of the request limit and the number of requests
                         * in-flight (the size of the send list).  If the
                         * server limit is greater than 2, return busy so
                         * that the last two are reserved for reset and abort.
                         */
                        int server_limit = request_status;
                        struct srp_event_struct *tmp_evt;

                        list_for_each_entry(tmp_evt, &hostdata->sent, list) {
                                server_limit++;
                        }

                        if (server_limit > 2)
                                goto send_busy;
                }
        }

        /* Copy the IU into the transfer area */
        *evt_struct->xfer_iu = evt_struct->iu;
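        /* The tag is opaque to the server and comes back verbatim in the
         * response, which lets ibmvscsi_handle_crq() recover the event by
         * casting the tag back to a srp_event_struct pointer.
         */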
        evt_struct->xfer_iu->srp.rsp.tag = (u64)evt_struct;

        /* Add this to the sent list.  We need to do this
         * before we actually send
         * in case it comes back REALLY fast
         */
        list_add_tail(&evt_struct->list, &hostdata->sent);

        init_timer(&evt_struct->timer);
        if (timeout) {
                evt_struct->timer.data = (unsigned long) evt_struct;
                evt_struct->timer.expires = jiffies + (timeout * HZ);
                evt_struct->timer.function = (void (*)(unsigned long))ibmvscsi_timeout;
                add_timer(&evt_struct->timer);
        }

        rc = ibmvscsi_send_crq(hostdata, be64_to_cpu(crq_as_u64[0]),
                               be64_to_cpu(crq_as_u64[1]));
        if (rc != 0) {
                list_del(&evt_struct->list);
                del_timer(&evt_struct->timer);

                /* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
                 * Firmware will send a CRQ with a transport event (0xFF) to
                 * tell this client what has happened to the transport.  This
                 * will be handled in ibmvscsi_handle_crq()
                 */
                if (rc == H_CLOSED) {
                        dev_warn(hostdata->dev, "send warning. "
                                 "Receive queue closed, will retry.\n");
                        goto send_busy;
                }
                dev_err(hostdata->dev, "send error %d\n", rc);
                if (srp_req)
                        atomic_inc(&hostdata->request_limit);
                goto send_error;
        }

        return 0;

 send_busy:
        unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);

        free_event_struct(&hostdata->pool, evt_struct);
        if (srp_req && request_status != -1)
                atomic_inc(&hostdata->request_limit);
        return SCSI_MLQUEUE_HOST_BUSY;

 send_error:
        unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);

        if (evt_struct->cmnd != NULL) {
                evt_struct->cmnd->result = DID_ERROR << 16;
                evt_struct->cmnd_done(evt_struct->cmnd);
        } else if (evt_struct->done)
                evt_struct->done(evt_struct);

        free_event_struct(&hostdata->pool, evt_struct);
        return 0;
}

/**
 * handle_cmd_rsp: -  Handle responses from commands
 * @evt_struct: srp_event_struct to be handled
 *
 * Used as a callback when sending scsi cmds.
 * Gets called by ibmvscsi_handle_crq()
 */
static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
{
        struct srp_rsp *rsp = &evt_struct->xfer_iu->srp.rsp;
        struct scsi_cmnd *cmnd = evt_struct->cmnd;

        if (unlikely(rsp->opcode != SRP_RSP)) {
                if (printk_ratelimit())
                        dev_warn(evt_struct->hostdata->dev,
                                 "bad SRP RSP type %d\n", rsp->opcode);
        }

        if (cmnd) {
                cmnd->result |= rsp->status;
                if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
                        memcpy(cmnd->sense_buffer,
                               rsp->data,
                               be32_to_cpu(rsp->sense_data_len));
                unmap_cmd_data(&evt_struct->iu.srp.cmd,
                               evt_struct,
                               evt_struct->hostdata->dev);

                if (rsp->flags & SRP_RSP_FLAG_DOOVER)
                        scsi_set_resid(cmnd,
                                       be32_to_cpu(rsp->data_out_res_cnt));
                else if (rsp->flags & SRP_RSP_FLAG_DIOVER)
                        scsi_set_resid(cmnd, be32_to_cpu(rsp->data_in_res_cnt));
        }

        if (evt_struct->cmnd_done)
                evt_struct->cmnd_done(cmnd);
}

/**
 * lun_from_dev: - Returns the lun of the scsi device
 * @dev:        struct scsi_device
 */
static inline u16 lun_from_dev(struct scsi_device *dev)
{
        return (0x2 << 14) | (dev->id << 8) | (dev->channel << 5) | dev->lun;
}
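
/*
 * Worked example: channel 1, id 2, lun 3 encodes as
 * (0x2 << 14) | (2 << 8) | (1 << 5) | 3 = 0x8223; the 0x2 in the top bits
 * selects SAM-style logical unit addressing.
 */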

/**
 * ibmvscsi_queuecommand_lck: - The queuecommand function of the scsi template
 * @cmnd:       struct scsi_cmnd to be executed
 * @done:       Callback function to be called when cmnd is completed
 */
static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd,
                                 void (*done) (struct scsi_cmnd *))
{
        struct srp_cmd *srp_cmd;
        struct srp_event_struct *evt_struct;
        struct srp_indirect_buf *indirect;
        struct ibmvscsi_host_data *hostdata = shost_priv(cmnd->device->host);
        u16 lun = lun_from_dev(cmnd->device);
        u8 out_fmt, in_fmt;

        cmnd->result = (DID_OK << 16);
        evt_struct = get_event_struct(&hostdata->pool);
        if (!evt_struct)
                return SCSI_MLQUEUE_HOST_BUSY;

        /* Set up the actual SRP IU */
        srp_cmd = &evt_struct->iu.srp.cmd;
        memset(srp_cmd, 0x00, SRP_MAX_IU_LEN);
        srp_cmd->opcode = SRP_CMD;
        memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(srp_cmd->cdb));
        srp_cmd->lun = cpu_to_be64(((u64)lun) << 48);

        if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
                if (!firmware_has_feature(FW_FEATURE_CMO))
                        sdev_printk(KERN_ERR, cmnd->device,
                                    "couldn't convert cmd to srp_cmd\n");
                free_event_struct(&hostdata->pool, evt_struct);
                return SCSI_MLQUEUE_HOST_BUSY;
        }

        init_event_struct(evt_struct,
                          handle_cmd_rsp,
                          VIOSRP_SRP_FORMAT,
                          cmnd->request->timeout/HZ);

        evt_struct->cmnd = cmnd;
        evt_struct->cmnd_done = done;

        /* Fix up dma address of the buffer itself */
        indirect = (struct srp_indirect_buf *) srp_cmd->add_data;
        out_fmt = srp_cmd->buf_fmt >> 4;
        in_fmt = srp_cmd->buf_fmt & ((1U << 4) - 1);
        if ((in_fmt == SRP_DATA_DESC_INDIRECT ||
             out_fmt == SRP_DATA_DESC_INDIRECT) &&
            indirect->table_desc.va == 0) {
                indirect->table_desc.va =
                        cpu_to_be64(be64_to_cpu(evt_struct->crq.IU_data_ptr) +
                        offsetof(struct srp_cmd, add_data) +
                        offsetof(struct srp_indirect_buf, desc_list));
        }

        return ibmvscsi_send_srp_event(evt_struct, hostdata, 0);
}

static DEF_SCSI_QCMD(ibmvscsi_queuecommand)

/* ------------------------------------------------------------
 * Routines for driver initialization
 */

/**
 * map_persist_bufs: - Pre-map persistent data for adapter logins
 * @hostdata:   ibmvscsi_host_data of host
 *
 * Map the capabilities and adapter info DMA buffers to avoid runtime failures.
 * Return 1 on error, 0 on success.
 */
static int map_persist_bufs(struct ibmvscsi_host_data *hostdata)
{

        hostdata->caps_addr = dma_map_single(hostdata->dev, &hostdata->caps,
                                             sizeof(hostdata->caps), DMA_BIDIRECTIONAL);

        if (dma_mapping_error(hostdata->dev, hostdata->caps_addr)) {
                dev_err(hostdata->dev, "Unable to map capabilities buffer!\n");
                return 1;
        }

        hostdata->adapter_info_addr = dma_map_single(hostdata->dev,
                                                     &hostdata->madapter_info,
                                                     sizeof(hostdata->madapter_info),
                                                     DMA_BIDIRECTIONAL);
        if (dma_mapping_error(hostdata->dev, hostdata->adapter_info_addr)) {
                dev_err(hostdata->dev, "Unable to map adapter info buffer!\n");
                dma_unmap_single(hostdata->dev, hostdata->caps_addr,
                                 sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
                return 1;
        }

        return 0;
}

/**
 * unmap_persist_bufs: - Unmap persistent data needed for adapter logins
 * @hostdata:   ibmvscsi_host_data of host
 *
 * Unmap the capabilities and adapter info DMA buffers
 */
static void unmap_persist_bufs(struct ibmvscsi_host_data *hostdata)
{
        dma_unmap_single(hostdata->dev, hostdata->caps_addr,
                         sizeof(hostdata->caps), DMA_BIDIRECTIONAL);

        dma_unmap_single(hostdata->dev, hostdata->adapter_info_addr,
                         sizeof(hostdata->madapter_info), DMA_BIDIRECTIONAL);
}

/**
 * login_rsp: - Handle response to SRP login request
 * @evt_struct: srp_event_struct with the response
 *
 * Used as a "done" callback when sending srp_login. Gets called
 * by ibmvscsi_handle_crq()
 */
static void login_rsp(struct srp_event_struct *evt_struct)
{
        struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
        switch (evt_struct->xfer_iu->srp.login_rsp.opcode) {
        case SRP_LOGIN_RSP:     /* it worked! */
                break;
        case SRP_LOGIN_REJ:     /* refused! */
                dev_info(hostdata->dev, "SRP_LOGIN_REJ reason %u\n",
                         evt_struct->xfer_iu->srp.login_rej.reason);
                /* Login failed.  */
                atomic_set(&hostdata->request_limit, -1);
                return;
        default:
                dev_err(hostdata->dev, "Invalid login response typecode 0x%02x!\n",
                        evt_struct->xfer_iu->srp.login_rsp.opcode);
                /* Login failed.  */
                atomic_set(&hostdata->request_limit, -1);
                return;
        }

        dev_info(hostdata->dev, "SRP_LOGIN succeeded\n");
        hostdata->client_migrated = 0;

        /* Now we know what the real request-limit is.
         * This value is set rather than added to request_limit because
         * request_limit could have been set to -1 by this client.
         */
        atomic_set(&hostdata->request_limit,
                   be32_to_cpu(evt_struct->xfer_iu->srp.login_rsp.req_lim_delta));

        /* If we had any pending I/Os, kick them */
        scsi_unblock_requests(hostdata->host);
}

/**
 * send_srp_login: - Sends the srp login
 * @hostdata:   ibmvscsi_host_data of host
 *
 * Returns zero if successful.
 */
static int send_srp_login(struct ibmvscsi_host_data *hostdata)
{
        int rc;
        unsigned long flags;
        struct srp_login_req *login;
        struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool);

        BUG_ON(!evt_struct);
        init_event_struct(evt_struct, login_rsp,
                          VIOSRP_SRP_FORMAT, login_timeout);

        login = &evt_struct->iu.srp.login_req;
        memset(login, 0, sizeof(*login));
        login->opcode = SRP_LOGIN_REQ;
        login->req_it_iu_len = cpu_to_be32(sizeof(union srp_iu));
        login->req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
                                         SRP_BUF_FORMAT_INDIRECT);

        spin_lock_irqsave(hostdata->host->host_lock, flags);
        /* Start out with a request limit of 0, since this is negotiated in
         * the login request we are just sending and login requests always
         * get sent by the driver regardless of request_limit.
         */
        atomic_set(&hostdata->request_limit, 0);

        rc = ibmvscsi_send_srp_event(evt_struct, hostdata, login_timeout * 2);
        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
        dev_info(hostdata->dev, "sent SRP login\n");
        return rc;
}

/**
 * capabilities_rsp: - Handle response to MAD adapter capabilities request
 * @evt_struct: srp_event_struct with the response
 *
 * Used as a "done" callback when sending the capabilities request.
 */
static void capabilities_rsp(struct srp_event_struct *evt_struct)
{
        struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;

        if (evt_struct->xfer_iu->mad.capabilities.common.status) {
                dev_err(hostdata->dev, "error 0x%X getting capabilities info\n",
                        evt_struct->xfer_iu->mad.capabilities.common.status);
        } else {
                if (hostdata->caps.migration.common.server_support !=
                    cpu_to_be16(SERVER_SUPPORTS_CAP))
                        dev_info(hostdata->dev, "Partition migration not supported\n");

                if (client_reserve) {
                        if (hostdata->caps.reserve.common.server_support ==
                            cpu_to_be16(SERVER_SUPPORTS_CAP))
                                dev_info(hostdata->dev, "Client reserve enabled\n");
                        else
                                dev_info(hostdata->dev, "Client reserve not supported\n");
                }
        }

        send_srp_login(hostdata);
}

/**
 * send_mad_capabilities: - Sends the mad capabilities request
 *      and stores the result so it can be retrieved with sysfs
 * @hostdata:   ibmvscsi_host_data of host
 */
static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
{
        struct viosrp_capabilities *req;
        struct srp_event_struct *evt_struct;
        unsigned long flags;
        struct device_node *of_node = hostdata->dev->of_node;
        const char *location;

        evt_struct = get_event_struct(&hostdata->pool);
        BUG_ON(!evt_struct);

        init_event_struct(evt_struct, capabilities_rsp,
                          VIOSRP_MAD_FORMAT, info_timeout);

        req = &evt_struct->iu.mad.capabilities;
        memset(req, 0, sizeof(*req));

        hostdata->caps.flags = cpu_to_be32(CAP_LIST_SUPPORTED);
        if (hostdata->client_migrated)
                hostdata->caps.flags |= cpu_to_be32(CLIENT_MIGRATED);

        strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev),
                sizeof(hostdata->caps.name));
        hostdata->caps.name[sizeof(hostdata->caps.name) - 1] = '\0';

        location = of_get_property(of_node, "ibm,loc-code", NULL);
        location = location ? location : dev_name(hostdata->dev);
        strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc));
        hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0';

        req->common.type = cpu_to_be32(VIOSRP_CAPABILITIES_TYPE);
        req->buffer = cpu_to_be64(hostdata->caps_addr);

        hostdata->caps.migration.common.cap_type =
                                cpu_to_be32(MIGRATION_CAPABILITIES);
        hostdata->caps.migration.common.length =
                                cpu_to_be16(sizeof(hostdata->caps.migration));
        hostdata->caps.migration.common.server_support =
                                cpu_to_be16(SERVER_SUPPORTS_CAP);
        hostdata->caps.migration.ecl = cpu_to_be32(1);

        if (client_reserve) {
                hostdata->caps.reserve.common.cap_type =
                                        cpu_to_be32(RESERVATION_CAPABILITIES);
                hostdata->caps.reserve.common.length =
                                cpu_to_be16(sizeof(hostdata->caps.reserve));
                hostdata->caps.reserve.common.server_support =
                                cpu_to_be16(SERVER_SUPPORTS_CAP);
                hostdata->caps.reserve.type =
                                cpu_to_be32(CLIENT_RESERVE_SCSI_2);
                req->common.length =
                                cpu_to_be16(sizeof(hostdata->caps));
        } else
                req->common.length = cpu_to_be16(sizeof(hostdata->caps) -
                                                sizeof(hostdata->caps.reserve));

        spin_lock_irqsave(hostdata->host->host_lock, flags);
        if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
                dev_err(hostdata->dev, "couldn't send CAPABILITIES_REQ!\n");
        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
}

/**
 * fast_fail_rsp: - Handle response to MAD enable fast fail
 * @evt_struct: srp_event_struct with the response
 *
 * Used as a "done" callback when sending enable fast fail. Gets called
 * by ibmvscsi_handle_crq()
 */
static void fast_fail_rsp(struct srp_event_struct *evt_struct)
{
        struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
        u16 status = be16_to_cpu(evt_struct->xfer_iu->mad.fast_fail.common.status);

        if (status == VIOSRP_MAD_NOT_SUPPORTED)
                dev_err(hostdata->dev, "fast_fail not supported in server\n");
        else if (status == VIOSRP_MAD_FAILED)
                dev_err(hostdata->dev, "fast_fail request failed\n");
        else if (status != VIOSRP_MAD_SUCCESS)
                dev_err(hostdata->dev, "error 0x%X enabling fast_fail\n", status);

        send_mad_capabilities(hostdata);
}

/**
 * enable_fast_fail - Send the MAD request to enable fast fail, if configured
 * @hostdata:   ibmvscsi_host_data of host
 *
 * Returns zero if successful.
 */
static int enable_fast_fail(struct ibmvscsi_host_data *hostdata)
{
        int rc;
        unsigned long flags;
        struct viosrp_fast_fail *fast_fail_mad;
        struct srp_event_struct *evt_struct;

        if (!fast_fail) {
                send_mad_capabilities(hostdata);
                return 0;
        }

        evt_struct = get_event_struct(&hostdata->pool);
        BUG_ON(!evt_struct);

        init_event_struct(evt_struct, fast_fail_rsp, VIOSRP_MAD_FORMAT, info_timeout);

        fast_fail_mad = &evt_struct->iu.mad.fast_fail;
        memset(fast_fail_mad, 0, sizeof(*fast_fail_mad));
        fast_fail_mad->common.type = cpu_to_be32(VIOSRP_ENABLE_FAST_FAIL);
        fast_fail_mad->common.length = cpu_to_be16(sizeof(*fast_fail_mad));

        spin_lock_irqsave(hostdata->host->host_lock, flags);
        rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
        return rc;
}

/**
 * adapter_info_rsp: - Handle response to MAD adapter info request
 * @evt_struct: srp_event_struct with the response
 *
 * Used as a "done" callback when sending adapter_info. Gets called
 * by ibmvscsi_handle_crq()
 */
1370static void adapter_info_rsp(struct srp_event_struct *evt_struct)
1371{
1372        struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
1373
1374        if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
1375                dev_err(hostdata->dev, "error %d getting adapter info\n",
1376                        evt_struct->xfer_iu->mad.adapter_info.common.status);
1377        } else {
1378                dev_info(hostdata->dev, "host srp version: %s, "
1379                         "host partition %s (%d), OS %d, max io %u\n",
1380                         hostdata->madapter_info.srp_version,
1381                         hostdata->madapter_info.partition_name,
1382                         be32_to_cpu(hostdata->madapter_info.partition_number),
1383                         be32_to_cpu(hostdata->madapter_info.os_type),
1384                         be32_to_cpu(hostdata->madapter_info.port_max_txu[0]));
1385                
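                    /* port_max_txu[0] is the host's maximum transfer size
                     * in bytes (the "max io" value logged above); shift by
                     * 9 to express it in 512-byte sectors for the midlayer.
                     */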
1386                if (hostdata->madapter_info.port_max_txu[0]) 
1387                        hostdata->host->max_sectors = 
1388                                be32_to_cpu(hostdata->madapter_info.port_max_txu[0]) >> 9;
1389                
1390                if (be32_to_cpu(hostdata->madapter_info.os_type) == 3 &&
1391                    strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
1392                        dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
1393                                hostdata->madapter_info.srp_version);
1394                        dev_err(hostdata->dev, "limiting scatterlists to %d\n",
1395                                MAX_INDIRECT_BUFS);
1396                        hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
1397                }
1398
1399                if (be32_to_cpu(hostdata->madapter_info.os_type) == 3) {
1400                        enable_fast_fail(hostdata);
1401                        return;
1402                }
1403        }
1404
1405        send_srp_login(hostdata);
1406}
1407
1408/**
1409 * send_mad_adapter_info: - Sends the mad adapter info request
1410 *      and stores the result so it can be retrieved with
1411 *      sysfs.  We COULD consider causing a failure if the
1412 *      returned SRP version doesn't match ours.
1413 * @hostdata:   ibmvscsi_host_data of host
1414 * 
1415 * Returns nothing; a failure to send the request is logged via dev_err().
1416 */
1417static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
1418{
1419        struct viosrp_adapter_info *req;
1420        struct srp_event_struct *evt_struct;
1421        unsigned long flags;
1422
1423        evt_struct = get_event_struct(&hostdata->pool);
1424        BUG_ON(!evt_struct);
1425
1426        init_event_struct(evt_struct,
1427                          adapter_info_rsp,
1428                          VIOSRP_MAD_FORMAT,
1429                          info_timeout);
1430        
1431        req = &evt_struct->iu.mad.adapter_info;
1432        memset(req, 0x00, sizeof(*req));
1433        
1434        req->common.type = cpu_to_be32(VIOSRP_ADAPTER_INFO_TYPE);
1435        req->common.length = cpu_to_be16(sizeof(hostdata->madapter_info));
1436        req->buffer = cpu_to_be64(hostdata->adapter_info_addr);
1437
1438        spin_lock_irqsave(hostdata->host->host_lock, flags);
1439        if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
1440                dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
1441        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1442}
1443
1444/**
1445 * init_adapter: Start virtual adapter initialization sequence
1446 * @hostdata:   ibmvscsi_host_data of host
1447 */
1448static void init_adapter(struct ibmvscsi_host_data *hostdata)
1449{
1450        send_mad_adapter_info(hostdata);
1451}
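    /*
     * The MAD handshake runs as a chain of "done" callbacks:
     * send_mad_adapter_info() -> adapter_info_rsp(), which either calls
     * enable_fast_fail() (whose response handler fast_fail_rsp() then
     * issues send_mad_capabilities()) or falls straight through to
     * send_srp_login().
     */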
1452
1453/**
1454 * sync_completion: Signal that a synchronous command has completed.
1455 * Note that after returning from this call the evt_struct is freed, so
1456 * the caller waiting on this completion shouldn't touch the evt_struct
1457 * again.
1458 */
1459static void sync_completion(struct srp_event_struct *evt_struct)
1460{
1461        /* copy the response back */
1462        if (evt_struct->sync_srp)
1463                *evt_struct->sync_srp = *evt_struct->xfer_iu;
1464        
1465        complete(&evt_struct->comp);
1466}
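    /*
     * Typical synchronous usage, as in the error handlers below:
     *
     *      evt->sync_srp = &srp_rsp;
     *      init_completion(&evt->comp);
     *      rc = ibmvscsi_send_srp_event(evt, hostdata, timeout * 2);
     *      ...
     *      wait_for_completion(&evt->comp);
     *
     * sync_completion() copies the response into srp_rsp before waking
     * the waiter.
     */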
1467
1468/**
1469 * ibmvscsi_eh_abort_handler: Abort a command... from scsi host template.
1470 * Send the abort over to the server and wait synchronously for the response.
1471 */
1472static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
1473{
1474        struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);
1475        struct srp_tsk_mgmt *tsk_mgmt;
1476        struct srp_event_struct *evt;
1477        struct srp_event_struct *tmp_evt, *found_evt;
1478        union viosrp_iu srp_rsp;
1479        int rsp_rc;
1480        unsigned long flags;
1481        u16 lun = lun_from_dev(cmd->device);
1482        unsigned long wait_switch = 0;
1483
1484        /* First, find this command in our sent list so we can figure
1485         * out the correct tag
1486         */
1487        spin_lock_irqsave(hostdata->host->host_lock, flags);
1488        wait_switch = jiffies + (init_timeout * HZ);
1489        do {
1490                found_evt = NULL;
1491                list_for_each_entry(tmp_evt, &hostdata->sent, list) {
1492                        if (tmp_evt->cmnd == cmd) {
1493                                found_evt = tmp_evt;
1494                                break;
1495                        }
1496                }
1497
1498                if (!found_evt) {
1499                        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1500                        return SUCCESS;
1501                }
1502
1503                evt = get_event_struct(&hostdata->pool);
1504                if (evt == NULL) {
1505                        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1506                        sdev_printk(KERN_ERR, cmd->device,
1507                                "failed to allocate abort event\n");
1508                        return FAILED;
1509                }
1510        
1511                init_event_struct(evt,
1512                                  sync_completion,
1513                                  VIOSRP_SRP_FORMAT,
1514                                  abort_timeout);
1515
1516                tsk_mgmt = &evt->iu.srp.tsk_mgmt;
1517        
1518                /* Set up an abort SRP command */
1519                memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
1520                tsk_mgmt->opcode = SRP_TSK_MGMT;
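                    /* The 64-bit SRP LUN field holds the SCSI LUN
                     * structure; a single-level LUN belongs in the two
                     * most significant bytes, hence the shift by 48.
                     */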
1521                tsk_mgmt->lun = cpu_to_be64(((u64) lun) << 48);
1522                tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
1523                tsk_mgmt->task_tag = (u64) found_evt;
1524
1525                evt->sync_srp = &srp_rsp;
1526
1527                init_completion(&evt->comp);
1528                rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, abort_timeout * 2);
1529
1530                if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
1531                        break;
1532
1533                spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1534                msleep(10);
1535                spin_lock_irqsave(hostdata->host->host_lock, flags);
1536        } while (time_before(jiffies, wait_switch));
1537
1538        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1539
1540        if (rsp_rc != 0) {
1541                sdev_printk(KERN_ERR, cmd->device,
1542                            "failed to send abort() event. rc=%d\n", rsp_rc);
1543                return FAILED;
1544        }
1545
1546        sdev_printk(KERN_INFO, cmd->device,
1547                    "aborting command. lun 0x%llx, tag 0x%llx\n",
1548                    (((u64) lun) << 48), (u64) found_evt);
1549
1550        wait_for_completion(&evt->comp);
1551
1552        /* make sure we got a good response */
1553        if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
1554                if (printk_ratelimit())
1555                        sdev_printk(KERN_WARNING, cmd->device, "abort bad SRP RSP type %d\n",
1556                                    srp_rsp.srp.rsp.opcode);
1557                return FAILED;
1558        }
1559
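            /* With RSPVALID set, the task management status is carried in
             * the response data buffer; otherwise fall back to the SCSI
             * status byte.
             */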
1560        if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
1561                rsp_rc = *((int *)srp_rsp.srp.rsp.data);
1562        else
1563                rsp_rc = srp_rsp.srp.rsp.status;
1564
1565        if (rsp_rc) {
1566                if (printk_ratelimit())
1567                        sdev_printk(KERN_WARNING, cmd->device,
1568                                    "abort code %d for task tag 0x%llx\n",
1569                                    rsp_rc, tsk_mgmt->task_tag);
1570                return FAILED;
1571        }
1572
1573        /* Because we dropped the spinlock above, it's possible
1574         * the event is no longer in our list.  Make sure it didn't
1575         * complete while we were aborting.
1576         */
1577        spin_lock_irqsave(hostdata->host->host_lock, flags);
1578        found_evt = NULL;
1579        list_for_each_entry(tmp_evt, &hostdata->sent, list) {
1580                if (tmp_evt->cmnd == cmd) {
1581                        found_evt = tmp_evt;
1582                        break;
1583                }
1584        }
1585
1586        if (found_evt == NULL) {
1587                spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1588                sdev_printk(KERN_INFO, cmd->device, "aborted task tag 0x%llx completed\n",
1589                            tsk_mgmt->task_tag);
1590                return SUCCESS;
1591        }
1592
1593        sdev_printk(KERN_INFO, cmd->device, "successfully aborted task tag 0x%llx\n",
1594                    tsk_mgmt->task_tag);
1595
1596        cmd->result = (DID_ABORT << 16);
1597        list_del(&found_evt->list);
1598        unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt,
1599                       found_evt->hostdata->dev);
1600        free_event_struct(&found_evt->hostdata->pool, found_evt);
1601        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1602        atomic_inc(&hostdata->request_limit);
1603        return SUCCESS;
1604}
1605
1606/**
1607 * ibmvscsi_eh_device_reset_handler: Reset a single LUN... from scsi host
1608 * template.  Send the reset over to the server and wait synchronously for
1609 * the response.
1610 */
1611static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
1612{
1613        struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);
1614        struct srp_tsk_mgmt *tsk_mgmt;
1615        struct srp_event_struct *evt;
1616        struct srp_event_struct *tmp_evt, *pos;
1617        union viosrp_iu srp_rsp;
1618        int rsp_rc;
1619        unsigned long flags;
1620        u16 lun = lun_from_dev(cmd->device);
1621        unsigned long wait_switch = 0;
1622
1623        spin_lock_irqsave(hostdata->host->host_lock, flags);
1624        wait_switch = jiffies + (init_timeout * HZ);
1625        do {
1626                evt = get_event_struct(&hostdata->pool);
1627                if (evt == NULL) {
1628                        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1629                        sdev_printk(KERN_ERR, cmd->device,
1630                                "failed to allocate reset event\n");
1631                        return FAILED;
1632                }
1633        
1634                init_event_struct(evt,
1635                                  sync_completion,
1636                                  VIOSRP_SRP_FORMAT,
1637                                  reset_timeout);
1638
1639                tsk_mgmt = &evt->iu.srp.tsk_mgmt;
1640
1641                /* Set up a lun reset SRP command */
1642                memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
1643                tsk_mgmt->opcode = SRP_TSK_MGMT;
1644                tsk_mgmt->lun = cpu_to_be64(((u64) lun) << 48);
1645                tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;
1646
1647                evt->sync_srp = &srp_rsp;
1648
1649                init_completion(&evt->comp);
1650                rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, reset_timeout * 2);
1651
1652                if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
1653                        break;
1654
1655                spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1656                msleep(10);
1657                spin_lock_irqsave(hostdata->host->host_lock, flags);
1658        } while (time_before(jiffies, wait_switch));
1659
1660        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1661
1662        if (rsp_rc != 0) {
1663                sdev_printk(KERN_ERR, cmd->device,
1664                            "failed to send reset event. rc=%d\n", rsp_rc);
1665                return FAILED;
1666        }
1667
1668        sdev_printk(KERN_INFO, cmd->device, "resetting device. lun 0x%llx\n",
1669                    (((u64) lun) << 48));
1670
1671        wait_for_completion(&evt->comp);
1672
1673        /* make sure we got a good response */
1674        if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
1675                if (printk_ratelimit())
1676                        sdev_printk(KERN_WARNING, cmd->device, "reset bad SRP RSP type %d\n",
1677                                    srp_rsp.srp.rsp.opcode);
1678                return FAILED;
1679        }
1680
1681        if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
1682                rsp_rc = *((int *)srp_rsp.srp.rsp.data);
1683        else
1684                rsp_rc = srp_rsp.srp.rsp.status;
1685
1686        if (rsp_rc) {
1687                if (printk_ratelimit())
1688                        sdev_printk(KERN_WARNING, cmd->device,
1689                                    "reset code %d for task tag 0x%llx\n",
1690                                    rsp_rc, tsk_mgmt->task_tag);
1691                return FAILED;
1692        }
1693
1694        /* We need to find all commands for this LUN that have not yet been
1695         * responded to, and fail them with DID_RESET
1696         */
1697        spin_lock_irqsave(hostdata->host->host_lock, flags);
1698        list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
1699                if (tmp_evt->cmnd && (tmp_evt->cmnd->device == cmd->device)) {
1700                        tmp_evt->cmnd->result = (DID_RESET << 16);
1702                        list_del(&tmp_evt->list);
1703                        unmap_cmd_data(&tmp_evt->iu.srp.cmd, tmp_evt,
1704                                       tmp_evt->hostdata->dev);
1705                        free_event_struct(&tmp_evt->hostdata->pool,
1706                                                   tmp_evt);
1707                        atomic_inc(&hostdata->request_limit);
1708                        if (tmp_evt->cmnd_done)
1709                                tmp_evt->cmnd_done(tmp_evt->cmnd);
1710                        else if (tmp_evt->done)
1711                                tmp_evt->done(tmp_evt);
1712                }
1713        }
1714        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1715        return SUCCESS;
1716}
1717
1718/**
1719 * ibmvscsi_eh_host_reset_handler - Reset the connection to the server
1720 * @cmd:        struct scsi_cmnd having problems
1721 */
1722static int ibmvscsi_eh_host_reset_handler(struct scsi_cmnd *cmd)
1723{
1724        unsigned long wait_switch = 0;
1725        struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);
1726
1727        dev_err(hostdata->dev, "Resetting connection due to error recovery\n");
1728
1729        ibmvscsi_reset_host(hostdata);
1730
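            /* request_limit should only go positive again once the
             * re-login completes; waiting for at least 2 preserves the
             * credits that error recovery itself needs (see
             * ibmvscsi_module_init()).
             */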
1731        for (wait_switch = jiffies + (init_timeout * HZ);
1732             time_before(jiffies, wait_switch) &&
1733                     atomic_read(&hostdata->request_limit) < 2;) {
1734
1735                msleep(10);
1736        }
1737
1738        if (atomic_read(&hostdata->request_limit) <= 0)
1739                return FAILED;
1740
1741        return SUCCESS;
1742}
1743
1744/**
1745 * ibmvscsi_handle_crq: - Handles and frees received events in the CRQ
1746 * @crq:        Command/Response queue
1747 * @hostdata:   ibmvscsi_host_data of host
1748 *
1749 */
1750static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
1751                                struct ibmvscsi_host_data *hostdata)
1752{
1753        long rc;
1754        unsigned long flags;
1755        /* The hypervisor copies our tag value here so no byteswapping */
1756        struct srp_event_struct *evt_struct =
1757                        (__force struct srp_event_struct *)crq->IU_data_ptr;
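            /* The first byte of a CRQ entry encodes its class: 0xC0 is a
             * transport initialization message, 0xFF a transport event
             * (connection closed or partner migrated), and 0x80 a real
             * payload echoing back one of our correlation tokens.
             */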
1758        switch (crq->valid) {
1759        case 0xC0:              /* initialization */
1760                switch (crq->format) {
1761                case 0x01:      /* Initialization message */
1762                        dev_info(hostdata->dev, "partner initialized\n");
1763                        /* Send back an init response (valid 0xC0, format 0x02) */
1764                        rc = ibmvscsi_send_crq(hostdata, 0xC002000000000000LL, 0);
1765                        if (rc == 0) {
1766                                /* Now login */
1767                                init_adapter(hostdata);
1768                        } else {
1769                                dev_err(hostdata->dev, "Unable to send init rsp. rc=%ld\n", rc);
1770                        }
1771
1772                        break;
1773                case 0x02:      /* Initialization response */
1774                        dev_info(hostdata->dev, "partner initialization complete\n");
1775
1776                        /* Now login */
1777                        init_adapter(hostdata);
1778                        break;
1779                default:
1780                        dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format);
1781                }
1782                return;
1783        case 0xFF:      /* Hypervisor telling us the connection is closed */
1784                scsi_block_requests(hostdata->host);
1785                atomic_set(&hostdata->request_limit, 0);
1786                if (crq->format == 0x06) {
1787                        /* We need to re-setup the interpartition connection */
1788                        dev_info(hostdata->dev, "Re-enabling adapter!\n");
1789                        hostdata->client_migrated = 1;
1790                        hostdata->reenable_crq = 1;
1791                        purge_requests(hostdata, DID_REQUEUE);
1792                        wake_up(&hostdata->work_wait_q);
1793                } else {
1794                        dev_err(hostdata->dev, "Virtual adapter failed rc %d!\n",
1795                                crq->format);
1796                        ibmvscsi_reset_host(hostdata);
1797                }
1798                return;
1799        case 0x80:              /* real payload */
1800                break;
1801        default:
1802                dev_err(hostdata->dev, "got an invalid message type 0x%02x\n",
1803                        crq->valid);
1804                return;
1805        }
1806
1807        /* The only kind of payload CRQs we should get are responses to
1808         * things we send. Make sure this response is to something we
1809         * actually sent
1810         */
1811        if (!valid_event_struct(&hostdata->pool, evt_struct)) {
1812                dev_err(hostdata->dev, "returned correlation_token 0x%p is invalid!\n",
1813                       evt_struct);
1814                return;
1815        }
1816
1817        if (atomic_read(&evt_struct->free)) {
1818                dev_err(hostdata->dev, "received duplicate correlation_token 0x%p!\n",
1819                        evt_struct);
1820                return;
1821        }
1822
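            /* SRP flow control: every SRP response carries a request
             * limit delta that replenishes our send credits.
             */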
1823        if (crq->format == VIOSRP_SRP_FORMAT)
1824                atomic_add(be32_to_cpu(evt_struct->xfer_iu->srp.rsp.req_lim_delta),
1825                           &hostdata->request_limit);
1826
1827        del_timer(&evt_struct->timer);
1828
1829        if ((crq->status != VIOSRP_OK && crq->status != VIOSRP_OK2) && evt_struct->cmnd)
1830                evt_struct->cmnd->result = DID_ERROR << 16;
1831        if (evt_struct->done)
1832                evt_struct->done(evt_struct);
1833        else
1834                dev_err(hostdata->dev, "returned done() is NULL; not running it!\n");
1835
1836        /*
1837         * Lock the host_lock before messing with these structures, since we
1838         * are running in a tasklet (softirq) context
1839         */
1840        spin_lock_irqsave(evt_struct->hostdata->host->host_lock, flags);
1841        list_del(&evt_struct->list);
1842        free_event_struct(&evt_struct->hostdata->pool, evt_struct);
1843        spin_unlock_irqrestore(evt_struct->hostdata->host->host_lock, flags);
1844}
1845
1846/**
1847 * ibmvscsi_get_host_config: Send the command to the server to get host
1848 * configuration data.  The data is opaque to us.
1849 */
1850static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
1851                                   unsigned char *buffer, int length)
1852{
1853        struct viosrp_host_config *host_config;
1854        struct srp_event_struct *evt_struct;
1855        unsigned long flags;
1856        dma_addr_t addr;
1857        int rc;
1858
1859        evt_struct = get_event_struct(&hostdata->pool);
1860        if (!evt_struct) {
1861                dev_err(hostdata->dev, "couldn't allocate event for HOST_CONFIG!\n");
1862                return -1;
1863        }
1864
1865        init_event_struct(evt_struct,
1866                          sync_completion,
1867                          VIOSRP_MAD_FORMAT,
1868                          info_timeout);
1869
1870        host_config = &evt_struct->iu.mad.host_config;
1871
1872        /* The transport length field is only 16-bit */
1873        length = min(0xffff, length);
1874
1875        /* Set up a host config MAD request */
1876        memset(host_config, 0x00, sizeof(*host_config));
1877        host_config->common.type = cpu_to_be32(VIOSRP_HOST_CONFIG_TYPE);
1878        host_config->common.length = cpu_to_be16(length);
1879        addr = dma_map_single(hostdata->dev, buffer, length, DMA_BIDIRECTIONAL);
1880
1881        if (dma_mapping_error(hostdata->dev, addr)) {
1882                if (!firmware_has_feature(FW_FEATURE_CMO))
1883                        dev_err(hostdata->dev,
1884                                "dma_mapping error getting host config\n");
1885                free_event_struct(&hostdata->pool, evt_struct);
1886                return -1;
1887        }
1888
1889        host_config->buffer = cpu_to_be64(addr);
1890
1891        init_completion(&evt_struct->comp);
1892        spin_lock_irqsave(hostdata->host->host_lock, flags);
1893        rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
1894        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1895        if (rc == 0)
1896                wait_for_completion(&evt_struct->comp);
1897        dma_unmap_single(hostdata->dev, addr, length, DMA_BIDIRECTIONAL);
1898
1899        return rc;
1900}
1901
1902/**
1903 * ibmvscsi_slave_configure: Set the "allow_restart" flag for each disk.
1904 * @sdev:       struct scsi_device device to configure
1905 *
1906 * Enable allow_restart for a device if it is a disk.  Adjust the
1907 * queue_depth here also as is required by the documentation for
1908 * struct scsi_host_template.
1909 */
1910static int ibmvscsi_slave_configure(struct scsi_device *sdev)
1911{
1912        struct Scsi_Host *shost = sdev->host;
1913        unsigned long lock_flags = 0;
1914
1915        spin_lock_irqsave(shost->host_lock, lock_flags);
1916        if (sdev->type == TYPE_DISK) {
1917                sdev->allow_restart = 1;
1918                blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
1919        }
1920        spin_unlock_irqrestore(shost->host_lock, lock_flags);
1921        scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
1922        return 0;
1923}
1924
1925/**
1926 * ibmvscsi_change_queue_depth - Change the device's queue depth
1927 * @sdev:       scsi device struct
1928 * @qdepth:     depth to set
1929 * @reason:     calling context
1930 *
1931 * Return value:
1932 *      actual depth set
1933 **/
1934static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth,
1935                                       int reason)
1936{
1937        if (reason != SCSI_QDEPTH_DEFAULT)
1938                return -EOPNOTSUPP;
1939
1940        if (qdepth > IBMVSCSI_MAX_CMDS_PER_LUN)
1941                qdepth = IBMVSCSI_MAX_CMDS_PER_LUN;
1942
1943        scsi_adjust_queue_depth(sdev, 0, qdepth);
1944        return sdev->queue_depth;
1945}
1946
1947/* ------------------------------------------------------------
1948 * sysfs attributes
1949 */
1950static ssize_t show_host_vhost_loc(struct device *dev,
1951                                   struct device_attribute *attr, char *buf)
1952{
1953        struct Scsi_Host *shost = class_to_shost(dev);
1954        struct ibmvscsi_host_data *hostdata = shost_priv(shost);
1955        int len;
1956
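            /* Bound the output by the size of the caps.loc field itself
             * rather than by PAGE_SIZE.
             */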
1957        len = snprintf(buf, sizeof(hostdata->caps.loc), "%s\n",
1958                       hostdata->caps.loc);
1959        return len;
1960}
1961
1962static struct device_attribute ibmvscsi_host_vhost_loc = {
1963        .attr = {
1964                 .name = "vhost_loc",
1965                 .mode = S_IRUGO,
1966                 },
1967        .show = show_host_vhost_loc,
1968};
1969
1970static ssize_t show_host_vhost_name(struct device *dev,
1971                                    struct device_attribute *attr, char *buf)
1972{
1973        struct Scsi_Host *shost = class_to_shost(dev);
1974        struct ibmvscsi_host_data *hostdata = shost_priv(shost);
1975        int len;
1976
1977        len = snprintf(buf, sizeof(hostdata->caps.name), "%s\n",
1978                       hostdata->caps.name);
1979        return len;
1980}
1981
1982static struct device_attribute ibmvscsi_host_vhost_name = {
1983        .attr = {
1984                 .name = "vhost_name",
1985                 .mode = S_IRUGO,
1986                 },
1987        .show = show_host_vhost_name,
1988};
1989
1990static ssize_t show_host_srp_version(struct device *dev,
1991                                     struct device_attribute *attr, char *buf)
1992{
1993        struct Scsi_Host *shost = class_to_shost(dev);
1994        struct ibmvscsi_host_data *hostdata = shost_priv(shost);
1995        int len;
1996
1997        len = snprintf(buf, PAGE_SIZE, "%s\n",
1998                       hostdata->madapter_info.srp_version);
1999        return len;
2000}
2001
2002static struct device_attribute ibmvscsi_host_srp_version = {
2003        .attr = {
2004                 .name = "srp_version",
2005                 .mode = S_IRUGO,
2006                 },
2007        .show = show_host_srp_version,
2008};
2009
2010static ssize_t show_host_partition_name(struct device *dev,
2011                                        struct device_attribute *attr,
2012                                        char *buf)
2013{
2014        struct Scsi_Host *shost = class_to_shost(dev);
2015        struct ibmvscsi_host_data *hostdata = shost_priv(shost);
2016        int len;
2017
2018        len = snprintf(buf, PAGE_SIZE, "%s\n",
2019                       hostdata->madapter_info.partition_name);
2020        return len;
2021}
2022
2023static struct device_attribute ibmvscsi_host_partition_name = {
2024        .attr = {
2025                 .name = "partition_name",
2026                 .mode = S_IRUGO,
2027                 },
2028        .show = show_host_partition_name,
2029};
2030
2031static ssize_t show_host_partition_number(struct device *dev,
2032                                          struct device_attribute *attr,
2033                                          char *buf)
2034{
2035        struct Scsi_Host *shost = class_to_shost(dev);
2036        struct ibmvscsi_host_data *hostdata = shost_priv(shost);
2037        int len;
2038
2039        len = snprintf(buf, PAGE_SIZE, "%d\n",
2040                       be32_to_cpu(hostdata->madapter_info.partition_number));
2041        return len;
2042}
2043
2044static struct device_attribute ibmvscsi_host_partition_number = {
2045        .attr = {
2046                 .name = "partition_number",
2047                 .mode = S_IRUGO,
2048                 },
2049        .show = show_host_partition_number,
2050};
2051
2052static ssize_t show_host_mad_version(struct device *dev,
2053                                     struct device_attribute *attr, char *buf)
2054{
2055        struct Scsi_Host *shost = class_to_shost(dev);
2056        struct ibmvscsi_host_data *hostdata = shost_priv(shost);
2057        int len;
2058
2059        len = snprintf(buf, PAGE_SIZE, "%d\n",
2060                       be32_to_cpu(hostdata->madapter_info.mad_version));
2061        return len;
2062}
2063
2064static struct device_attribute ibmvscsi_host_mad_version = {
2065        .attr = {
2066                 .name = "mad_version",
2067                 .mode = S_IRUGO,
2068                 },
2069        .show = show_host_mad_version,
2070};
2071
2072static ssize_t show_host_os_type(struct device *dev,
2073                                 struct device_attribute *attr, char *buf)
2074{
2075        struct Scsi_Host *shost = class_to_shost(dev);
2076        struct ibmvscsi_host_data *hostdata = shost_priv(shost);
2077        int len;
2078
2079        len = snprintf(buf, PAGE_SIZE, "%d\n",
                           be32_to_cpu(hostdata->madapter_info.os_type));
2080        return len;
2081}
2082
2083static struct device_attribute ibmvscsi_host_os_type = {
2084        .attr = {
2085                 .name = "os_type",
2086                 .mode = S_IRUGO,
2087                 },
2088        .show = show_host_os_type,
2089};
2090
2091static ssize_t show_host_config(struct device *dev,
2092                                struct device_attribute *attr, char *buf)
2093{
2094        struct Scsi_Host *shost = class_to_shost(dev);
2095        struct ibmvscsi_host_data *hostdata = shost_priv(shost);
2096
2097        /* returns null-terminated host config data */
2098        if (ibmvscsi_do_host_config(hostdata, buf, PAGE_SIZE) == 0)
2099                return strlen(buf);
2100        else
2101                return 0;
2102}
2103
2104static struct device_attribute ibmvscsi_host_config = {
2105        .attr = {
2106                 .name = "config",
2107                 .mode = S_IRUGO,
2108                 },
2109        .show = show_host_config,
2110};
2111
2112static struct device_attribute *ibmvscsi_attrs[] = {
2113        &ibmvscsi_host_vhost_loc,
2114        &ibmvscsi_host_vhost_name,
2115        &ibmvscsi_host_srp_version,
2116        &ibmvscsi_host_partition_name,
2117        &ibmvscsi_host_partition_number,
2118        &ibmvscsi_host_mad_version,
2119        &ibmvscsi_host_os_type,
2120        &ibmvscsi_host_config,
2121        NULL
2122};
2123
2124/* ------------------------------------------------------------
2125 * SCSI driver registration
2126 */
2127static struct scsi_host_template driver_template = {
2128        .module = THIS_MODULE,
2129        .name = "IBM POWER Virtual SCSI Adapter " IBMVSCSI_VERSION,
2130        .proc_name = "ibmvscsi",
2131        .queuecommand = ibmvscsi_queuecommand,
2132        .eh_abort_handler = ibmvscsi_eh_abort_handler,
2133        .eh_device_reset_handler = ibmvscsi_eh_device_reset_handler,
2134        .eh_host_reset_handler = ibmvscsi_eh_host_reset_handler,
2135        .slave_configure = ibmvscsi_slave_configure,
2136        .change_queue_depth = ibmvscsi_change_queue_depth,
2137        .cmd_per_lun = IBMVSCSI_CMDS_PER_LUN_DEFAULT,
2138        .can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT,
2139        .this_id = -1,
2140        .sg_tablesize = SG_ALL,
2141        .use_clustering = ENABLE_CLUSTERING,
2142        .shost_attrs = ibmvscsi_attrs,
2143};
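    /*
     * sg_tablesize starts at SG_ALL and max_sectors at the driver
     * default; both may be adjusted at runtime by adapter_info_rsp()
     * based on what the server reports.
     */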
2144
2145/**
2146 * ibmvscsi_get_desired_dma - Calculate IO memory desired by the driver
2147 *
2148 * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
2149 *
2150 * Return value:
2151 *      Number of bytes of IO data the driver will need to perform well.
2152 */
2153static unsigned long ibmvscsi_get_desired_dma(struct vio_dev *vdev)
2154{
2155        /* iu_storage data allocated in initialize_event_pool */
2156        unsigned long desired_io = max_events * sizeof(union viosrp_iu);
2157
2158        /* add io space for sg data */
2159        desired_io += (IBMVSCSI_MAX_SECTORS_DEFAULT * 512 *
2160                             IBMVSCSI_CMDS_PER_LUN_DEFAULT);
2161
2162        return desired_io;
2163}
2164
2165static void ibmvscsi_do_work(struct ibmvscsi_host_data *hostdata)
2166{
2167        int rc;
2168        char *action = "reset";
2169
2170        if (hostdata->reset_crq) {
2171                smp_rmb();
2172                hostdata->reset_crq = 0;
2173
2174                rc = ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata);
2175                if (!rc)
2176                        rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0);
2177                vio_enable_interrupts(to_vio_dev(hostdata->dev));
2178        } else if (hostdata->reenable_crq) {
2179                smp_rmb();
2180                action = "enable";
2181                rc = ibmvscsi_reenable_crq_queue(&hostdata->queue, hostdata);
2182                hostdata->reenable_crq = 0;
2183                if (!rc)
2184                        rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0);
2185        } else {
2186                return;
            }
2187
2188        if (rc) {
2189                atomic_set(&hostdata->request_limit, -1);
2190                dev_err(hostdata->dev, "error after %s\n", action);
2191        }
2192
2193        scsi_unblock_requests(hostdata->host);
2194}
2195
2196static int ibmvscsi_work_to_do(struct ibmvscsi_host_data *hostdata)
2197{
2198        if (kthread_should_stop())
2199                return 1;
2200        else if (hostdata->reset_crq) {
2201                smp_rmb();
2202                return 1;
2203        } else if (hostdata->reenable_crq) {
2204                smp_rmb();
2205                return 1;
2206        }
2207
2208        return 0;
2209}
2210
2211static int ibmvscsi_work(void *data)
2212{
2213        struct ibmvscsi_host_data *hostdata = data;
2214        int rc;
2215
2216        set_user_nice(current, -20);
2217
2218        while (1) {
2219                rc = wait_event_interruptible(hostdata->work_wait_q,
2220                                              ibmvscsi_work_to_do(hostdata));
2221
2222                BUG_ON(rc);
2223
2224                if (kthread_should_stop())
2225                        break;
2226
2227                ibmvscsi_do_work(hostdata);
2228        }
2229
2230        return 0;
2231}
2232
2233/**
2234 * ibmvscsi_probe - Called by bus code for each adapter
2235 */
2236static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
2237{
2238        struct ibmvscsi_host_data *hostdata;
2239        struct Scsi_Host *host;
2240        struct device *dev = &vdev->dev;
2241        struct srp_rport_identifiers ids;
2242        struct srp_rport *rport;
2243        unsigned long wait_switch = 0;
2244        int rc;
2245
2246        dev_set_drvdata(&vdev->dev, NULL);
2247
2248        host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
2249        if (!host) {
2250                dev_err(&vdev->dev, "couldn't allocate host data\n");
2251                goto scsi_host_alloc_failed;
2252        }
2253
2254        host->transportt = ibmvscsi_transport_template;
2255        hostdata = shost_priv(host);
2256        memset(hostdata, 0x00, sizeof(*hostdata));
2257        INIT_LIST_HEAD(&hostdata->sent);
2258        init_waitqueue_head(&hostdata->work_wait_q);
2259        hostdata->host = host;
2260        hostdata->dev = dev;
2261        atomic_set(&hostdata->request_limit, -1);
2262        hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT;
2263
2264        if (map_persist_bufs(hostdata)) {
2265                dev_err(&vdev->dev, "couldn't map persistent buffers\n");
2266                goto persist_bufs_failed;
2267        }
2268
2269        hostdata->work_thread = kthread_run(ibmvscsi_work, hostdata, "%s_%d",
2270                                            "ibmvscsi", host->host_no);
2271
2272        if (IS_ERR(hostdata->work_thread)) {
2273                dev_err(&vdev->dev, "couldn't initialize kthread. rc=%ld\n",
2274                        PTR_ERR(hostdata->work_thread));
2275                goto init_crq_failed;
2276        }
2277
2278        rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_events);
2279        if (rc != 0 && rc != H_RESOURCE) {
2280                dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
2281                goto kill_kthread;
2282        }
2283        if (initialize_event_pool(&hostdata->pool, max_events, hostdata) != 0) {
2284                dev_err(&vdev->dev, "couldn't initialize event pool\n");
2285                goto init_pool_failed;
2286        }
2287
2288        host->max_lun = 8;
2289        host->max_id = max_id;
2290        host->max_channel = max_channel;
2291        host->max_cmd_len = 16;
2292
2293        if (scsi_add_host(hostdata->host, hostdata->dev))
2294                goto add_host_failed;
2295
2296        /* we don't have a proper target_port_id so let's use the fake one */
2297        memcpy(ids.port_id, hostdata->madapter_info.partition_name,
2298               sizeof(ids.port_id));
2299        ids.roles = SRP_RPORT_ROLE_TARGET;
2300        rport = srp_rport_add(host, &ids);
2301        if (IS_ERR(rport))
2302                goto add_srp_port_failed;
2303
2304        /* Try to send an initialization message.  Note that this is allowed
2305         * to fail if the other end is not active.  In that case we don't
2306         * want to scan.
2307         */
2308        if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0
2309            || rc == H_RESOURCE) {
2310                /*
2311                 * Wait around max init_timeout secs for the adapter to finish
2312                 * initializing. When we are done initializing, we will have a
2313                 * valid request_limit.  We don't want Linux scanning before
2314                 * we are ready.
2315                 */
2316                for (wait_switch = jiffies + (init_timeout * HZ);
2317                     time_before(jiffies, wait_switch) &&
2318                     atomic_read(&hostdata->request_limit) < 2;) {
2319
2320                        msleep(10);
2321                }
2322
2323                /* if we now have a valid request_limit, initiate a scan */
2324                if (atomic_read(&hostdata->request_limit) > 0)
2325                        scsi_scan_host(host);
2326        }
2327
2328        dev_set_drvdata(&vdev->dev, hostdata);
2329        return 0;
2330
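            /* Error unwind: undo the steps above in reverse order. */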
2331      add_srp_port_failed:
2332        scsi_remove_host(hostdata->host);
2333      add_host_failed:
2334        release_event_pool(&hostdata->pool, hostdata);
2335      init_pool_failed:
2336        ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_events);
2337      kill_kthread:
2338        kthread_stop(hostdata->work_thread);
2339      init_crq_failed:
2340        unmap_persist_bufs(hostdata);
2341      persist_bufs_failed:
2342        scsi_host_put(host);
2343      scsi_host_alloc_failed:
2344        return -1;
2345}
2346
2347static int ibmvscsi_remove(struct vio_dev *vdev)
2348{
2349        struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
2350        unmap_persist_bufs(hostdata);
2351        release_event_pool(&hostdata->pool, hostdata);
2352        ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
2353                                        max_events);
2354
2355        kthread_stop(hostdata->work_thread);
2356        srp_remove_host(hostdata->host);
2357        scsi_remove_host(hostdata->host);
2358        scsi_host_put(hostdata->host);
2359
2360        return 0;
2361}
2362
2363/**
2364 * ibmvscsi_resume: Resume from suspend
2365 * @dev:        device struct
2366 *
2367 * We may have lost an interrupt across suspend/resume, so kick the
2368 * interrupt handler
2369 */
2370static int ibmvscsi_resume(struct device *dev)
2371{
2372        struct ibmvscsi_host_data *hostdata = dev_get_drvdata(dev);
2373        vio_disable_interrupts(to_vio_dev(hostdata->dev));
2374        tasklet_schedule(&hostdata->srp_task);
2375
2376        return 0;
2377}
2378
2379/**
2380 * ibmvscsi_device_table: Used by vio.c to match devices in the device tree we 
2381 * support.
2382 */
2383static struct vio_device_id ibmvscsi_device_table[] = {
2384        {"vscsi", "IBM,v-scsi"},
2385        { "", "" }
2386};
2387MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table);
2388
2389static struct dev_pm_ops ibmvscsi_pm_ops = {
2390        .resume = ibmvscsi_resume
2391};
2392
2393static struct vio_driver ibmvscsi_driver = {
2394        .id_table = ibmvscsi_device_table,
2395        .probe = ibmvscsi_probe,
2396        .remove = ibmvscsi_remove,
2397        .get_desired_dma = ibmvscsi_get_desired_dma,
2398        .name = "ibmvscsi",
2399        .pm = &ibmvscsi_pm_ops,
2400};
2401
2402static struct srp_function_template ibmvscsi_transport_functions = {
2403};
2404
2405int __init ibmvscsi_module_init(void)
2406{
2407        int ret;
2408
2409        /* Ensure we have two requests to do error recovery */
2410        driver_template.can_queue = max_requests;
2411        max_events = max_requests + 2;
2412
2413        if (!firmware_has_feature(FW_FEATURE_VIO))
2414                return -ENODEV;
2415
2416        ibmvscsi_transport_template =
2417                srp_attach_transport(&ibmvscsi_transport_functions);
2418        if (!ibmvscsi_transport_template)
2419                return -ENOMEM;
2420
2421        ret = vio_register_driver(&ibmvscsi_driver);
2422        if (ret)
2423                srp_release_transport(ibmvscsi_transport_template);
2424        return ret;
2425}
2426
2427void __exit ibmvscsi_module_exit(void)
2428{
2429        vio_unregister_driver(&ibmvscsi_driver);
2430        srp_release_transport(ibmvscsi_transport_template);
2431}
2432
2433module_init(ibmvscsi_module_init);
2434module_exit(ibmvscsi_module_exit);
2435