/*
 * linux/drivers/staging/unisys/visorhba/visorhba_main.c
 */
   1/*
   2 * Copyright (c) 2012 - 2015 UNISYS CORPORATION
   3 * All rights reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or modify
   6 * it under the terms of the GNU General Public License as published by
   7 * the Free Software Foundation; either version 2 of the License, or (at
   8 * your option) any later version.
   9 *
  10 * This program is distributed in the hope that it will be useful, but
  11 * WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
  13 * NON INFRINGEMENT.  See the GNU General Public License for more
  14 * details.
  15 */
  16
  17#include <linux/debugfs.h>
  18#include <linux/kthread.h>
  19#include <linux/idr.h>
  20#include <linux/module.h>
  21#include <linux/seq_file.h>
  22#include <scsi/scsi.h>
  23#include <scsi/scsi_host.h>
  24#include <scsi/scsi_cmnd.h>
  25#include <scsi/scsi_device.h>
  26
  27#include "visorbus.h"
  28#include "iochannel.h"
  29
/* The Send and Receive Buffers of the IO Queue may both be full */

#define IOS_ERROR_THRESHOLD  1000
#define MAX_PENDING_REQUESTS (MIN_NUMSIGNALS * 2)
#define VISORHBA_ERROR_COUNT 30

/* Root dentry for this driver's debugfs entries */
static struct dentry *visorhba_debugfs_dir;

/* GUIDS for HBA channel type supported by this driver */
static struct visor_channeltype_descriptor visorhba_channel_types[] = {
	/* Note that the only channel type we expect to be reported by the
	 * bus driver is the VISOR_VHBA channel.
	 */
	{ VISOR_VHBA_CHANNEL_GUID, "sparvhba", sizeof(struct channel_header),
	  VISOR_VHBA_CHANNEL_VERSIONID },
	{}
};

MODULE_DEVICE_TABLE(visorbus, visorhba_channel_types);
MODULE_ALIAS("visorbus:" VISOR_VHBA_CHANNEL_GUID_STR);
  50
/* Per-disk state, stored in scsi_device->hostdata (see
 * visorhba_slave_alloc()/visorhba_slave_destroy()).
 */
struct visordisk_info {
	/* back-pointer to the scsi device this info describes */
	struct scsi_device *sdev;
	u32 valid;
	/* set to IOS_ERROR_THRESHOLD by the error-handler paths */
	atomic_t ios_threshold;
	/* error tally, incremented up to VISORHBA_ERROR_COUNT */
	atomic_t error_count;
	struct visordisk_info *next;
};
  58
/* One slot in visorhba_devdata->pending: a request forwarded to the
 * IOVM that has not yet been completed.
 */
struct scsipending {
	/* per-slot cmdrsp, used when no external buffer is supplied */
	struct uiscmdrsp cmdrsp;
	/* The Data being tracked */
	void *sent;
	/* Type of pointer that is being stored */
	char cmdtype;
};
  66
/* Each scsi_host has a host_data area that contains this struct. */
struct visorhba_devdata {
	struct Scsi_Host *scsihost;
	/* the visorbus device this HBA instance services */
	struct visor_device *dev;
	struct list_head dev_info_list;
	/* Tracks the requests that have been forwarded to
	 * the IOVM and haven't returned yet
	 */
	struct scsipending pending[MAX_PENDING_REQUESTS];
	/* Start search for next pending free slot here */
	unsigned int nextinsert;
	/* lock to protect data in devdata */
	spinlock_t privlock;
	/* true once serverdown cleanup has completed */
	bool serverdown;
	/* true while a serverdown transition is in progress */
	bool serverchangingstate;
	unsigned long long acquire_failed_cnt;
	unsigned long long interrupts_rcvd;
	unsigned long long interrupts_notme;
	unsigned long long interrupts_disabled;
	u64 __iomem *flags_addr;
	atomic_t interrupt_rcvd;
	wait_queue_head_t rsp_queue;
	struct visordisk_info head;
	/* largest scsi buffer length seen so far (reported via debugfs) */
	unsigned int max_buff_len;
	int devnum;
	/* response-processing thread; stopped during serverdown */
	struct task_struct *thread;
	int thread_wait_ms;

	/*
	 * allows us to pass int handles back-and-forth between us and
	 * iovm, instead of raw pointers
	 */
	struct idr idr;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_info;
};
 104
/* NOTE(review): not referenced anywhere in this file — looks like a
 * removal candidate; confirm no other translation unit uses it.
 */
struct visorhba_devices_open {
	struct visorhba_devdata *devdata;
};
 108
 109/*
 110 * visor_thread_start - Starts a thread for the device
 111 * @threadfn:   Function the thread starts
 112 * @thrcontext: Context to pass to the thread, i.e. devdata
 113 * @name:       String describing name of thread
 114 *
 115 * Starts a thread for the device.
 116 *
 117 * Return: The task_struct * denoting the thread on success,
 118 *         or NULL on failure
 119 */
 120static struct task_struct *visor_thread_start(int (*threadfn)(void *),
 121                                              void *thrcontext, char *name)
 122{
 123        struct task_struct *task;
 124
 125        task = kthread_run(threadfn, thrcontext, "%s", name);
 126        if (IS_ERR(task)) {
 127                pr_err("visorbus failed to start thread\n");
 128                return NULL;
 129        }
 130        return task;
 131}
 132
 133/*
 134 * visor_thread_stop - Stops the thread if it is running
 135 * @task: Description of process to stop
 136 */
 137static void visor_thread_stop(struct task_struct *task)
 138{
 139        kthread_stop(task);
 140}
 141
 142/*
 143 * add_scsipending_entry - Save off io command that is pending in
 144 *                         Service Partition
 145 * @devdata: Pointer to devdata
 146 * @cmdtype: Specifies the type of command pending
 147 * @new:     The command to be saved
 148 *
 149 * Saves off the io command that is being handled by the Service
 150 * Partition so that it can be handled when it completes. If new is
 151 * NULL it is assumed the entry refers only to the cmdrsp.
 152 *
 153 * Return: Insert_location where entry was added on success,
 154 *         -EBUSY if it can't
 155 */
 156static int add_scsipending_entry(struct visorhba_devdata *devdata,
 157                                 char cmdtype, void *new)
 158{
 159        unsigned long flags;
 160        struct scsipending *entry;
 161        int insert_location;
 162
 163        spin_lock_irqsave(&devdata->privlock, flags);
 164        insert_location = devdata->nextinsert;
 165        while (devdata->pending[insert_location].sent) {
 166                insert_location = (insert_location + 1) % MAX_PENDING_REQUESTS;
 167                if (insert_location == (int)devdata->nextinsert) {
 168                        spin_unlock_irqrestore(&devdata->privlock, flags);
 169                        return -EBUSY;
 170                }
 171        }
 172
 173        entry = &devdata->pending[insert_location];
 174        memset(&entry->cmdrsp, 0, sizeof(entry->cmdrsp));
 175        entry->cmdtype = cmdtype;
 176        if (new)
 177                entry->sent = new;
 178        /* wants to send cmdrsp */
 179        else
 180                entry->sent = &entry->cmdrsp;
 181        devdata->nextinsert = (insert_location + 1) % MAX_PENDING_REQUESTS;
 182        spin_unlock_irqrestore(&devdata->privlock, flags);
 183
 184        return insert_location;
 185}
 186
 187/*
 188 * del_scsipending_ent - Removes an entry from the pending array
 189 * @devdata: Device holding the pending array
 190 * @del:     Entry to remove
 191 *
 192 * Removes the entry pointed at by del and returns it.
 193 *
 194 * Return: The scsipending entry pointed to on success, NULL on failure
 195 */
 196static void *del_scsipending_ent(struct visorhba_devdata *devdata, int del)
 197{
 198        unsigned long flags;
 199        void *sent;
 200
 201        if (del >= MAX_PENDING_REQUESTS)
 202                return NULL;
 203
 204        spin_lock_irqsave(&devdata->privlock, flags);
 205        sent = devdata->pending[del].sent;
 206        devdata->pending[del].cmdtype = 0;
 207        devdata->pending[del].sent = NULL;
 208        spin_unlock_irqrestore(&devdata->privlock, flags);
 209
 210        return sent;
 211}
 212
 213/*
 214 * get_scsipending_cmdrsp - Return the cmdrsp stored in a pending entry
 215 * @ddata: Device holding the pending array
 216 * @ent:   Entry that stores the cmdrsp
 217 *
 218 * Each scsipending entry has a cmdrsp in it. The cmdrsp is only valid
 219 * if the "sent" field is not NULL.
 220 *
 221 * Return: A pointer to the cmdrsp, NULL on failure
 222 */
 223static struct uiscmdrsp *get_scsipending_cmdrsp(struct visorhba_devdata *ddata,
 224                                                int ent)
 225{
 226        if (ddata->pending[ent].sent)
 227                return &ddata->pending[ent].cmdrsp;
 228
 229        return NULL;
 230}
 231
 232/*
 233 * simple_idr_get - Associate a provided pointer with an int value
 234 *                  1 <= value <= INT_MAX, and return this int value;
 235 *                  the pointer value can be obtained later by passing
 236 *                  this int value to idr_find()
 237 * @idrtable: The data object maintaining the pointer<-->int mappings
 238 * @p:        The pointer value to be remembered
 239 * @lock:     A spinlock used when exclusive access to idrtable is needed
 240 *
 241 * Return: The id number mapped to pointer 'p', 0 on failure
 242 */
 243static unsigned int simple_idr_get(struct idr *idrtable, void *p,
 244                                   spinlock_t *lock)
 245{
 246        int id;
 247        unsigned long flags;
 248
 249        idr_preload(GFP_KERNEL);
 250        spin_lock_irqsave(lock, flags);
 251        id = idr_alloc(idrtable, p, 1, INT_MAX, GFP_NOWAIT);
 252        spin_unlock_irqrestore(lock, flags);
 253        idr_preload_end();
 254        /* failure */
 255        if (id < 0)
 256                return 0;
 257        /* idr_alloc() guarantees > 0 */
 258        return (unsigned int)(id);
 259}
 260
 261/*
 262 * setup_scsitaskmgmt_handles - Stash the necessary handles so that the
 263 *                              completion processing logic for a taskmgmt
 264 *                              cmd will be able to find who to wake up
 265 *                              and where to stash the result
 266 * @idrtable: The data object maintaining the pointer<-->int mappings
 267 * @lock:     A spinlock used when exclusive access to idrtable is needed
 268 * @cmdrsp:   Response from the IOVM
 269 * @event:    The event handle to associate with an id
 270 * @result:   The location to place the result of the event handle into
 271 */
 272static void setup_scsitaskmgmt_handles(struct idr *idrtable, spinlock_t *lock,
 273                                       struct uiscmdrsp *cmdrsp,
 274                                       wait_queue_head_t *event, int *result)
 275{
 276        /* specify the event that has to be triggered when this */
 277        /* cmd is complete */
 278        cmdrsp->scsitaskmgmt.notify_handle =
 279                simple_idr_get(idrtable, event, lock);
 280        cmdrsp->scsitaskmgmt.notifyresult_handle =
 281                simple_idr_get(idrtable, result, lock);
 282}
 283
 284/*
 285 * cleanup_scsitaskmgmt_handles - Forget handles created by
 286 *                                setup_scsitaskmgmt_handles()
 287 * @idrtable: The data object maintaining the pointer<-->int mappings
 288 * @cmdrsp:   Response from the IOVM
 289 */
 290static void cleanup_scsitaskmgmt_handles(struct idr *idrtable,
 291                                         struct uiscmdrsp *cmdrsp)
 292{
 293        if (cmdrsp->scsitaskmgmt.notify_handle)
 294                idr_remove(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
 295        if (cmdrsp->scsitaskmgmt.notifyresult_handle)
 296                idr_remove(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
 297}
 298
/*
 * forward_taskmgmt_command - Send taskmgmt command to the Service
 *                            Partition
 * @tasktype: Type of taskmgmt command
 * @scsidev:  Scsidev that issued command
 *
 * Create a cmdrsp packet and send it to the Service Partition
 * that will service this request, then wait up to 45 seconds for
 * the completion path to post the result.
 *
 * Return: SUCCESS if the command completed, FAILED otherwise
 */
static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
				    struct scsi_device *scsidev)
{
	struct uiscmdrsp *cmdrsp;
	struct visorhba_devdata *devdata =
		(struct visorhba_devdata *)scsidev->host->hostdata;
	/* 0xffff is the "not completed yet" sentinel awaited below */
	int notifyresult = 0xffff;
	/* NOTE(review): notifyevent/notifyresult are stack variables whose
	 * addresses are published through the idr; confirm a late IOVM
	 * response cannot race past cleanup_scsitaskmgmt_handles() and
	 * dereference them after this frame is gone.
	 */
	wait_queue_head_t notifyevent;
	int scsicmd_id = 0;

	/* refuse new work while the IOVM is down or transitioning */
	if (devdata->serverdown || devdata->serverchangingstate)
		return FAILED;

	/* reserve a pending slot; its embedded cmdrsp is used below */
	scsicmd_id = add_scsipending_entry(devdata, CMD_SCSITASKMGMT_TYPE,
					   NULL);
	if (scsicmd_id < 0)
		return FAILED;

	cmdrsp = get_scsipending_cmdrsp(devdata, scsicmd_id);

	init_waitqueue_head(&notifyevent);

	/* issue TASK_MGMT_ABORT_TASK */
	cmdrsp->cmdtype = CMD_SCSITASKMGMT_TYPE;
	setup_scsitaskmgmt_handles(&devdata->idr, &devdata->privlock, cmdrsp,
				   &notifyevent, &notifyresult);

	/* save destination */
	cmdrsp->scsitaskmgmt.tasktype = tasktype;
	cmdrsp->scsitaskmgmt.vdest.channel = scsidev->channel;
	cmdrsp->scsitaskmgmt.vdest.id = scsidev->id;
	cmdrsp->scsitaskmgmt.vdest.lun = scsidev->lun;
	cmdrsp->scsitaskmgmt.handle = scsicmd_id;

	dev_dbg(&scsidev->sdev_gendev,
		"visorhba: initiating type=%d taskmgmt command\n", tasktype);
	if (visorchannel_signalinsert(devdata->dev->visorchannel,
				      IOCHAN_TO_IOPART,
				      cmdrsp))
		goto err_del_scsipending_ent;

	/* It can take the Service Partition up to 35 seconds to complete
	 * an IO in some cases, so wait 45 seconds and error out
	 */
	if (!wait_event_timeout(notifyevent, notifyresult != 0xffff,
				msecs_to_jiffies(45000)))
		goto err_del_scsipending_ent;

	dev_dbg(&scsidev->sdev_gendev,
		"visorhba: taskmgmt type=%d success; result=0x%x\n",
		 tasktype, notifyresult);
	cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
	return SUCCESS;

err_del_scsipending_ent:
	/* insert failed or the wait timed out: release slot and handles */
	dev_dbg(&scsidev->sdev_gendev,
		"visorhba: taskmgmt type=%d not executed\n", tasktype);
	del_scsipending_ent(devdata, scsicmd_id);
	cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
	return FAILED;
}
 371
 372/*
 373 * visorhba_abort_handler - Send TASK_MGMT_ABORT_TASK
 374 * @scsicmd: The scsicmd that needs aborted
 375 *
 376 * Return: SUCCESS if inserted, FAILED otherwise
 377 */
 378static int visorhba_abort_handler(struct scsi_cmnd *scsicmd)
 379{
 380        /* issue TASK_MGMT_ABORT_TASK */
 381        struct scsi_device *scsidev;
 382        struct visordisk_info *vdisk;
 383        int rtn;
 384
 385        scsidev = scsicmd->device;
 386        vdisk = scsidev->hostdata;
 387        if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
 388                atomic_inc(&vdisk->error_count);
 389        else
 390                atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
 391        rtn = forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsidev);
 392        if (rtn == SUCCESS) {
 393                scsicmd->result = DID_ABORT << 16;
 394                scsicmd->scsi_done(scsicmd);
 395        }
 396        return rtn;
 397}
 398
 399/*
 400 * visorhba_device_reset_handler - Send TASK_MGMT_LUN_RESET
 401 * @scsicmd: The scsicmd that needs aborted
 402 *
 403 * Return: SUCCESS if inserted, FAILED otherwise
 404 */
 405static int visorhba_device_reset_handler(struct scsi_cmnd *scsicmd)
 406{
 407        /* issue TASK_MGMT_LUN_RESET */
 408        struct scsi_device *scsidev;
 409        struct visordisk_info *vdisk;
 410        int rtn;
 411
 412        scsidev = scsicmd->device;
 413        vdisk = scsidev->hostdata;
 414        if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
 415                atomic_inc(&vdisk->error_count);
 416        else
 417                atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
 418        rtn = forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsidev);
 419        if (rtn == SUCCESS) {
 420                scsicmd->result = DID_RESET << 16;
 421                scsicmd->scsi_done(scsicmd);
 422        }
 423        return rtn;
 424}
 425
 426/*
 427 * visorhba_bus_reset_handler - Send TASK_MGMT_TARGET_RESET for each
 428 *                              target on the bus
 429 * @scsicmd: The scsicmd that needs aborted
 430 *
 431 * Return: SUCCESS if inserted, FAILED otherwise
 432 */
 433static int visorhba_bus_reset_handler(struct scsi_cmnd *scsicmd)
 434{
 435        struct scsi_device *scsidev;
 436        struct visordisk_info *vdisk;
 437        int rtn;
 438
 439        scsidev = scsicmd->device;
 440        shost_for_each_device(scsidev, scsidev->host) {
 441                vdisk = scsidev->hostdata;
 442                if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
 443                        atomic_inc(&vdisk->error_count);
 444                else
 445                        atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
 446        }
 447        rtn = forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsidev);
 448        if (rtn == SUCCESS) {
 449                scsicmd->result = DID_RESET << 16;
 450                scsicmd->scsi_done(scsicmd);
 451        }
 452        return rtn;
 453}
 454
/*
 * visorhba_host_reset_handler - Not supported
 * @scsicmd: The scsicmd that needs to be aborted
 *
 * Host resets are not forwarded to the Service Partition; SUCCESS is
 * returned unconditionally.
 *
 * Return: Not supported, return SUCCESS
 */
static int visorhba_host_reset_handler(struct scsi_cmnd *scsicmd)
{
	/* issue TASK_MGMT_TARGET_RESET for each target on each bus for host */
	return SUCCESS;
}
 466
/*
 * visorhba_get_info - Get information about SCSI device
 * @shp: Scsi host that is requesting information
 *
 * Return: Static string identifying this driver ("visorhba")
 */
static const char *visorhba_get_info(struct Scsi_Host *shp)
{
	/* Return version string */
	return "visorhba";
}
 478
 479/*
 480 * dma_data_dir_linux_to_spar - convert dma_data_direction value to
 481 *                              Unisys-specific equivalent
 482 * @d: dma direction value to convert
 483 *
 484 * Returns the Unisys-specific dma direction value corresponding to @d
 485 */
 486static u32 dma_data_dir_linux_to_spar(enum dma_data_direction d)
 487{
 488        switch (d) {
 489        case DMA_BIDIRECTIONAL:
 490                return UIS_DMA_BIDIRECTIONAL;
 491        case DMA_TO_DEVICE:
 492                return UIS_DMA_TO_DEVICE;
 493        case DMA_FROM_DEVICE:
 494                return UIS_DMA_FROM_DEVICE;
 495        case DMA_NONE:
 496                return UIS_DMA_NONE;
 497        default:
 498                return UIS_DMA_NONE;
 499        }
 500}
 501
/*
 * visorhba_queue_command_lck - Queues command to the Service Partition
 * @scsicmd:            Command to be queued
 * @visorhba_cmnd_done: Done command to call when scsicmd is returned
 *
 * Queues to scsicmd to the ServicePartition after converting it to a
 * uiscmdrsp structure.
 *
 * Return: 0 if successfully queued to the Service Partition, otherwise
 *         SCSI_MLQUEUE_DEVICE_BUSY so the midlayer retries later
 */
static int visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
				      void (*visorhba_cmnd_done)
					   (struct scsi_cmnd *))
{
	struct uiscmdrsp *cmdrsp;
	struct scsi_device *scsidev = scsicmd->device;
	int insert_location;
	unsigned char *cdb = scsicmd->cmnd;
	struct Scsi_Host *scsihost = scsidev->host;
	unsigned int i;
	struct visorhba_devdata *devdata =
		(struct visorhba_devdata *)scsihost->hostdata;
	struct scatterlist *sg = NULL;
	struct scatterlist *sglist = NULL;

	/* ask the midlayer to retry while the IOVM is unavailable */
	if (devdata->serverdown || devdata->serverchangingstate)
		return SCSI_MLQUEUE_DEVICE_BUSY;

	/* reserve a tracking slot; scsicmd comes back on completion */
	insert_location = add_scsipending_entry(devdata, CMD_SCSI_TYPE,
						(void *)scsicmd);
	if (insert_location < 0)
		return SCSI_MLQUEUE_DEVICE_BUSY;

	cmdrsp = get_scsipending_cmdrsp(devdata, insert_location);
	cmdrsp->cmdtype = CMD_SCSI_TYPE;
	/* save the pending insertion location. Deletion from pending
	 * will return the scsicmd pointer for completion
	 */
	cmdrsp->scsi.handle = insert_location;

	/* save done function that we have call when cmd is complete */
	scsicmd->scsi_done = visorhba_cmnd_done;
	/* save destination */
	cmdrsp->scsi.vdest.channel = scsidev->channel;
	cmdrsp->scsi.vdest.id = scsidev->id;
	cmdrsp->scsi.vdest.lun = scsidev->lun;
	/* save datadir */
	cmdrsp->scsi.data_dir =
		dma_data_dir_linux_to_spar(scsicmd->sc_data_direction);
	memcpy(cmdrsp->scsi.cmnd, cdb, MAX_CMND_SIZE);
	cmdrsp->scsi.bufflen = scsi_bufflen(scsicmd);

	/* keep track of the max buffer length so far. */
	if (cmdrsp->scsi.bufflen > devdata->max_buff_len)
		devdata->max_buff_len = cmdrsp->scsi.bufflen;

	/* more s/g entries than the channel can describe: give up */
	if (scsi_sg_count(scsicmd) > MAX_PHYS_INFO)
		goto err_del_scsipending_ent;

	/* convert buffer to phys information  */
	/* buffer is scatterlist - copy it out */
	sglist = scsi_sglist(scsicmd);

	for_each_sg(sglist, sg, scsi_sg_count(scsicmd), i) {
		cmdrsp->scsi.gpi_list[i].address = sg_phys(sg);
		cmdrsp->scsi.gpi_list[i].length = sg->length;
	}
	cmdrsp->scsi.guest_phys_entries = scsi_sg_count(scsicmd);

	if (visorchannel_signalinsert(devdata->dev->visorchannel,
				      IOCHAN_TO_IOPART,
				      cmdrsp))
		/* queue must be full and we aren't going to wait */
		goto err_del_scsipending_ent;

	return 0;

err_del_scsipending_ent:
	del_scsipending_ent(devdata, insert_location);
	return SCSI_MLQUEUE_DEVICE_BUSY;
}
 584
/* Wrap the lck variant with the midlayer's standard queuecommand
 * prologue when DEF_SCSI_QCMD is available; otherwise the lck form is
 * used directly.
 */
#ifdef DEF_SCSI_QCMD
static DEF_SCSI_QCMD(visorhba_queue_command)
#else
#define visorhba_queue_command visorhba_queue_command_lck
#endif
 590
 591/*
 592 * visorhba_slave_alloc - Called when new disk is discovered
 593 * @scsidev: New disk
 594 *
 595 * Create a new visordisk_info structure and add it to our
 596 * list of vdisks.
 597 *
 598 * Return: 0 on success, -ENOMEM on failure.
 599 */
 600static int visorhba_slave_alloc(struct scsi_device *scsidev)
 601{
 602        /* this is called by the midlayer before scan for new devices --
 603         * LLD can alloc any struct & do init if needed.
 604         */
 605        struct visordisk_info *vdisk;
 606        struct visorhba_devdata *devdata;
 607        struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;
 608
 609        /* already allocated return success */
 610        if (scsidev->hostdata)
 611                return 0;
 612
 613        /* even though we errored, treat as success */
 614        devdata = (struct visorhba_devdata *)scsihost->hostdata;
 615        if (!devdata)
 616                return 0;
 617
 618        vdisk = kzalloc(sizeof(*vdisk), GFP_ATOMIC);
 619        if (!vdisk)
 620                return -ENOMEM;
 621
 622        vdisk->sdev = scsidev;
 623        scsidev->hostdata = vdisk;
 624        return 0;
 625}
 626
 627/*
 628 * visorhba_slave_destroy - Disk is going away, clean up resources.
 629 * @scsidev: Scsi device to destroy
 630 */
 631static void visorhba_slave_destroy(struct scsi_device *scsidev)
 632{
 633        /* midlevel calls this after device has been quiesced and
 634         * before it is to be deleted.
 635         */
 636        struct visordisk_info *vdisk;
 637
 638        vdisk = scsidev->hostdata;
 639        scsidev->hostdata = NULL;
 640        kfree(vdisk);
 641}
 642
/* scsi midlayer entry points for this HBA */
static struct scsi_host_template visorhba_driver_template = {
	.name = "Unisys Visor HBA",
	.info = visorhba_get_info,
	.queuecommand = visorhba_queue_command,
	.eh_abort_handler = visorhba_abort_handler,
	.eh_device_reset_handler = visorhba_device_reset_handler,
	.eh_bus_reset_handler = visorhba_bus_reset_handler,
	.eh_host_reset_handler = visorhba_host_reset_handler,
	.shost_attrs = NULL,
#define visorhba_MAX_CMNDS 128
	.can_queue = visorhba_MAX_CMNDS,
	.sg_tablesize = 64,
	.this_id = -1,
	.slave_alloc = visorhba_slave_alloc,
	.slave_destroy = visorhba_slave_destroy,
	.use_clustering = ENABLE_CLUSTERING,
};
 660
/*
 * info_debugfs_show - Debugfs interface to dump visorhba states
 * @seq: The sequence file to write information to
 * @v:   Unused, but needed for use with seq file single_open invocation
 *
 * Presents a file in the debugfs tree named: /visorhba/vbus<x>:dev<y>/info.
 *
 * Return: 0 (success)
 */
static int info_debugfs_show(struct seq_file *seq, void *v)
{
	struct visorhba_devdata *devdata = seq->private;

	seq_printf(seq, "max_buff_len = %u\n", devdata->max_buff_len);
	seq_printf(seq, "interrupts_rcvd = %llu\n", devdata->interrupts_rcvd);
	seq_printf(seq, "interrupts_disabled = %llu\n",
		   devdata->interrupts_disabled);
	seq_printf(seq, "interrupts_notme = %llu\n",
		   devdata->interrupts_notme);
	seq_printf(seq, "flags_addr = %p\n", devdata->flags_addr);
	/* flags_addr is iomem; only dump the extras when it is mapped */
	if (devdata->flags_addr) {
		u64 phys_flags_addr =
			virt_to_phys((__force  void *)devdata->flags_addr);
		seq_printf(seq, "phys_flags_addr = 0x%016llx\n",
			   phys_flags_addr);
		seq_printf(seq, "FeatureFlags = %llu\n",
			   (u64)readq(devdata->flags_addr));
	}
	seq_printf(seq, "acquire_failed_cnt = %llu\n",
		   devdata->acquire_failed_cnt);

	return 0;
}
 694
/* debugfs open: bind the seq_file to this device's private devdata. */
static int info_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, info_debugfs_show, inode->i_private);
}
 699
/* file_operations for the per-device debugfs "info" node */
static const struct file_operations info_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = info_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
 707
 708/*
 709 * complete_taskmgmt_command - Complete task management
 710 * @idrtable: The data object maintaining the pointer<-->int mappings
 711 * @cmdrsp:   Response from the IOVM
 712 * @result:   The result of the task management command
 713 *
 714 * Service Partition returned the result of the task management
 715 * command. Wake up anyone waiting for it.
 716 */
 717static void complete_taskmgmt_command(struct idr *idrtable,
 718                                      struct uiscmdrsp *cmdrsp, int result)
 719{
 720        wait_queue_head_t *wq =
 721                idr_find(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
 722        int *scsi_result_ptr =
 723                idr_find(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
 724        if (unlikely(!(wq && scsi_result_ptr))) {
 725                pr_err("visorhba: no completion context; cmd will time out\n");
 726                return;
 727        }
 728
 729        /* copy the result of the taskmgmt and
 730         * wake up the error handler that is waiting for this
 731         */
 732        pr_debug("visorhba: notifying initiator with result=0x%x\n", result);
 733        *scsi_result_ptr = result;
 734        wake_up_all(wq);
 735}
 736
/*
 * visorhba_serverdown_complete - Called when we are done cleaning up
 *                                from serverdown
 * @devdata: Visorhba instance on which to complete serverdown
 *
 * Called when we are done cleaning up from serverdown, stop processing
 * queue, fail pending IOs.
 */
static void visorhba_serverdown_complete(struct visorhba_devdata *devdata)
{
	int i;
	struct scsipending *pendingdel = NULL;
	struct scsi_cmnd *scsicmd = NULL;
	struct uiscmdrsp *cmdrsp;
	unsigned long flags;

	/* Stop using the IOVM response queue (queue should be drained
	 * by the end)
	 */
	visor_thread_stop(devdata->thread);

	/* Fail commands that weren't completed */
	spin_lock_irqsave(&devdata->privlock, flags);
	for (i = 0; i < MAX_PENDING_REQUESTS; i++) {
		pendingdel = &devdata->pending[i];
		switch (pendingdel->cmdtype) {
		case CMD_SCSI_TYPE:
			/* ordinary scsi command: complete it as a reset.
			 * NOTE(review): scsi_done is invoked with privlock
			 * held and irqs off — confirm that is safe here.
			 */
			scsicmd = pendingdel->sent;
			scsicmd->result = DID_RESET << 16;
			if (scsicmd->scsi_done)
				scsicmd->scsi_done(scsicmd);
			break;
		case CMD_SCSITASKMGMT_TYPE:
			/* taskmgmt command: wake the waiter with failure */
			cmdrsp = pendingdel->sent;
			complete_taskmgmt_command(&devdata->idr, cmdrsp,
						  TASK_MGMT_FAILED);
			break;
		default:
			/* slot not in use */
			break;
		}
		pendingdel->cmdtype = 0;
		pendingdel->sent = NULL;
	}
	spin_unlock_irqrestore(&devdata->privlock, flags);

	devdata->serverdown = true;
	devdata->serverchangingstate = false;
}
 785
 786/*
 787 * visorhba_serverdown - Got notified that the IOVM is down
 788 * @devdata: Visorhba that is being serviced by downed IOVM
 789 *
 790 * Something happened to the IOVM, return immediately and
 791 * schedule cleanup work.
 792 *
 793 * Return: 0 on success, -EINVAL on failure
 794 */
 795static int visorhba_serverdown(struct visorhba_devdata *devdata)
 796{
 797        if (!devdata->serverdown && !devdata->serverchangingstate) {
 798                devdata->serverchangingstate = true;
 799                visorhba_serverdown_complete(devdata);
 800        } else if (devdata->serverchangingstate) {
 801                return -EINVAL;
 802        }
 803        return 0;
 804}
 805
 806/*
 807 * do_scsi_linuxstat - Scsi command returned linuxstat
 808 * @cmdrsp:  Response from IOVM
 809 * @scsicmd: Command issued
 810 *
 811 * Don't log errors for disk-not-present inquiries.
 812 */
 813static void do_scsi_linuxstat(struct uiscmdrsp *cmdrsp,
 814                              struct scsi_cmnd *scsicmd)
 815{
 816        struct visordisk_info *vdisk;
 817        struct scsi_device *scsidev;
 818
 819        scsidev = scsicmd->device;
 820        memcpy(scsicmd->sense_buffer, cmdrsp->scsi.sensebuf, MAX_SENSE_SIZE);
 821
 822        /* Do not log errors for disk-not-present inquiries */
 823        if (cmdrsp->scsi.cmnd[0] == INQUIRY &&
 824            (host_byte(cmdrsp->scsi.linuxstat) == DID_NO_CONNECT) &&
 825            cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT)
 826                return;
 827        /* Okay see what our error_count is here.... */
 828        vdisk = scsidev->hostdata;
 829        if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT) {
 830                atomic_inc(&vdisk->error_count);
 831                atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
 832        }
 833}
 834
 835static int set_no_disk_inquiry_result(unsigned char *buf, size_t len,
 836                                      bool is_lun0)
 837{
 838        if (len < NO_DISK_INQUIRY_RESULT_LEN)
 839                return -EINVAL;
 840        memset(buf, 0, NO_DISK_INQUIRY_RESULT_LEN);
 841        buf[2] = SCSI_SPC2_VER;
 842        if (is_lun0) {
 843                buf[0] = DEV_DISK_CAPABLE_NOT_PRESENT;
 844                buf[3] = DEV_HISUPPORT;
 845        } else {
 846                buf[0] = DEV_NOT_CAPABLE;
 847        }
 848        buf[4] = NO_DISK_INQUIRY_RESULT_LEN - 5;
 849        strncpy(buf + 8, "DELLPSEUDO DEVICE .", NO_DISK_INQUIRY_RESULT_LEN - 8);
 850        return 0;
 851}
 852
 853/*
 854 * do_scsi_nolinuxstat - Scsi command didn't have linuxstat
 855 * @cmdrsp:  Response from IOVM
 856 * @scsicmd: Command issued
 857 *
 858 * Handle response when no linuxstat was returned.
 859 */
 860static void do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp,
 861                                struct scsi_cmnd *scsicmd)
 862{
 863        struct scsi_device *scsidev;
 864        unsigned char *buf;
 865        struct scatterlist *sg;
 866        unsigned int i;
 867        char *this_page;
 868        char *this_page_orig;
 869        int bufind = 0;
 870        struct visordisk_info *vdisk;
 871
 872        scsidev = scsicmd->device;
 873        if (cmdrsp->scsi.cmnd[0] == INQUIRY &&
 874            cmdrsp->scsi.bufflen >= MIN_INQUIRY_RESULT_LEN) {
 875                if (cmdrsp->scsi.no_disk_result == 0)
 876                        return;
 877
 878                buf = kzalloc(sizeof(char) * 36, GFP_KERNEL);
 879                if (!buf)
 880                        return;
 881
 882                /* Linux scsi code wants a device at Lun 0
 883                 * to issue report luns, but we don't want
 884                 * a disk there so we'll present a processor
 885                 * there.
 886                 */
 887                set_no_disk_inquiry_result(buf, (size_t)cmdrsp->scsi.bufflen,
 888                                           scsidev->lun == 0);
 889
 890                if (scsi_sg_count(scsicmd) == 0) {
 891                        memcpy(scsi_sglist(scsicmd), buf,
 892                               cmdrsp->scsi.bufflen);
 893                        kfree(buf);
 894                        return;
 895                }
 896
 897                sg = scsi_sglist(scsicmd);
 898                for (i = 0; i < scsi_sg_count(scsicmd); i++) {
 899                        this_page_orig = kmap_atomic(sg_page(sg + i));
 900                        this_page = (void *)((unsigned long)this_page_orig |
 901                                             sg[i].offset);
 902                        memcpy(this_page, buf + bufind, sg[i].length);
 903                        kunmap_atomic(this_page_orig);
 904                }
 905                kfree(buf);
 906        } else {
 907                vdisk = scsidev->hostdata;
 908                if (atomic_read(&vdisk->ios_threshold) > 0) {
 909                        atomic_dec(&vdisk->ios_threshold);
 910                        if (atomic_read(&vdisk->ios_threshold) == 0)
 911                                atomic_set(&vdisk->error_count, 0);
 912                }
 913        }
 914}
 915
 916/*
 917 * complete_scsi_command - Complete a scsi command
 918 * @uiscmdrsp: Response from Service Partition
 919 * @scsicmd:   The scsi command
 920 *
 921 * Response was returned by the Service Partition. Finish it and send
 922 * completion to the scsi midlayer.
 923 */
 924static void complete_scsi_command(struct uiscmdrsp *cmdrsp,
 925                                  struct scsi_cmnd *scsicmd)
 926{
 927        /* take what we need out of cmdrsp and complete the scsicmd */
 928        scsicmd->result = cmdrsp->scsi.linuxstat;
 929        if (cmdrsp->scsi.linuxstat)
 930                do_scsi_linuxstat(cmdrsp, scsicmd);
 931        else
 932                do_scsi_nolinuxstat(cmdrsp, scsicmd);
 933
 934        scsicmd->scsi_done(scsicmd);
 935}
 936
 937/*
 938 * drain_queue - Pull responses out of iochannel
 939 * @cmdrsp:  Response from the IOSP
 940 * @devdata: Device that owns this iochannel
 941 *
 942 * Pulls responses out of the iochannel and process the responses.
 943 */
 944static void drain_queue(struct uiscmdrsp *cmdrsp,
 945                        struct visorhba_devdata *devdata)
 946{
 947        struct scsi_cmnd *scsicmd;
 948
 949        while (1) {
 950                /* queue empty */
 951                if (visorchannel_signalremove(devdata->dev->visorchannel,
 952                                              IOCHAN_FROM_IOPART,
 953                                              cmdrsp))
 954                        break;
 955                if (cmdrsp->cmdtype == CMD_SCSI_TYPE) {
 956                        /* scsicmd location is returned by the
 957                         * deletion
 958                         */
 959                        scsicmd = del_scsipending_ent(devdata,
 960                                                      cmdrsp->scsi.handle);
 961                        if (!scsicmd)
 962                                break;
 963                        /* complete the orig cmd */
 964                        complete_scsi_command(cmdrsp, scsicmd);
 965                } else if (cmdrsp->cmdtype == CMD_SCSITASKMGMT_TYPE) {
 966                        if (!del_scsipending_ent(devdata,
 967                                                 cmdrsp->scsitaskmgmt.handle))
 968                                break;
 969                        complete_taskmgmt_command(&devdata->idr, cmdrsp,
 970                                                  cmdrsp->scsitaskmgmt.result);
 971                } else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE)
 972                        dev_err_once(&devdata->dev->device,
 973                                     "ignoring unsupported NOTIFYGUEST\n");
 974                /* cmdrsp is now available for re-use */
 975        }
 976}
 977
 978/*
 979 * process_incoming_rsps - Process responses from IOSP
 980 * @v:  Void pointer to visorhba_devdata
 981 *
 982 * Main function for the thread that processes the responses
 983 * from the IO Service Partition. When the queue is empty, wait
 984 * to check to see if it is full again.
 985 *
 986 * Return: 0 on success, -ENOMEM on failure
 987 */
 988static int process_incoming_rsps(void *v)
 989{
 990        struct visorhba_devdata *devdata = v;
 991        struct uiscmdrsp *cmdrsp = NULL;
 992        const int size = sizeof(*cmdrsp);
 993
 994        cmdrsp = kmalloc(size, GFP_ATOMIC);
 995        if (!cmdrsp)
 996                return -ENOMEM;
 997
 998        while (1) {
 999                if (kthread_should_stop())
1000                        break;
1001                wait_event_interruptible_timeout(
1002                        devdata->rsp_queue, (atomic_read(
1003                                             &devdata->interrupt_rcvd) == 1),
1004                                msecs_to_jiffies(devdata->thread_wait_ms));
1005                /* drain queue */
1006                drain_queue(cmdrsp, devdata);
1007        }
1008        kfree(cmdrsp);
1009        return 0;
1010}
1011
1012/*
1013 * visorhba_pause - Function to handle visorbus pause messages
1014 * @dev:           Device that is pausing
1015 * @complete_func: Function to call when finished
1016 *
1017 * Something has happened to the IO Service Partition that is
1018 * handling this device. Quiet this device and reset commands
1019 * so that the Service Partition can be corrected.
1020 *
1021 * Return: SUCCESS
1022 */
1023static int visorhba_pause(struct visor_device *dev,
1024                          visorbus_state_complete_func complete_func)
1025{
1026        struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
1027
1028        visorhba_serverdown(devdata);
1029        complete_func(dev, 0);
1030        return 0;
1031}
1032
1033/*
1034 * visorhba_resume - Function called when the IO Service Partition is back
1035 * @dev:           Device that is pausing
1036 * @complete_func: Function to call when finished
1037 *
1038 * Yay! The IO Service Partition is back, the channel has been wiped
1039 * so lets re-establish connection and start processing responses.
1040 *
1041 * Return: 0 on success, -EINVAL on failure
1042 */
1043static int visorhba_resume(struct visor_device *dev,
1044                           visorbus_state_complete_func complete_func)
1045{
1046        struct visorhba_devdata *devdata;
1047
1048        devdata = dev_get_drvdata(&dev->device);
1049        if (!devdata)
1050                return -EINVAL;
1051
1052        if (devdata->serverdown && !devdata->serverchangingstate)
1053                devdata->serverchangingstate = true;
1054
1055        devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
1056                                             "vhba_incming");
1057        devdata->serverdown = false;
1058        devdata->serverchangingstate = false;
1059
1060        return 0;
1061}
1062
1063/*
1064 * visorhba_probe - Device has been discovered; do acquire
1065 * @dev: visor_device that was discovered
1066 *
1067 * A new HBA was discovered; do the initial connections of it.
1068 *
1069 * Return: 0 on success, otherwise error code
1070 */
1071static int visorhba_probe(struct visor_device *dev)
1072{
1073        struct Scsi_Host *scsihost;
1074        struct vhba_config_max max;
1075        struct visorhba_devdata *devdata = NULL;
1076        int err, channel_offset;
1077        u64 features;
1078
1079        scsihost = scsi_host_alloc(&visorhba_driver_template,
1080                                   sizeof(*devdata));
1081        if (!scsihost)
1082                return -ENODEV;
1083
1084        channel_offset = offsetof(struct visor_io_channel, vhba.max);
1085        err = visorbus_read_channel(dev, channel_offset, &max,
1086                                    sizeof(struct vhba_config_max));
1087        if (err < 0)
1088                goto err_scsi_host_put;
1089
1090        scsihost->max_id = (unsigned int)max.max_id;
1091        scsihost->max_lun = (unsigned int)max.max_lun;
1092        scsihost->cmd_per_lun = (unsigned int)max.cmd_per_lun;
1093        scsihost->max_sectors =
1094            (unsigned short)(max.max_io_size >> 9);
1095        scsihost->sg_tablesize =
1096            (unsigned short)(max.max_io_size / PAGE_SIZE);
1097        if (scsihost->sg_tablesize > MAX_PHYS_INFO)
1098                scsihost->sg_tablesize = MAX_PHYS_INFO;
1099        err = scsi_add_host(scsihost, &dev->device);
1100        if (err < 0)
1101                goto err_scsi_host_put;
1102
1103        devdata = (struct visorhba_devdata *)scsihost->hostdata;
1104        devdata->dev = dev;
1105        dev_set_drvdata(&dev->device, devdata);
1106
1107        devdata->debugfs_dir = debugfs_create_dir(dev_name(&dev->device),
1108                                                  visorhba_debugfs_dir);
1109        if (!devdata->debugfs_dir) {
1110                err = -ENOMEM;
1111                goto err_scsi_remove_host;
1112        }
1113        devdata->debugfs_info =
1114                debugfs_create_file("info", 0440,
1115                                    devdata->debugfs_dir, devdata,
1116                                    &info_debugfs_fops);
1117        if (!devdata->debugfs_info) {
1118                err = -ENOMEM;
1119                goto err_debugfs_dir;
1120        }
1121
1122        init_waitqueue_head(&devdata->rsp_queue);
1123        spin_lock_init(&devdata->privlock);
1124        devdata->serverdown = false;
1125        devdata->serverchangingstate = false;
1126        devdata->scsihost = scsihost;
1127
1128        channel_offset = offsetof(struct visor_io_channel,
1129                                  channel_header.features);
1130        err = visorbus_read_channel(dev, channel_offset, &features, 8);
1131        if (err)
1132                goto err_debugfs_info;
1133        features |= VISOR_CHANNEL_IS_POLLING;
1134        err = visorbus_write_channel(dev, channel_offset, &features, 8);
1135        if (err)
1136                goto err_debugfs_info;
1137
1138        idr_init(&devdata->idr);
1139
1140        devdata->thread_wait_ms = 2;
1141        devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
1142                                             "vhba_incoming");
1143
1144        scsi_scan_host(scsihost);
1145
1146        return 0;
1147
1148err_debugfs_info:
1149        debugfs_remove(devdata->debugfs_info);
1150
1151err_debugfs_dir:
1152        debugfs_remove_recursive(devdata->debugfs_dir);
1153
1154err_scsi_remove_host:
1155        scsi_remove_host(scsihost);
1156
1157err_scsi_host_put:
1158        scsi_host_put(scsihost);
1159        return err;
1160}
1161
1162/*
1163 * visorhba_remove - Remove a visorhba device
1164 * @dev: Device to remove
1165 *
1166 * Removes the visorhba device.
1167 */
1168static void visorhba_remove(struct visor_device *dev)
1169{
1170        struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
1171        struct Scsi_Host *scsihost = NULL;
1172
1173        if (!devdata)
1174                return;
1175
1176        scsihost = devdata->scsihost;
1177        visor_thread_stop(devdata->thread);
1178        scsi_remove_host(scsihost);
1179        scsi_host_put(scsihost);
1180
1181        idr_destroy(&devdata->idr);
1182
1183        dev_set_drvdata(&dev->device, NULL);
1184        debugfs_remove(devdata->debugfs_info);
1185        debugfs_remove_recursive(devdata->debugfs_dir);
1186}
1187
/* This is used to tell the visorbus driver which types of visor devices
 * we support, and what functions to call when a visor device that we support
 * is attached or removed.
 */
static struct visor_driver visorhba_driver = {
	.name = "visorhba",
	.owner = THIS_MODULE,
	.channel_types = visorhba_channel_types,
	.probe = visorhba_probe,
	.remove = visorhba_remove,
	.pause = visorhba_pause,
	.resume = visorhba_resume,
	/* responses are polled by the kthread, so no interrupt handler */
	.channel_interrupt = NULL,
};
1202
1203/*
1204 * visorhba_init - Driver init routine
1205 *
1206 * Initialize the visorhba driver and register it with visorbus
1207 * to handle s-Par virtual host bus adapter.
1208 *
1209 * Return: 0 on success, error code otherwise
1210 */
1211static int visorhba_init(void)
1212{
1213        int rc = -ENOMEM;
1214
1215        visorhba_debugfs_dir = debugfs_create_dir("visorhba", NULL);
1216        if (!visorhba_debugfs_dir)
1217                return -ENOMEM;
1218
1219        rc = visorbus_register_visor_driver(&visorhba_driver);
1220        if (rc)
1221                goto cleanup_debugfs;
1222
1223        return 0;
1224
1225cleanup_debugfs:
1226        debugfs_remove_recursive(visorhba_debugfs_dir);
1227
1228        return rc;
1229}
1230
1231/*
1232 * visorhba_exit - Driver exit routine
1233 *
1234 * Unregister driver from the bus and free up memory.
1235 */
1236static void visorhba_exit(void)
1237{
1238        visorbus_unregister_visor_driver(&visorhba_driver);
1239        debugfs_remove_recursive(visorhba_debugfs_dir);
1240}
1241
1242module_init(visorhba_init);
1243module_exit(visorhba_exit);
1244
1245MODULE_AUTHOR("Unisys");
1246MODULE_LICENSE("GPL");
1247MODULE_DESCRIPTION("s-Par HBA driver for virtual SCSI host busses");
1248