linux/drivers/block/skd_main.c
   1/* Copyright 2012 STEC, Inc.
   2 *
   3 * This file is licensed under the terms of the 3-clause
   4 * BSD License (http://opensource.org/licenses/BSD-3-Clause)
   5 * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html),
   6 * at your option. Both licenses are also available in the LICENSE file
   7 * distributed with this project. This file may not be copied, modified,
   8 * or distributed except in accordance with those terms.
   9 * Gordoni Waidhofer <gwaidhofer@stec-inc.com>
  10 * Initial Driver Design!
  11 * Thomas Swann <tswann@stec-inc.com>
  12 * Interrupt handling.
  13 * Ramprasad Chinthekindi <rchinthekindi@stec-inc.com>
  14 * biomode implementation.
  15 * Akhil Bhansali <abhansali@stec-inc.com>
  16 * Added support for DISCARD / FLUSH and FUA.
  17 */
  18
  19#include <linux/kernel.h>
  20#include <linux/module.h>
  21#include <linux/init.h>
  22#include <linux/pci.h>
  23#include <linux/slab.h>
  24#include <linux/spinlock.h>
  25#include <linux/blkdev.h>
  26#include <linux/sched.h>
  27#include <linux/interrupt.h>
  28#include <linux/compiler.h>
  29#include <linux/workqueue.h>
  30#include <linux/bitops.h>
  31#include <linux/delay.h>
  32#include <linux/time.h>
  33#include <linux/hdreg.h>
  34#include <linux/dma-mapping.h>
  35#include <linux/completion.h>
  36#include <linux/scatterlist.h>
  37#include <linux/version.h>
  38#include <linux/err.h>
  40#include <linux/aer.h>
  41#include <linux/ctype.h>
  42#include <linux/wait.h>
  43#include <linux/uio.h>
  44#include <scsi/scsi.h>
  45#include <scsi/sg.h>
  46#include <linux/io.h>
  47#include <linux/uaccess.h>
  48#include <asm/unaligned.h>
  49
  50#include "skd_s1120.h"
  51
  52static int skd_dbg_level;
  53static int skd_isr_comp_limit = 4;
  54
  55enum {
  56        STEC_LINK_2_5GTS = 0,
  57        STEC_LINK_5GTS = 1,
  58        STEC_LINK_8GTS = 2,
  59        STEC_LINK_UNKNOWN = 0xFF
  60};
  61
  62enum {
  63        SKD_FLUSH_INITIALIZER,
  64        SKD_FLUSH_ZERO_SIZE_FIRST,
  65        SKD_FLUSH_DATA_SECOND,
  66};
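/*
 * Flush sequencing values.  skd_request_fn() below counts REQ_FLUSH flags
 * into a plain int and compares it against SKD_FLUSH_ZERO_SIZE_FIRST
 * (== 1) when deciding to emit a zero-length SYNCHRONIZE CACHE command.
 */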
  67
  68#define SKD_ASSERT(expr) \
  69        do { \
  70                if (unlikely(!(expr))) { \
  71                        pr_err("Assertion failed! %s,%s,%s,line=%d\n",  \
  72                               # expr, __FILE__, __func__, __LINE__); \
  73                } \
  74        } while (0)
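/*
 * SKD_ASSERT() only logs a failed expectation; it never halts the driver.
 * Typical use, as seen further down in this file:
 *
 *      SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
 */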
  75
  76#define DRV_NAME "skd"
  77#define DRV_VERSION "2.2.1"
  78#define DRV_BUILD_ID "0260"
  79#define PFX DRV_NAME ": "
  80#define DRV_BIN_VERSION 0x100
  81#define DRV_VER_COMPL   "2.2.1." DRV_BUILD_ID
  82
  83MODULE_AUTHOR("bug-reports: support@stec-inc.com");
  84MODULE_LICENSE("Dual BSD/GPL");
  85
  86MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")");
  87MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
  88
  89#define PCI_VENDOR_ID_STEC      0x1B39
  90#define PCI_DEVICE_ID_S1120     0x0001
  91
  92#define SKD_FUA_NV              (1 << 1)
  93#define SKD_MINORS_PER_DEVICE   16
  94
  95#define SKD_MAX_QUEUE_DEPTH     200u
  96
  97#define SKD_PAUSE_TIMEOUT       (5 * 1000)
  98
  99#define SKD_N_FITMSG_BYTES      (512u)
 100
 101#define SKD_N_SPECIAL_CONTEXT   32u
 102#define SKD_N_SPECIAL_FITMSG_BYTES      (128u)
 103
 104/* SG elements are 32 bytes, so we can make this 4096 and still be under the
 105 * 128KB limit.  That allows 4096*4K = 16M xfer size
 106 */
 107#define SKD_N_SG_PER_REQ_DEFAULT 256u
 108#define SKD_N_SG_PER_SPECIAL    256u
 109
 110#define SKD_N_COMPLETION_ENTRY  256u
 111#define SKD_N_READ_CAP_BYTES    (8u)
 112
 113#define SKD_N_INTERNAL_BYTES    (512u)
 114
  115/* 5 bits of uniquifier, 0xF800 */
 116#define SKD_ID_INCR             (0x400)
 117#define SKD_ID_TABLE_MASK       (3u << 8u)
 118#define  SKD_ID_RW_REQUEST      (0u << 8u)
 119#define  SKD_ID_INTERNAL        (1u << 8u)
 120#define  SKD_ID_SPECIAL_REQUEST (2u << 8u)
 121#define  SKD_ID_FIT_MSG         (3u << 8u)
 122#define SKD_ID_SLOT_MASK        0x00FFu
 123#define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu
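/*
 * A context id is therefore [uniquifier | table | slot]: bits 7:0 index the
 * slot within its table, bits 9:8 identify the table, and the id is bumped
 * by SKD_ID_INCR each time the context is reused so stale completions can
 * be told apart from current ones.
 */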
 124
 125#define SKD_N_TIMEOUT_SLOT      4u
 126#define SKD_TIMEOUT_SLOT_MASK   3u
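/*
 * The per-second timer tick advances skdev->timeout_stamp; each new request
 * records the stamp and is counted in timeout_slot[stamp &
 * SKD_TIMEOUT_SLOT_MASK], letting skd_timer_tick() spot I/O that has been
 * outstanding for several ticks.
 */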
 127
 128#define SKD_N_MAX_SECTORS 2048u
 129
 130#define SKD_MAX_RETRIES 2u
 131
 132#define SKD_TIMER_SECONDS(seconds) (seconds)
 133#define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))
 134
 135#define INQ_STD_NBYTES 36
 136#define SKD_DISCARD_CDB_LENGTH  24
 137
 138enum skd_drvr_state {
 139        SKD_DRVR_STATE_LOAD,
 140        SKD_DRVR_STATE_IDLE,
 141        SKD_DRVR_STATE_BUSY,
 142        SKD_DRVR_STATE_STARTING,
 143        SKD_DRVR_STATE_ONLINE,
 144        SKD_DRVR_STATE_PAUSING,
 145        SKD_DRVR_STATE_PAUSED,
 146        SKD_DRVR_STATE_DRAINING_TIMEOUT,
 147        SKD_DRVR_STATE_RESTARTING,
 148        SKD_DRVR_STATE_RESUMING,
 149        SKD_DRVR_STATE_STOPPING,
 150        SKD_DRVR_STATE_FAULT,
 151        SKD_DRVR_STATE_DISAPPEARED,
 152        SKD_DRVR_STATE_PROTOCOL_MISMATCH,
 153        SKD_DRVR_STATE_BUSY_ERASE,
 154        SKD_DRVR_STATE_BUSY_SANITIZE,
 155        SKD_DRVR_STATE_BUSY_IMMINENT,
 156        SKD_DRVR_STATE_WAIT_BOOT,
 157        SKD_DRVR_STATE_SYNCING,
 158};
 159
 160#define SKD_WAIT_BOOT_TIMO      SKD_TIMER_SECONDS(90u)
 161#define SKD_STARTING_TIMO       SKD_TIMER_SECONDS(8u)
 162#define SKD_RESTARTING_TIMO     SKD_TIMER_MINUTES(4u)
 163#define SKD_DRAINING_TIMO       SKD_TIMER_SECONDS(6u)
 164#define SKD_BUSY_TIMO           SKD_TIMER_MINUTES(20u)
 165#define SKD_STARTED_BUSY_TIMO   SKD_TIMER_SECONDS(60u)
 166#define SKD_START_WAIT_SECONDS  90u
 167
 168enum skd_req_state {
 169        SKD_REQ_STATE_IDLE,
 170        SKD_REQ_STATE_SETUP,
 171        SKD_REQ_STATE_BUSY,
 172        SKD_REQ_STATE_COMPLETED,
 173        SKD_REQ_STATE_TIMEOUT,
 174        SKD_REQ_STATE_ABORTED,
 175};
 176
 177enum skd_fit_msg_state {
 178        SKD_MSG_STATE_IDLE,
 179        SKD_MSG_STATE_BUSY,
 180};
 181
 182enum skd_check_status_action {
 183        SKD_CHECK_STATUS_REPORT_GOOD,
 184        SKD_CHECK_STATUS_REPORT_SMART_ALERT,
 185        SKD_CHECK_STATUS_REQUEUE_REQUEST,
 186        SKD_CHECK_STATUS_REPORT_ERROR,
 187        SKD_CHECK_STATUS_BUSY_IMMINENT,
 188};
 189
 190struct skd_fitmsg_context {
 191        enum skd_fit_msg_state state;
 192
 193        struct skd_fitmsg_context *next;
 194
 195        u32 id;
 196        u16 outstanding;
 197
 198        u32 length;
 199        u32 offset;
 200
 201        u8 *msg_buf;
 202        dma_addr_t mb_dma_address;
 203};
 204
 205struct skd_request_context {
 206        enum skd_req_state state;
 207
 208        struct skd_request_context *next;
 209
 210        u16 id;
 211        u32 fitmsg_id;
 212
 213        struct request *req;
 214        u8 flush_cmd;
 215        u8 discard_page;
 216
 217        u32 timeout_stamp;
 218        u8 sg_data_dir;
 219        struct scatterlist *sg;
 220        u32 n_sg;
 221        u32 sg_byte_count;
 222
 223        struct fit_sg_descriptor *sksg_list;
 224        dma_addr_t sksg_dma_address;
 225
 226        struct fit_completion_entry_v1 completion;
 227
 228        struct fit_comp_error_info err_info;
 229
 230};
 231#define SKD_DATA_DIR_HOST_TO_CARD       1
 232#define SKD_DATA_DIR_CARD_TO_HOST       2
 233#define SKD_DATA_DIR_NONE               3       /* especially for DISCARD requests. */
 234
 235struct skd_special_context {
 236        struct skd_request_context req;
 237
 238        u8 orphaned;
 239
 240        void *data_buf;
 241        dma_addr_t db_dma_address;
 242
 243        u8 *msg_buf;
 244        dma_addr_t mb_dma_address;
 245};
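/*
 * Special contexts back SCSI pass-through (SG_IO) and driver-internal
 * commands.  Each carries its own FIT message buffer and data buffer rather
 * than borrowing from the normal read/write request pool.
 */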
 246
 247struct skd_sg_io {
 248        fmode_t mode;
 249        void __user *argp;
 250
 251        struct sg_io_hdr sg;
 252
 253        u8 cdb[16];
 254
 255        u32 dxfer_len;
 256        u32 iovcnt;
 257        struct sg_iovec *iov;
 258        struct sg_iovec no_iov_iov;
 259
 260        struct skd_special_context *skspcl;
 261};
 262
 263typedef enum skd_irq_type {
 264        SKD_IRQ_LEGACY,
 265        SKD_IRQ_MSI,
 266        SKD_IRQ_MSIX
 267} skd_irq_type_t;
 268
 269#define SKD_MAX_BARS                    2
 270
 271struct skd_device {
 272        volatile void __iomem *mem_map[SKD_MAX_BARS];
 273        resource_size_t mem_phys[SKD_MAX_BARS];
 274        u32 mem_size[SKD_MAX_BARS];
 275
 276        skd_irq_type_t irq_type;
 277        u32 msix_count;
 278        struct skd_msix_entry *msix_entries;
 279
 280        struct pci_dev *pdev;
 281        int pcie_error_reporting_is_enabled;
 282
 283        spinlock_t lock;
 284        struct gendisk *disk;
 285        struct request_queue *queue;
 286        struct device *class_dev;
 287        int gendisk_on;
 288        int sync_done;
 289
 290        atomic_t device_count;
 291        u32 devno;
 292        u32 major;
 293        char name[32];
 294        char isr_name[30];
 295
 296        enum skd_drvr_state state;
 297        u32 drive_state;
 298
 299        u32 in_flight;
 300        u32 cur_max_queue_depth;
 301        u32 queue_low_water_mark;
 302        u32 dev_max_queue_depth;
 303
 304        u32 num_fitmsg_context;
 305        u32 num_req_context;
 306
 307        u32 timeout_slot[SKD_N_TIMEOUT_SLOT];
 308        u32 timeout_stamp;
 309        struct skd_fitmsg_context *skmsg_free_list;
 310        struct skd_fitmsg_context *skmsg_table;
 311
 312        struct skd_request_context *skreq_free_list;
 313        struct skd_request_context *skreq_table;
 314
 315        struct skd_special_context *skspcl_free_list;
 316        struct skd_special_context *skspcl_table;
 317
 318        struct skd_special_context internal_skspcl;
 319        u32 read_cap_blocksize;
 320        u32 read_cap_last_lba;
 321        int read_cap_is_valid;
 322        int inquiry_is_valid;
 323        u8 inq_serial_num[13];  /*12 chars plus null term */
 324        u8 id_str[80];          /* holds a composite name (pci + sernum) */
 325
 326        u8 skcomp_cycle;
 327        u32 skcomp_ix;
 328        struct fit_completion_entry_v1 *skcomp_table;
 329        struct fit_comp_error_info *skerr_table;
 330        dma_addr_t cq_dma_address;
 331
 332        wait_queue_head_t waitq;
 333
 334        struct timer_list timer;
 335        u32 timer_countdown;
 336        u32 timer_substate;
 337
 338        int n_special;
 339        int sgs_per_request;
 340        u32 last_mtd;
 341
 342        u32 proto_ver;
 343
 344        int dbg_level;
 345        u32 connect_time_stamp;
 346        int connect_retries;
 347#define SKD_MAX_CONNECT_RETRIES 16
 348        u32 drive_jiffies;
 349
 350        u32 timo_slot;
 351
 352
 353        struct work_struct completion_worker;
 354};
 355
 356#define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
 357#define SKD_READL(DEV, OFF)      skd_reg_read32(DEV, OFF)
 358#define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)
 359
 360static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
 361{
 362        u32 val;
 363
 364        if (likely(skdev->dbg_level < 2))
 365                return readl(skdev->mem_map[1] + offset);
 366        else {
 367                barrier();
 368                val = readl(skdev->mem_map[1] + offset);
 369                barrier();
 370                pr_debug("%s:%s:%d offset %x = %x\n",
 371                         skdev->name, __func__, __LINE__, offset, val);
 372                return val;
 373        }
 374
 375}
 376
 377static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
 378                                   u32 offset)
 379{
 380        if (likely(skdev->dbg_level < 2)) {
 381                writel(val, skdev->mem_map[1] + offset);
 382                barrier();
 383        } else {
 384                barrier();
 385                writel(val, skdev->mem_map[1] + offset);
 386                barrier();
 387                pr_debug("%s:%s:%d offset %x = %x\n",
 388                         skdev->name, __func__, __LINE__, offset, val);
 389        }
 390}
 391
 392static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
 393                                   u32 offset)
 394{
 395        if (likely(skdev->dbg_level < 2)) {
 396                writeq(val, skdev->mem_map[1] + offset);
 397                barrier();
 398        } else {
 399                barrier();
 400                writeq(val, skdev->mem_map[1] + offset);
 401                barrier();
 402                pr_debug("%s:%s:%d offset %x = %016llx\n",
 403                         skdev->name, __func__, __LINE__, offset, val);
 404        }
 405}
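/*
 * Device registers live in the second BAR (mem_map[1]); all hardware access
 * below goes through the SKD_READL/SKD_WRITEL/SKD_WRITEQ wrappers, e.g.
 *
 *      state = SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
 *
 * so that dbg_level >= 2 traces every register transaction.
 */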
 406
 407
 408#define SKD_IRQ_DEFAULT SKD_IRQ_MSI
 409static int skd_isr_type = SKD_IRQ_DEFAULT;
 410
 411module_param(skd_isr_type, int, 0444);
 412MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
 413                 " (0==legacy, 1==MSI, 2==MSI-X, default==1)");
 414
 415#define SKD_MAX_REQ_PER_MSG_DEFAULT 1
 416static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
 417
 418module_param(skd_max_req_per_msg, int, 0444);
 419MODULE_PARM_DESC(skd_max_req_per_msg,
 420                 "Maximum SCSI requests packed in a single message."
 421                 " (1-14, default==1)");
 422
 423#define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
 424#define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
 425static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
 426
 427module_param(skd_max_queue_depth, int, 0444);
 428MODULE_PARM_DESC(skd_max_queue_depth,
 429                 "Maximum SCSI requests issued to s1120."
 430                 " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");
 431
 432static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
 433module_param(skd_sgs_per_request, int, 0444);
 434MODULE_PARM_DESC(skd_sgs_per_request,
 435                 "Maximum SG elements per block request."
 436                 " (1-4096, default==256)");
 437
 438static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
 439module_param(skd_max_pass_thru, int, 0444);
 440MODULE_PARM_DESC(skd_max_pass_thru,
 441                 "Maximum SCSI pass-thru at a time." " (1-50, default==32)");
 442
 443module_param(skd_dbg_level, int, 0444);
 444MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");
 445
 446module_param(skd_isr_comp_limit, int, 0444);
 447MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");
 448
 449/* Major device number dynamically assigned. */
 450static u32 skd_major;
 451
 452static void skd_destruct(struct skd_device *skdev);
 453static const struct block_device_operations skd_blockdev_ops;
 454static void skd_send_fitmsg(struct skd_device *skdev,
 455                            struct skd_fitmsg_context *skmsg);
 456static void skd_send_special_fitmsg(struct skd_device *skdev,
 457                                    struct skd_special_context *skspcl);
 458static void skd_request_fn(struct request_queue *rq);
 459static void skd_end_request(struct skd_device *skdev,
 460                            struct skd_request_context *skreq, int error);
 461static int skd_preop_sg_list(struct skd_device *skdev,
 462                             struct skd_request_context *skreq);
 463static void skd_postop_sg_list(struct skd_device *skdev,
 464                               struct skd_request_context *skreq);
 465
 466static void skd_restart_device(struct skd_device *skdev);
 467static int skd_quiesce_dev(struct skd_device *skdev);
 468static int skd_unquiesce_dev(struct skd_device *skdev);
 469static void skd_release_special(struct skd_device *skdev,
 470                                struct skd_special_context *skspcl);
 471static void skd_disable_interrupts(struct skd_device *skdev);
 472static void skd_isr_fwstate(struct skd_device *skdev);
 473static void skd_recover_requests(struct skd_device *skdev, int requeue);
 474static void skd_soft_reset(struct skd_device *skdev);
 475
 476static const char *skd_name(struct skd_device *skdev);
 477const char *skd_drive_state_to_str(int state);
 478const char *skd_skdev_state_to_str(enum skd_drvr_state state);
 479static void skd_log_skdev(struct skd_device *skdev, const char *event);
 480static void skd_log_skmsg(struct skd_device *skdev,
 481                          struct skd_fitmsg_context *skmsg, const char *event);
 482static void skd_log_skreq(struct skd_device *skdev,
 483                          struct skd_request_context *skreq, const char *event);
 484
 485/*
 486 *****************************************************************************
 487 * READ/WRITE REQUESTS
 488 *****************************************************************************
 489 */
 490static void skd_fail_all_pending(struct skd_device *skdev)
 491{
 492        struct request_queue *q = skdev->queue;
 493        struct request *req;
 494
 495        for (;; ) {
 496                req = blk_peek_request(q);
 497                if (req == NULL)
 498                        break;
 499                blk_start_request(req);
 500                __blk_end_request_all(req, -EIO);
 501        }
 502}
 503
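/*
 * Build a standard SCSI READ(10)/WRITE(10) CDB: opcode 0x28 or 0x2a, the
 * 32-bit LBA big-endian in bytes 2-5 and the 16-bit transfer length
 * big-endian in bytes 7-8.
 */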
 504static void
 505skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
 506                int data_dir, unsigned lba,
 507                unsigned count)
 508{
 509        if (data_dir == READ)
 510                scsi_req->cdb[0] = 0x28;
 511        else
 512                scsi_req->cdb[0] = 0x2a;
 513
 514        scsi_req->cdb[1] = 0;
 515        scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
 516        scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
 517        scsi_req->cdb[4] = (lba & 0xff00) >> 8;
 518        scsi_req->cdb[5] = (lba & 0xff);
 519        scsi_req->cdb[6] = 0;
 520        scsi_req->cdb[7] = (count & 0xff00) >> 8;
 521        scsi_req->cdb[8] = count & 0xff;
 522        scsi_req->cdb[9] = 0;
 523}
 524
 525static void
 526skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
 527                            struct skd_request_context *skreq)
 528{
 529        skreq->flush_cmd = 1;
 530
 531        scsi_req->cdb[0] = 0x35;
 532        scsi_req->cdb[1] = 0;
 533        scsi_req->cdb[2] = 0;
 534        scsi_req->cdb[3] = 0;
 535        scsi_req->cdb[4] = 0;
 536        scsi_req->cdb[5] = 0;
 537        scsi_req->cdb[6] = 0;
 538        scsi_req->cdb[7] = 0;
 539        scsi_req->cdb[8] = 0;
 540        scsi_req->cdb[9] = 0;
 541}
 542
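/*
 * Translate a DISCARD into SCSI UNMAP.  The 24-byte parameter list (a list
 * header followed by one block descriptor holding the big-endian LBA and
 * block count) is built in a caller-supplied page, which is then attached
 * to the request as its payload.
 */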
 543static void
 544skd_prep_discard_cdb(struct skd_scsi_request *scsi_req,
 545                     struct skd_request_context *skreq,
 546                     struct page *page,
 547                     u32 lba, u32 count)
 548{
 549        char *buf;
 550        unsigned long len;
 551        struct request *req;
 552
 553        buf = page_address(page);
 554        len = SKD_DISCARD_CDB_LENGTH;
 555
 556        scsi_req->cdb[0] = UNMAP;
 557        scsi_req->cdb[8] = len;
 558
 559        put_unaligned_be16(6 + 16, &buf[0]);
 560        put_unaligned_be16(16, &buf[2]);
 561        put_unaligned_be64(lba, &buf[8]);
 562        put_unaligned_be32(count, &buf[16]);
 563
 564        req = skreq->req;
 565        blk_add_request_payload(req, page, len);
 566}
 567
 568static void skd_request_fn_not_online(struct request_queue *q);
 569
 570static void skd_request_fn(struct request_queue *q)
 571{
 572        struct skd_device *skdev = q->queuedata;
 573        struct skd_fitmsg_context *skmsg = NULL;
 574        struct fit_msg_hdr *fmh = NULL;
 575        struct skd_request_context *skreq;
 576        struct request *req = NULL;
 577        struct skd_scsi_request *scsi_req;
 578        struct page *page;
 579        unsigned long io_flags;
 580        int error;
 581        u32 lba;
 582        u32 count;
 583        int data_dir;
 584        u32 be_lba;
 585        u32 be_count;
 586        u64 be_dmaa;
 587        u64 cmdctxt;
 588        u32 timo_slot;
 589        void *cmd_ptr;
 590        int flush, fua;
 591
 592        if (skdev->state != SKD_DRVR_STATE_ONLINE) {
 593                skd_request_fn_not_online(q);
 594                return;
 595        }
 596
 597        if (blk_queue_stopped(skdev->queue)) {
 598                if (skdev->skmsg_free_list == NULL ||
 599                    skdev->skreq_free_list == NULL ||
 600                    skdev->in_flight >= skdev->queue_low_water_mark)
 601                        /* There is still some kind of shortage */
 602                        return;
 603
 604                queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
 605        }
 606
 607        /*
 608         * Stop conditions:
 609         *  - There are no more native requests
 610         *  - There are already the maximum number of requests in progress
 611         *  - There are no more skd_request_context entries
 612         *  - There are no more FIT msg buffers
 613         */
 614        for (;; ) {
 615
 616                flush = fua = 0;
 617
 618                req = blk_peek_request(q);
 619
 620                /* Are there any native requests to start? */
 621                if (req == NULL)
 622                        break;
 623
 624                lba = (u32)blk_rq_pos(req);
 625                count = blk_rq_sectors(req);
 626                data_dir = rq_data_dir(req);
 627                io_flags = req->cmd_flags;
 628
 629                if (io_flags & REQ_FLUSH)
 630                        flush++;
 631
 632                if (io_flags & REQ_FUA)
 633                        fua++;
 634
 635                pr_debug("%s:%s:%d new req=%p lba=%u(0x%x) "
 636                         "count=%u(0x%x) dir=%d\n",
 637                         skdev->name, __func__, __LINE__,
 638                         req, lba, lba, count, count, data_dir);
 639
 640                /* At this point we know there is a request */
 641
  642                /* Are too many requests already in progress? */
 643                if (skdev->in_flight >= skdev->cur_max_queue_depth) {
 644                        pr_debug("%s:%s:%d qdepth %d, limit %d\n",
 645                                 skdev->name, __func__, __LINE__,
 646                                 skdev->in_flight, skdev->cur_max_queue_depth);
 647                        break;
 648                }
 649
 650                /* Is a skd_request_context available? */
 651                skreq = skdev->skreq_free_list;
 652                if (skreq == NULL) {
 653                        pr_debug("%s:%s:%d Out of req=%p\n",
 654                                 skdev->name, __func__, __LINE__, q);
 655                        break;
 656                }
 657                SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
 658                SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);
 659
 660                /* Now we check to see if we can get a fit msg */
 661                if (skmsg == NULL) {
 662                        if (skdev->skmsg_free_list == NULL) {
 663                                pr_debug("%s:%s:%d Out of msg\n",
 664                                         skdev->name, __func__, __LINE__);
 665                                break;
 666                        }
 667                }
 668
 669                skreq->flush_cmd = 0;
 670                skreq->n_sg = 0;
 671                skreq->sg_byte_count = 0;
 672                skreq->discard_page = 0;
 673
 674                /*
 675                 * OK to now dequeue request from q.
 676                 *
  677                 * At this point we are committed to either start or reject
 678                 * the native request. Note that skd_request_context is
 679                 * available but is still at the head of the free list.
 680                 */
 681                blk_start_request(req);
 682                skreq->req = req;
 683                skreq->fitmsg_id = 0;
 684
 685                /* Either a FIT msg is in progress or we have to start one. */
 686                if (skmsg == NULL) {
 687                        /* Are there any FIT msg buffers available? */
 688                        skmsg = skdev->skmsg_free_list;
 689                        if (skmsg == NULL) {
 690                                pr_debug("%s:%s:%d Out of msg skdev=%p\n",
 691                                         skdev->name, __func__, __LINE__,
 692                                         skdev);
 693                                break;
 694                        }
 695                        SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
 696                        SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0);
 697
 698                        skdev->skmsg_free_list = skmsg->next;
 699
 700                        skmsg->state = SKD_MSG_STATE_BUSY;
 701                        skmsg->id += SKD_ID_INCR;
 702
 703                        /* Initialize the FIT msg header */
 704                        fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
 705                        memset(fmh, 0, sizeof(*fmh));
 706                        fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
 707                        skmsg->length = sizeof(*fmh);
 708                }
 709
 710                skreq->fitmsg_id = skmsg->id;
 711
 712                /*
 713                 * Note that a FIT msg may have just been started
 714                 * but contains no SoFIT requests yet.
 715                 */
 716
 717                /*
 718                 * Transcode the request, checking as we go. The outcome of
 719                 * the transcoding is represented by the error variable.
 720                 */
 721                cmd_ptr = &skmsg->msg_buf[skmsg->length];
 722                memset(cmd_ptr, 0, 32);
 723
 724                be_lba = cpu_to_be32(lba);
 725                be_count = cpu_to_be32(count);
 726                be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address);
 727                cmdctxt = skreq->id + SKD_ID_INCR;
 728
 729                scsi_req = cmd_ptr;
 730                scsi_req->hdr.tag = cmdctxt;
 731                scsi_req->hdr.sg_list_dma_address = be_dmaa;
 732
 733                if (data_dir == READ)
 734                        skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
 735                else
 736                        skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;
 737
 738                if (io_flags & REQ_DISCARD) {
 739                        page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
 740                        if (!page) {
 741                                pr_err("request_fn:Page allocation failed.\n");
 742                                skd_end_request(skdev, skreq, -ENOMEM);
 743                                break;
 744                        }
 745                        skreq->discard_page = 1;
 746                        req->completion_data = page;
 747                        skd_prep_discard_cdb(scsi_req, skreq, page, lba, count);
 748
 749                } else if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
 750                        skd_prep_zerosize_flush_cdb(scsi_req, skreq);
 751                        SKD_ASSERT(skreq->flush_cmd == 1);
 752
 753                } else {
 754                        skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
 755                }
 756
 757                if (fua)
 758                        scsi_req->cdb[1] |= SKD_FUA_NV;
 759
 760                if (!req->bio)
 761                        goto skip_sg;
 762
 763                error = skd_preop_sg_list(skdev, skreq);
 764
 765                if (error != 0) {
 766                        /*
 767                         * Complete the native request with error.
 768                         * Note that the request context is still at the
 769                         * head of the free list, and that the SoFIT request
 770                         * was encoded into the FIT msg buffer but the FIT
 771                         * msg length has not been updated. In short, the
 772                         * only resource that has been allocated but might
 773                         * not be used is that the FIT msg could be empty.
 774                         */
 775                        pr_debug("%s:%s:%d error Out\n",
 776                                 skdev->name, __func__, __LINE__);
 777                        skd_end_request(skdev, skreq, error);
 778                        continue;
 779                }
 780
 781skip_sg:
 782                scsi_req->hdr.sg_list_len_bytes =
 783                        cpu_to_be32(skreq->sg_byte_count);
 784
 785                /* Complete resource allocations. */
 786                skdev->skreq_free_list = skreq->next;
 787                skreq->state = SKD_REQ_STATE_BUSY;
 788                skreq->id += SKD_ID_INCR;
 789
 790                skmsg->length += sizeof(struct skd_scsi_request);
 791                fmh->num_protocol_cmds_coalesced++;
 792
 793                /*
 794                 * Update the active request counts.
 795                 * Capture the timeout timestamp.
 796                 */
 797                skreq->timeout_stamp = skdev->timeout_stamp;
 798                timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
 799                skdev->timeout_slot[timo_slot]++;
 800                skdev->in_flight++;
 801                pr_debug("%s:%s:%d req=0x%x busy=%d\n",
 802                         skdev->name, __func__, __LINE__,
 803                         skreq->id, skdev->in_flight);
 804
 805                /*
 806                 * If the FIT msg buffer is full send it.
 807                 */
 808                if (skmsg->length >= SKD_N_FITMSG_BYTES ||
 809                    fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
 810                        skd_send_fitmsg(skdev, skmsg);
 811                        skmsg = NULL;
 812                        fmh = NULL;
 813                }
 814        }
 815
 816        /*
 817         * Is a FIT msg in progress? If it is empty put the buffer back
 818         * on the free list. If it is non-empty send what we got.
 819         * This minimizes latency when there are fewer requests than
 820         * what fits in a FIT msg.
 821         */
 822        if (skmsg != NULL) {
 823                /* Bigger than just a FIT msg header? */
 824                if (skmsg->length > sizeof(struct fit_msg_hdr)) {
 825                        pr_debug("%s:%s:%d sending msg=%p, len %d\n",
 826                                 skdev->name, __func__, __LINE__,
 827                                 skmsg, skmsg->length);
 828                        skd_send_fitmsg(skdev, skmsg);
 829                } else {
 830                        /*
 831                         * The FIT msg is empty. It means we got started
 832                         * on the msg, but the requests were rejected.
 833                         */
 834                        skmsg->state = SKD_MSG_STATE_IDLE;
 835                        skmsg->id += SKD_ID_INCR;
 836                        skmsg->next = skdev->skmsg_free_list;
 837                        skdev->skmsg_free_list = skmsg;
 838                }
 839                skmsg = NULL;
 840                fmh = NULL;
 841        }
 842
 843        /*
 844         * If req is non-NULL it means there is something to do but
 845         * we are out of a resource.
 846         */
 847        if (req)
 848                blk_stop_queue(skdev->queue);
 849}
 850
 851static void skd_end_request(struct skd_device *skdev,
 852                            struct skd_request_context *skreq, int error)
 853{
 854        struct request *req = skreq->req;
 855        unsigned int io_flags = req->cmd_flags;
 856
 857        if ((io_flags & REQ_DISCARD) &&
 858                (skreq->discard_page == 1)) {
  859                pr_debug("%s:%s:%d, free the page!\n",
 860                         skdev->name, __func__, __LINE__);
 861                __free_page(req->completion_data);
 862        }
 863
 864        if (unlikely(error)) {
 865                struct request *req = skreq->req;
 866                char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
 867                u32 lba = (u32)blk_rq_pos(req);
 868                u32 count = blk_rq_sectors(req);
 869
 870                pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n",
 871                       skd_name(skdev), cmd, lba, count, skreq->id);
 872        } else
 873                pr_debug("%s:%s:%d id=0x%x error=%d\n",
 874                         skdev->name, __func__, __LINE__, skreq->id, error);
 875
 876        __blk_end_request_all(skreq->req, error);
 877}
 878
 879static int skd_preop_sg_list(struct skd_device *skdev,
 880                             struct skd_request_context *skreq)
 881{
 882        struct request *req = skreq->req;
 883        int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
 884        int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
 885        struct scatterlist *sg = &skreq->sg[0];
 886        int n_sg;
 887        int i;
 888
 889        skreq->sg_byte_count = 0;
 890
 891        /* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
 892                   skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */
 893
 894        n_sg = blk_rq_map_sg(skdev->queue, req, sg);
 895        if (n_sg <= 0)
 896                return -EINVAL;
 897
 898        /*
 899         * Map scatterlist to PCI bus addresses.
 900         * Note PCI might change the number of entries.
 901         */
 902        n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);
 903        if (n_sg <= 0)
 904                return -EINVAL;
 905
 906        SKD_ASSERT(n_sg <= skdev->sgs_per_request);
 907
 908        skreq->n_sg = n_sg;
 909
 910        for (i = 0; i < n_sg; i++) {
 911                struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
 912                u32 cnt = sg_dma_len(&sg[i]);
 913                uint64_t dma_addr = sg_dma_address(&sg[i]);
 914
 915                sgd->control = FIT_SGD_CONTROL_NOT_LAST;
 916                sgd->byte_count = cnt;
 917                skreq->sg_byte_count += cnt;
 918                sgd->host_side_addr = dma_addr;
 919                sgd->dev_side_addr = 0;
 920        }
 921
 922        skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
 923        skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;
 924
 925        if (unlikely(skdev->dbg_level > 1)) {
 926                pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
 927                         skdev->name, __func__, __LINE__,
 928                         skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
 929                for (i = 0; i < n_sg; i++) {
 930                        struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
 931                        pr_debug("%s:%s:%d   sg[%d] count=%u ctrl=0x%x "
 932                                 "addr=0x%llx next=0x%llx\n",
 933                                 skdev->name, __func__, __LINE__,
 934                                 i, sgd->byte_count, sgd->control,
 935                                 sgd->host_side_addr, sgd->next_desc_ptr);
 936                }
 937        }
 938
 939        return 0;
 940}
 941
 942static void skd_postop_sg_list(struct skd_device *skdev,
 943                               struct skd_request_context *skreq)
 944{
 945        int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
 946        int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
 947
 948        /*
 949         * restore the next ptr for next IO request so we
 950         * don't have to set it every time.
 951         */
 952        skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
 953                skreq->sksg_dma_address +
 954                ((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
 955        pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
 956}
 957
 958static void skd_request_fn_not_online(struct request_queue *q)
 959{
 960        struct skd_device *skdev = q->queuedata;
 961        int error;
 962
 963        SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
 964
 965        skd_log_skdev(skdev, "req_not_online");
 966        switch (skdev->state) {
 967        case SKD_DRVR_STATE_PAUSING:
 968        case SKD_DRVR_STATE_PAUSED:
 969        case SKD_DRVR_STATE_STARTING:
 970        case SKD_DRVR_STATE_RESTARTING:
 971        case SKD_DRVR_STATE_WAIT_BOOT:
  972        /* In the starting case we haven't started the queue,
  973         * so we can't get here... but requests may already
  974         * be waiting for us because we reported /dev/skd0
  975         * earlier.  They'll wait forever if the connect
  976         * never completes.
  977         * What to do??? delay /dev/skd0 ??
  978         */
 979        case SKD_DRVR_STATE_BUSY:
 980        case SKD_DRVR_STATE_BUSY_IMMINENT:
 981        case SKD_DRVR_STATE_BUSY_ERASE:
 982        case SKD_DRVR_STATE_DRAINING_TIMEOUT:
 983                return;
 984
 985        case SKD_DRVR_STATE_BUSY_SANITIZE:
 986        case SKD_DRVR_STATE_STOPPING:
 987        case SKD_DRVR_STATE_SYNCING:
 988        case SKD_DRVR_STATE_FAULT:
 989        case SKD_DRVR_STATE_DISAPPEARED:
 990        default:
 991                error = -EIO;
 992                break;
 993        }
 994
  995        /* If we get here, terminate all pending block requests
  996         * with EIO and any SCSI pass-thru with appropriate sense
 997         */
 998
 999        skd_fail_all_pending(skdev);
1000}
1001
1002/*
1003 *****************************************************************************
1004 * TIMER
1005 *****************************************************************************
1006 */
1007
1008static void skd_timer_tick_not_online(struct skd_device *skdev);
1009
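/*
 * One-second housekeeping timer: it watches the drive state register,
 * counts down connect/busy/drain timeouts, and decides when overdue I/O
 * forces a drain or a device restart.  It re-arms itself on every tick.
 */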
1010static void skd_timer_tick(ulong arg)
1011{
1012        struct skd_device *skdev = (struct skd_device *)arg;
1013
1014        u32 timo_slot;
1015        u32 overdue_timestamp;
1016        unsigned long reqflags;
1017        u32 state;
1018
1019        if (skdev->state == SKD_DRVR_STATE_FAULT)
1020                /* The driver has declared fault, and we want it to
1021                 * stay that way until driver is reloaded.
1022                 */
1023                return;
1024
1025        spin_lock_irqsave(&skdev->lock, reqflags);
1026
1027        state = SKD_READL(skdev, FIT_STATUS);
1028        state &= FIT_SR_DRIVE_STATE_MASK;
1029        if (state != skdev->drive_state)
1030                skd_isr_fwstate(skdev);
1031
1032        if (skdev->state != SKD_DRVR_STATE_ONLINE) {
1033                skd_timer_tick_not_online(skdev);
1034                goto timer_func_out;
1035        }
1036        skdev->timeout_stamp++;
1037        timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
1038
1039        /*
1040         * All requests that happened during the previous use of
1041         * this slot should be done by now. The previous use was
 1042         * SKD_N_TIMEOUT_SLOT timer ticks (seconds) ago.
1043         */
1044        if (skdev->timeout_slot[timo_slot] == 0)
1045                goto timer_func_out;
1046
1047        /* Something is overdue */
1048        overdue_timestamp = skdev->timeout_stamp - SKD_N_TIMEOUT_SLOT;
1049
1050        pr_debug("%s:%s:%d found %d timeouts, draining busy=%d\n",
1051                 skdev->name, __func__, __LINE__,
1052                 skdev->timeout_slot[timo_slot], skdev->in_flight);
1053        pr_err("(%s): Overdue IOs (%d), busy %d\n",
1054               skd_name(skdev), skdev->timeout_slot[timo_slot],
1055               skdev->in_flight);
1056
1057        skdev->timer_countdown = SKD_DRAINING_TIMO;
1058        skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
1059        skdev->timo_slot = timo_slot;
1060        blk_stop_queue(skdev->queue);
1061
1062timer_func_out:
1063        mod_timer(&skdev->timer, (jiffies + HZ));
1064
1065        spin_unlock_irqrestore(&skdev->lock, reqflags);
1066}
1067
1068static void skd_timer_tick_not_online(struct skd_device *skdev)
1069{
1070        switch (skdev->state) {
1071        case SKD_DRVR_STATE_IDLE:
1072        case SKD_DRVR_STATE_LOAD:
1073                break;
1074        case SKD_DRVR_STATE_BUSY_SANITIZE:
1075                pr_debug("%s:%s:%d drive busy sanitize[%x], driver[%x]\n",
1076                         skdev->name, __func__, __LINE__,
1077                         skdev->drive_state, skdev->state);
1078                /* If we've been in sanitize for 3 seconds, we figure we're not
 1079                 * going to get any more completions, so recover requests now
1080                 */
1081                if (skdev->timer_countdown > 0) {
1082                        skdev->timer_countdown--;
1083                        return;
1084                }
1085                skd_recover_requests(skdev, 0);
1086                break;
1087
1088        case SKD_DRVR_STATE_BUSY:
1089        case SKD_DRVR_STATE_BUSY_IMMINENT:
1090        case SKD_DRVR_STATE_BUSY_ERASE:
1091                pr_debug("%s:%s:%d busy[%x], countdown=%d\n",
1092                         skdev->name, __func__, __LINE__,
1093                         skdev->state, skdev->timer_countdown);
1094                if (skdev->timer_countdown > 0) {
1095                        skdev->timer_countdown--;
1096                        return;
1097                }
1098                pr_debug("%s:%s:%d busy[%x], timedout=%d, restarting device.",
1099                         skdev->name, __func__, __LINE__,
1100                         skdev->state, skdev->timer_countdown);
1101                skd_restart_device(skdev);
1102                break;
1103
1104        case SKD_DRVR_STATE_WAIT_BOOT:
1105        case SKD_DRVR_STATE_STARTING:
1106                if (skdev->timer_countdown > 0) {
1107                        skdev->timer_countdown--;
1108                        return;
1109                }
1110                /* For now, we fault the drive.  Could attempt resets to
 1111                 * recover at some point. */
1112                skdev->state = SKD_DRVR_STATE_FAULT;
1113
1114                pr_err("(%s): DriveFault Connect Timeout (%x)\n",
1115                       skd_name(skdev), skdev->drive_state);
1116
1117                /*start the queue so we can respond with error to requests */
1118                /* wakeup anyone waiting for startup complete */
1119                blk_start_queue(skdev->queue);
1120                skdev->gendisk_on = -1;
1121                wake_up_interruptible(&skdev->waitq);
1122                break;
1123
1124        case SKD_DRVR_STATE_ONLINE:
1125                /* shouldn't get here. */
1126                break;
1127
1128        case SKD_DRVR_STATE_PAUSING:
1129        case SKD_DRVR_STATE_PAUSED:
1130                break;
1131
1132        case SKD_DRVR_STATE_DRAINING_TIMEOUT:
1133                pr_debug("%s:%s:%d "
1134                         "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
1135                         skdev->name, __func__, __LINE__,
1136                         skdev->timo_slot,
1137                         skdev->timer_countdown,
1138                         skdev->in_flight,
1139                         skdev->timeout_slot[skdev->timo_slot]);
1140                /* if the slot has cleared we can let the I/O continue */
1141                if (skdev->timeout_slot[skdev->timo_slot] == 0) {
1142                        pr_debug("%s:%s:%d Slot drained, starting queue.\n",
1143                                 skdev->name, __func__, __LINE__);
1144                        skdev->state = SKD_DRVR_STATE_ONLINE;
1145                        blk_start_queue(skdev->queue);
1146                        return;
1147                }
1148                if (skdev->timer_countdown > 0) {
1149                        skdev->timer_countdown--;
1150                        return;
1151                }
1152                skd_restart_device(skdev);
1153                break;
1154
1155        case SKD_DRVR_STATE_RESTARTING:
1156                if (skdev->timer_countdown > 0) {
1157                        skdev->timer_countdown--;
1158                        return;
1159                }
1160                /* For now, we fault the drive. Could attempt resets to
 1161                 * recover at some point. */
1162                skdev->state = SKD_DRVR_STATE_FAULT;
1163                pr_err("(%s): DriveFault Reconnect Timeout (%x)\n",
1164                       skd_name(skdev), skdev->drive_state);
1165
1166                /*
1167                 * Recovering does two things:
1168                 * 1. completes IO with error
1169                 * 2. reclaims dma resources
1170                 * When is it safe to recover requests?
1171                 * - if the drive state is faulted
 1172         * - if the state is still soft reset after our timeout
1173                 * - if the drive registers are dead (state = FF)
1174                 * If it is "unsafe", we still need to recover, so we will
1175                 * disable pci bus mastering and disable our interrupts.
1176                 */
1177
1178                if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
1179                    (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
1180                    (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
1181                        /* It never came out of soft reset. Try to
1182                         * recover the requests and then let them
1183                         * fail. This is to mitigate hung processes. */
1184                        skd_recover_requests(skdev, 0);
1185                else {
1186                        pr_err("(%s): Disable BusMaster (%x)\n",
1187                               skd_name(skdev), skdev->drive_state);
1188                        pci_disable_device(skdev->pdev);
1189                        skd_disable_interrupts(skdev);
1190                        skd_recover_requests(skdev, 0);
1191                }
1192
1193                /*start the queue so we can respond with error to requests */
1194                /* wakeup anyone waiting for startup complete */
1195                blk_start_queue(skdev->queue);
1196                skdev->gendisk_on = -1;
1197                wake_up_interruptible(&skdev->waitq);
1198                break;
1199
1200        case SKD_DRVR_STATE_RESUMING:
1201        case SKD_DRVR_STATE_STOPPING:
1202        case SKD_DRVR_STATE_SYNCING:
1203        case SKD_DRVR_STATE_FAULT:
1204        case SKD_DRVR_STATE_DISAPPEARED:
1205        default:
1206                break;
1207        }
1208}
1209
1210static int skd_start_timer(struct skd_device *skdev)
1211{
1212        int rc;
1213
1214        init_timer(&skdev->timer);
1215        setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);
1216
1217        rc = mod_timer(&skdev->timer, (jiffies + HZ));
1218        if (rc)
1219                pr_err("%s: failed to start timer %d\n",
1220                       __func__, rc);
1221        return rc;
1222}
1223
1224static void skd_kill_timer(struct skd_device *skdev)
1225{
1226        del_timer_sync(&skdev->timer);
1227}
1228
1229/*
1230 *****************************************************************************
1231 * IOCTL
1232 *****************************************************************************
1233 */
1234static int skd_ioctl_sg_io(struct skd_device *skdev,
1235                           fmode_t mode, void __user *argp);
1236static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
1237                                        struct skd_sg_io *sksgio);
1238static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
1239                                   struct skd_sg_io *sksgio);
1240static int skd_sg_io_prep_buffering(struct skd_device *skdev,
1241                                    struct skd_sg_io *sksgio);
1242static int skd_sg_io_copy_buffer(struct skd_device *skdev,
1243                                 struct skd_sg_io *sksgio, int dxfer_dir);
1244static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
1245                                 struct skd_sg_io *sksgio);
1246static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio);
1247static int skd_sg_io_release_skspcl(struct skd_device *skdev,
1248                                    struct skd_sg_io *sksgio);
1249static int skd_sg_io_put_status(struct skd_device *skdev,
1250                                struct skd_sg_io *sksgio);
1251
1252static void skd_complete_special(struct skd_device *skdev,
1253                                 volatile struct fit_completion_entry_v1
1254                                 *skcomp,
1255                                 volatile struct fit_comp_error_info *skerr,
1256                                 struct skd_special_context *skspcl);
1257
1258static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
1259                          uint cmd_in, ulong arg)
1260{
1261        int rc = 0;
1262        struct gendisk *disk = bdev->bd_disk;
1263        struct skd_device *skdev = disk->private_data;
1264        void __user *p = (void *)arg;
1265
1266        pr_debug("%s:%s:%d %s: CMD[%s] ioctl  mode 0x%x, cmd 0x%x arg %0lx\n",
1267                 skdev->name, __func__, __LINE__,
1268                 disk->disk_name, current->comm, mode, cmd_in, arg);
1269
1270        if (!capable(CAP_SYS_ADMIN))
1271                return -EPERM;
1272
1273        switch (cmd_in) {
1274        case SG_SET_TIMEOUT:
1275        case SG_GET_TIMEOUT:
1276        case SG_GET_VERSION_NUM:
1277                rc = scsi_cmd_ioctl(disk->queue, disk, mode, cmd_in, p);
1278                break;
1279        case SG_IO:
1280                rc = skd_ioctl_sg_io(skdev, mode, p);
1281                break;
1282
1283        default:
1284                rc = -ENOTTY;
1285                break;
1286        }
1287
1288        pr_debug("%s:%s:%d %s:  completion rc %d\n",
1289                 skdev->name, __func__, __LINE__, disk->disk_name, rc);
1290        return rc;
1291}
1292
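/*
 * SG_IO is handled as a fixed pipeline: copy in and validate the user's
 * sg_io_hdr and iovecs, claim a special context, stage kernel bounce
 * buffers, copy data toward the device, send the FIT message, wait for
 * completion, copy data back, and report status.  Any failure falls
 * through to the common release path at "out".
 */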
1293static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode,
1294                           void __user *argp)
1295{
1296        int rc;
1297        struct skd_sg_io sksgio;
1298
1299        memset(&sksgio, 0, sizeof(sksgio));
1300        sksgio.mode = mode;
1301        sksgio.argp = argp;
1302        sksgio.iov = &sksgio.no_iov_iov;
1303
1304        switch (skdev->state) {
1305        case SKD_DRVR_STATE_ONLINE:
1306        case SKD_DRVR_STATE_BUSY_IMMINENT:
1307                break;
1308
1309        default:
1310                pr_debug("%s:%s:%d drive not online\n",
1311                         skdev->name, __func__, __LINE__);
1312                rc = -ENXIO;
1313                goto out;
1314        }
1315
1316        rc = skd_sg_io_get_and_check_args(skdev, &sksgio);
1317        if (rc)
1318                goto out;
1319
1320        rc = skd_sg_io_obtain_skspcl(skdev, &sksgio);
1321        if (rc)
1322                goto out;
1323
1324        rc = skd_sg_io_prep_buffering(skdev, &sksgio);
1325        if (rc)
1326                goto out;
1327
1328        rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV);
1329        if (rc)
1330                goto out;
1331
1332        rc = skd_sg_io_send_fitmsg(skdev, &sksgio);
1333        if (rc)
1334                goto out;
1335
1336        rc = skd_sg_io_await(skdev, &sksgio);
1337        if (rc)
1338                goto out;
1339
1340        rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV);
1341        if (rc)
1342                goto out;
1343
1344        rc = skd_sg_io_put_status(skdev, &sksgio);
1345        if (rc)
1346                goto out;
1347
1348        rc = 0;
1349
1350out:
1351        skd_sg_io_release_skspcl(skdev, &sksgio);
1352
1353        if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov)
1354                kfree(sksgio.iov);
1355        return rc;
1356}
1357
1358static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
1359                                        struct skd_sg_io *sksgio)
1360{
1361        struct sg_io_hdr *sgp = &sksgio->sg;
1362        int i, acc;
1363
1364        if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) {
1365                pr_debug("%s:%s:%d access sg failed %p\n",
1366                         skdev->name, __func__, __LINE__, sksgio->argp);
1367                return -EFAULT;
1368        }
1369
1370        if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) {
1371                pr_debug("%s:%s:%d copy_from_user sg failed %p\n",
1372                         skdev->name, __func__, __LINE__, sksgio->argp);
1373                return -EFAULT;
1374        }
1375
1376        if (sgp->interface_id != SG_INTERFACE_ID_ORIG) {
1377                pr_debug("%s:%s:%d interface_id invalid 0x%x\n",
1378                         skdev->name, __func__, __LINE__, sgp->interface_id);
1379                return -EINVAL;
1380        }
1381
1382        if (sgp->cmd_len > sizeof(sksgio->cdb)) {
1383                pr_debug("%s:%s:%d cmd_len invalid %d\n",
1384                         skdev->name, __func__, __LINE__, sgp->cmd_len);
1385                return -EINVAL;
1386        }
1387
1388        if (sgp->iovec_count > 256) {
1389                pr_debug("%s:%s:%d iovec_count invalid %d\n",
1390                         skdev->name, __func__, __LINE__, sgp->iovec_count);
1391                return -EINVAL;
1392        }
1393
1394        if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) {
1395                pr_debug("%s:%s:%d dxfer_len invalid %d\n",
1396                         skdev->name, __func__, __LINE__, sgp->dxfer_len);
1397                return -EINVAL;
1398        }
1399
1400        switch (sgp->dxfer_direction) {
1401        case SG_DXFER_NONE:
1402                acc = -1;
1403                break;
1404
1405        case SG_DXFER_TO_DEV:
1406                acc = VERIFY_READ;
1407                break;
1408
1409        case SG_DXFER_FROM_DEV:
1410        case SG_DXFER_TO_FROM_DEV:
1411                acc = VERIFY_WRITE;
1412                break;
1413
1414        default:
1415                pr_debug("%s:%s:%d dxfer_dir invalid %d\n",
1416                         skdev->name, __func__, __LINE__, sgp->dxfer_direction);
1417                return -EINVAL;
1418        }
1419
1420        if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) {
1421                pr_debug("%s:%s:%d copy_from_user cmdp failed %p\n",
1422                         skdev->name, __func__, __LINE__, sgp->cmdp);
1423                return -EFAULT;
1424        }
1425
1426        if (sgp->mx_sb_len != 0) {
1427                if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) {
1428                        pr_debug("%s:%s:%d access sbp failed %p\n",
1429                                 skdev->name, __func__, __LINE__, sgp->sbp);
1430                        return -EFAULT;
1431                }
1432        }
1433
1434        if (sgp->iovec_count == 0) {
1435                sksgio->iov[0].iov_base = sgp->dxferp;
1436                sksgio->iov[0].iov_len = sgp->dxfer_len;
1437                sksgio->iovcnt = 1;
1438                sksgio->dxfer_len = sgp->dxfer_len;
1439        } else {
1440                struct sg_iovec *iov;
1441                uint nbytes = sizeof(*iov) * sgp->iovec_count;
1442                size_t iov_data_len;
1443
1444                iov = kmalloc(nbytes, GFP_KERNEL);
1445                if (iov == NULL) {
1446                        pr_debug("%s:%s:%d alloc iovec failed %d\n",
1447                                 skdev->name, __func__, __LINE__,
1448                                 sgp->iovec_count);
1449                        return -ENOMEM;
1450                }
1451                sksgio->iov = iov;
1452                sksgio->iovcnt = sgp->iovec_count;
1453
1454                if (copy_from_user(iov, sgp->dxferp, nbytes)) {
1455                        pr_debug("%s:%s:%d copy_from_user iovec failed %p\n",
1456                                 skdev->name, __func__, __LINE__, sgp->dxferp);
1457                        return -EFAULT;
1458                }
1459
1460                /*
1461                 * Sum up the vecs, making sure they don't overflow
1462                 */
1463                iov_data_len = 0;
1464                for (i = 0; i < sgp->iovec_count; i++) {
1465                        if (iov_data_len + iov[i].iov_len < iov_data_len)
1466                                return -EINVAL;
1467                        iov_data_len += iov[i].iov_len;
1468                }
1469
1470                /* SG_IO howto says that the shorter of the two wins */
1471                if (sgp->dxfer_len < iov_data_len) {
1472                        sksgio->iovcnt = iov_shorten((struct iovec *)iov,
1473                                                     sgp->iovec_count,
1474                                                     sgp->dxfer_len);
1475                        sksgio->dxfer_len = sgp->dxfer_len;
1476                } else
1477                        sksgio->dxfer_len = iov_data_len;
1478        }
1479
1480        if (sgp->dxfer_direction != SG_DXFER_NONE) {
1481                struct sg_iovec *iov = sksgio->iov;
1482                for (i = 0; i < sksgio->iovcnt; i++, iov++) {
1483                        if (!access_ok(acc, iov->iov_base, iov->iov_len)) {
1484                                pr_debug("%s:%s:%d access data failed %p/%d\n",
1485                                         skdev->name, __func__, __LINE__,
1486                                         iov->iov_base, (int)iov->iov_len);
1487                                return -EFAULT;
1488                        }
1489                }
1490        }
1491
1492        return 0;
1493}
1494
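/*
 * Obtain a special (SG_IO) request context from the free list, sleeping
 * interruptibly for up to the caller's SG_IO timeout if none is free.
 * Returns 0 with sksgio->skspcl set, -ETIMEDOUT on timeout, or -EINTR
 * if interrupted by a signal.
 */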
1495static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
1496                                   struct skd_sg_io *sksgio)
1497{
1498        struct skd_special_context *skspcl = NULL;
1499        int rc;
1500
1501        for (;;) {
1502                ulong flags;
1503
1504                spin_lock_irqsave(&skdev->lock, flags);
1505                skspcl = skdev->skspcl_free_list;
1506                if (skspcl != NULL) {
1507                        skdev->skspcl_free_list =
1508                                (struct skd_special_context *)skspcl->req.next;
1509                        skspcl->req.id += SKD_ID_INCR;
1510                        skspcl->req.state = SKD_REQ_STATE_SETUP;
1511                        skspcl->orphaned = 0;
1512                        skspcl->req.n_sg = 0;
1513                }
1514                spin_unlock_irqrestore(&skdev->lock, flags);
1515
1516                if (skspcl != NULL) {
1517                        rc = 0;
1518                        break;
1519                }
1520
1521                pr_debug("%s:%s:%d blocking\n",
1522                         skdev->name, __func__, __LINE__);
1523
1524                rc = wait_event_interruptible_timeout(
1525                                skdev->waitq,
1526                                (skdev->skspcl_free_list != NULL),
1527                                msecs_to_jiffies(sksgio->sg.timeout));
1528
1529                pr_debug("%s:%s:%d unblocking, rc=%d\n",
1530                         skdev->name, __func__, __LINE__, rc);
1531
1532                if (rc <= 0) {
1533                        if (rc == 0)
1534                                rc = -ETIMEDOUT;
1535                        else
1536                                rc = -EINTR;
1537                        break;
1538                }
1539                /*
1540                 * If we get here, rc > 0, meaning
1541                 * wait_event_interruptible_timeout() returned before its
1542                 * timeout expired because the awaited event -- a non-empty
1543                 * free list -- occurred. Retry the allocation.
1544                 */
1545        }
1546        sksgio->skspcl = skspcl;
1547
1548        return rc;
1549}
1550
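/*
 * Build a bounce buffer for the transfer: allocate one page for each
 * PAGE_SIZE chunk of dxfer_len (rounded up to a multiple of 4 bytes for
 * the DMA engine) and chain the FIT SG descriptors that describe it.
 */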
1551static int skd_skreq_prep_buffering(struct skd_device *skdev,
1552                                    struct skd_request_context *skreq,
1553                                    u32 dxfer_len)
1554{
1555        u32 resid = dxfer_len;
1556
1557        /*
1558         * The DMA engine must have aligned addresses and byte counts.
1559         */
1560        resid += (-resid) & 3;
1561        skreq->sg_byte_count = resid;
1562
1563        skreq->n_sg = 0;
1564
1565        while (resid > 0) {
1566                u32 nbytes = PAGE_SIZE;
1567                u32 ix = skreq->n_sg;
1568                struct scatterlist *sg = &skreq->sg[ix];
1569                struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
1570                struct page *page;
1571
1572                if (nbytes > resid)
1573                        nbytes = resid;
1574
1575                page = alloc_page(GFP_KERNEL);
1576                if (page == NULL)
1577                        return -ENOMEM;
1578
1579                sg_set_page(sg, page, nbytes, 0);
1580
1581                /* TODO: This should be going through a pci_???()
1582                 * routine to do proper mapping. */
1583                sksg->control = FIT_SGD_CONTROL_NOT_LAST;
1584                sksg->byte_count = nbytes;
1585
1586                sksg->host_side_addr = sg_phys(sg);
1587
1588                sksg->dev_side_addr = 0;
1589                sksg->next_desc_ptr = skreq->sksg_dma_address +
1590                                      (ix + 1) * sizeof(*sksg);
1591
1592                skreq->n_sg++;
1593                resid -= nbytes;
1594        }
1595
1596        if (skreq->n_sg > 0) {
1597                u32 ix = skreq->n_sg - 1;
1598                struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
1599
1600                sksg->control = FIT_SGD_CONTROL_LAST;
1601                sksg->next_desc_ptr = 0;
1602        }
1603
1604        if (unlikely(skdev->dbg_level > 1)) {
1605                u32 i;
1606
1607                pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
1608                         skdev->name, __func__, __LINE__,
1609                         skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
1610                for (i = 0; i < skreq->n_sg; i++) {
1611                        struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
1612
1613                        pr_debug("%s:%s:%d   sg[%d] count=%u ctrl=0x%x "
1614                                 "addr=0x%llx next=0x%llx\n",
1615                                 skdev->name, __func__, __LINE__,
1616                                 i, sgd->byte_count, sgd->control,
1617                                 sgd->host_side_addr, sgd->next_desc_ptr);
1618                }
1619        }
1620
1621        return 0;
1622}
1623
1624static int skd_sg_io_prep_buffering(struct skd_device *skdev,
1625                                    struct skd_sg_io *sksgio)
1626{
1627        struct skd_special_context *skspcl = sksgio->skspcl;
1628        struct skd_request_context *skreq = &skspcl->req;
1629        u32 dxfer_len = sksgio->dxfer_len;
1630        int rc;
1631
1632        rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len);
1633        /*
1634         * Eventually, errors or not, skd_release_special() is called
1635         * to recover allocations including partial allocations.
1636         */
1637        return rc;
1638}
1639
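/*
 * Copy data between the caller's iovecs and the bounce pages set up by
 * skd_skreq_prep_buffering(), in the direction given by dxfer_dir.
 * Does nothing when that direction does not apply to this command.
 */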
1640static int skd_sg_io_copy_buffer(struct skd_device *skdev,
1641                                 struct skd_sg_io *sksgio, int dxfer_dir)
1642{
1643        struct skd_special_context *skspcl = sksgio->skspcl;
1644        u32 iov_ix = 0;
1645        struct sg_iovec curiov;
1646        u32 sksg_ix = 0;
1647        u8 *bufp = NULL;
1648        u32 buf_len = 0;
1649        u32 resid = sksgio->dxfer_len;
1650        int rc;
1651
1652        curiov.iov_len = 0;
1653        curiov.iov_base = NULL;
1654
1655        if (dxfer_dir != sksgio->sg.dxfer_direction) {
1656                if (dxfer_dir != SG_DXFER_TO_DEV ||
1657                    sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV)
1658                        return 0;
1659        }
1660
1661        while (resid > 0) {
1662                u32 nbytes = PAGE_SIZE;
1663
1664                if (curiov.iov_len == 0) {
1665                        curiov = sksgio->iov[iov_ix++];
1666                        continue;
1667                }
1668
1669                if (buf_len == 0) {
1670                        struct page *page;
1671                        page = sg_page(&skspcl->req.sg[sksg_ix++]);
1672                        bufp = page_address(page);
1673                        buf_len = PAGE_SIZE;
1674                }
1675
1676                nbytes = min_t(u32, nbytes, resid);
1677                nbytes = min_t(u32, nbytes, curiov.iov_len);
1678                nbytes = min_t(u32, nbytes, buf_len);
1679
1680                if (dxfer_dir == SG_DXFER_TO_DEV)
1681                        rc = __copy_from_user(bufp, curiov.iov_base, nbytes);
1682                else
1683                        rc = __copy_to_user(curiov.iov_base, bufp, nbytes);
1684
1685                if (rc)
1686                        return -EFAULT;
1687
1688                resid -= nbytes;
1689                curiov.iov_len -= nbytes;
1690                curiov.iov_base += nbytes;
1691                buf_len -= nbytes;
1692        }
1693
1694        return 0;
1695}
1696
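/*
 * Wrap the caller's CDB in a single-command SOFIT message and hand it
 * to the device via skd_send_special_fitmsg().
 */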
1697static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
1698                                 struct skd_sg_io *sksgio)
1699{
1700        struct skd_special_context *skspcl = sksgio->skspcl;
1701        struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
1702        struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
1703
1704        memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES);
1705
1706        /* Initialize the FIT msg header */
1707        fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
1708        fmh->num_protocol_cmds_coalesced = 1;
1709
1710        /* Initialize the SCSI request */
1711        if (sksgio->sg.dxfer_direction != SG_DXFER_NONE)
1712                scsi_req->hdr.sg_list_dma_address =
1713                        cpu_to_be64(skspcl->req.sksg_dma_address);
1714        scsi_req->hdr.tag = skspcl->req.id;
1715        scsi_req->hdr.sg_list_len_bytes =
1716                cpu_to_be32(skspcl->req.sg_byte_count);
1717        memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb));
1718
1719        skspcl->req.state = SKD_REQ_STATE_BUSY;
1720        skd_send_special_fitmsg(skdev, skspcl);
1721
1722        return 0;
1723}
1724
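/*
 * Wait for the special request to leave the BUSY state. An aborted
 * request gets fabricated CHECK CONDITION status and sense data; a
 * request still busy after a timeout or signal is marked as an orphan
 * so the completion path can dispose of it later.
 */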
1725static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio)
1726{
1727        unsigned long flags;
1728        int rc;
1729
1730        rc = wait_event_interruptible_timeout(skdev->waitq,
1731                                              (sksgio->skspcl->req.state !=
1732                                               SKD_REQ_STATE_BUSY),
1733                                              msecs_to_jiffies(
1734                                                        sksgio->sg.timeout));
1735
1736        spin_lock_irqsave(&skdev->lock, flags);
1737
1738        if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) {
1739                pr_debug("%s:%s:%d skspcl %p aborted\n",
1740                         skdev->name, __func__, __LINE__, sksgio->skspcl);
1741
1742                /* Build check cond, sense and let command finish. */
1743                /* For a timeout, we must fabricate completion and sense
1744                 * data to complete the command */
1745                sksgio->skspcl->req.completion.status =
1746                        SAM_STAT_CHECK_CONDITION;
1747
1748                memset(&sksgio->skspcl->req.err_info, 0,
1749                       sizeof(sksgio->skspcl->req.err_info));
1750                sksgio->skspcl->req.err_info.type = 0x70;
1751                sksgio->skspcl->req.err_info.key = ABORTED_COMMAND;
1752                sksgio->skspcl->req.err_info.code = 0x44;
1753                sksgio->skspcl->req.err_info.qual = 0;
1754                rc = 0;
1755        } else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY)
1756                /* No longer on the adapter. We finish. */
1757                rc = 0;
1758        else {
1759                /* Something's gone wrong. Still busy. Timeout or
1760                 * user interrupted (control-C). Mark as an orphan
1761                 * so it will be disposed when completed. */
1762                sksgio->skspcl->orphaned = 1;
1763                sksgio->skspcl = NULL;
1764                if (rc == 0) {
1765                        pr_debug("%s:%s:%d timed out %p (%u ms)\n",
1766                                 skdev->name, __func__, __LINE__,
1767                                 sksgio, sksgio->sg.timeout);
1768                        rc = -ETIMEDOUT;
1769                } else {
1770                        pr_debug("%s:%s:%d cntlc %p\n",
1771                                 skdev->name, __func__, __LINE__, sksgio);
1772                        rc = -EINTR;
1773                }
1774        }
1775
1776        spin_unlock_irqrestore(&skdev->lock, flags);
1777
1778        return rc;
1779}
1780
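/*
 * Fill in the sg_io_hdr status and residual fields from the FIT
 * completion, copy any sense data to the caller's sense buffer, and
 * copy the updated header back to user space.
 */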
1781static int skd_sg_io_put_status(struct skd_device *skdev,
1782                                struct skd_sg_io *sksgio)
1783{
1784        struct sg_io_hdr *sgp = &sksgio->sg;
1785        struct skd_special_context *skspcl = sksgio->skspcl;
1786        int resid = 0;
1787
1788        u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes);
1789
1790        sgp->status = skspcl->req.completion.status;
1791        resid = sksgio->dxfer_len - nb;
1792
1793        sgp->masked_status = sgp->status & STATUS_MASK;
1794        sgp->msg_status = 0;
1795        sgp->host_status = 0;
1796        sgp->driver_status = 0;
1797        sgp->resid = resid;
1798        if (sgp->masked_status || sgp->host_status || sgp->driver_status)
1799                sgp->info |= SG_INFO_CHECK;
1800
1801        pr_debug("%s:%s:%d status %x masked %x resid 0x%x\n",
1802                 skdev->name, __func__, __LINE__,
1803                 sgp->status, sgp->masked_status, sgp->resid);
1804
1805        if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) {
1806                if (sgp->mx_sb_len > 0) {
1807                        struct fit_comp_error_info *ei = &skspcl->req.err_info;
1808                        u32 nbytes = sizeof(*ei);
1809
1810                        nbytes = min_t(u32, nbytes, sgp->mx_sb_len);
1811
1812                        sgp->sb_len_wr = nbytes;
1813
1814                        if (__copy_to_user(sgp->sbp, ei, nbytes)) {
1815                                pr_debug("%s:%s:%d copy_to_user sense failed %p\n",
1816                                         skdev->name, __func__, __LINE__,
1817                                         sgp->sbp);
1818                                return -EFAULT;
1819                        }
1820                }
1821        }
1822
1823        if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) {
1824                pr_debug("%s:%s:%d copy_to_user sg failed %p\n",
1825                         skdev->name, __func__, __LINE__, sksgio->argp);
1826                return -EFAULT;
1827        }
1828
1829        return 0;
1830}
1831
1832static int skd_sg_io_release_skspcl(struct skd_device *skdev,
1833                                    struct skd_sg_io *sksgio)
1834{
1835        struct skd_special_context *skspcl = sksgio->skspcl;
1836
1837        if (skspcl != NULL) {
1838                ulong flags;
1839
1840                sksgio->skspcl = NULL;
1841
1842                spin_lock_irqsave(&skdev->lock, flags);
1843                skd_release_special(skdev, skspcl);
1844                spin_unlock_irqrestore(&skdev->lock, flags);
1845        }
1846
1847        return 0;
1848}
1849
1850/*
1851 *****************************************************************************
1852 * INTERNAL REQUESTS -- generated by driver itself
1853 *****************************************************************************
1854 */
1855
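/*
 * Pre-format the driver's internal special context: a one-command SOFIT
 * header plus a single SG descriptor pointing at the internal data
 * buffer. Per-command fields are filled in later by
 * skd_send_internal_skspcl().
 */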
1856static int skd_format_internal_skspcl(struct skd_device *skdev)
1857{
1858        struct skd_special_context *skspcl = &skdev->internal_skspcl;
1859        struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
1860        struct fit_msg_hdr *fmh;
1861        uint64_t dma_address;
1862        struct skd_scsi_request *scsi;
1863
1864        fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0];
1865        fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
1866        fmh->num_protocol_cmds_coalesced = 1;
1867
1868        scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
1869        memset(scsi, 0, sizeof(*scsi));
1870        dma_address = skspcl->req.sksg_dma_address;
1871        scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
1872        sgd->control = FIT_SGD_CONTROL_LAST;
1873        sgd->byte_count = 0;
1874        sgd->host_side_addr = skspcl->db_dma_address;
1875        sgd->dev_side_addr = 0;
1876        sgd->next_desc_ptr = 0LL;
1877
1878        return 1;
1879}
1880
1881#define WR_BUF_SIZE SKD_N_INTERNAL_BYTES
1882
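/*
 * Issue one of the driver-generated commands (TEST UNIT READY,
 * READ CAPACITY, INQUIRY, SYNCHRONIZE CACHE, WRITE/READ BUFFER) on the
 * internal special context. If a command is already in flight this is a
 * no-op; skd_complete_internal() drives the next step.
 */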
1883static void skd_send_internal_skspcl(struct skd_device *skdev,
1884                                     struct skd_special_context *skspcl,
1885                                     u8 opcode)
1886{
1887        struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
1888        struct skd_scsi_request *scsi;
1889        unsigned char *buf = skspcl->data_buf;
1890        int i;
1891
1892        if (skspcl->req.state != SKD_REQ_STATE_IDLE)
1893                /*
1894                 * A refresh is already in progress.
1895                 * Just wait for it to finish.
1896                 */
1897                return;
1898
1899        SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
1900        skspcl->req.state = SKD_REQ_STATE_BUSY;
1901        skspcl->req.id += SKD_ID_INCR;
1902
1903        scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
1904        scsi->hdr.tag = skspcl->req.id;
1905
1906        memset(scsi->cdb, 0, sizeof(scsi->cdb));
1907
1908        switch (opcode) {
1909        case TEST_UNIT_READY:
1910                scsi->cdb[0] = TEST_UNIT_READY;
1911                sgd->byte_count = 0;
1912                scsi->hdr.sg_list_len_bytes = 0;
1913                break;
1914
1915        case READ_CAPACITY:
1916                scsi->cdb[0] = READ_CAPACITY;
1917                sgd->byte_count = SKD_N_READ_CAP_BYTES;
1918                scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1919                break;
1920
1921        case INQUIRY:
1922                scsi->cdb[0] = INQUIRY;
1923                scsi->cdb[1] = 0x01;    /* evpd */
1924                scsi->cdb[2] = 0x80;    /* serial number page */
1925                scsi->cdb[4] = 0x10;
1926                sgd->byte_count = 16;
1927                scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1928                break;
1929
1930        case SYNCHRONIZE_CACHE:
1931                scsi->cdb[0] = SYNCHRONIZE_CACHE;
1932                sgd->byte_count = 0;
1933                scsi->hdr.sg_list_len_bytes = 0;
1934                break;
1935
1936        case WRITE_BUFFER:
1937                scsi->cdb[0] = WRITE_BUFFER;
1938                scsi->cdb[1] = 0x02;
1939                scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
1940                scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
1941                sgd->byte_count = WR_BUF_SIZE;
1942                scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1943                /* fill incrementing byte pattern */
1944                for (i = 0; i < sgd->byte_count; i++)
1945                        buf[i] = i & 0xFF;
1946                break;
1947
1948        case READ_BUFFER:
1949                scsi->cdb[0] = READ_BUFFER;
1950                scsi->cdb[1] = 0x02;
1951                scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
1952                scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
1953                sgd->byte_count = WR_BUF_SIZE;
1954                scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1955                memset(skspcl->data_buf, 0, sgd->byte_count);
1956                break;
1957
1958        default:
1959                SKD_ASSERT(!"Don't know what to send");
1960                return;
1961
1962        }
1963        skd_send_special_fitmsg(skdev, skspcl);
1964}
1965
1966static void skd_refresh_device_data(struct skd_device *skdev)
1967{
1968        struct skd_special_context *skspcl = &skdev->internal_skspcl;
1969
1970        skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
1971}
1972
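/*
 * Verify that READ BUFFER returned the incrementing byte pattern that
 * WRITE BUFFER sent. Returns 0 on a match, 1 on any mismatch.
 */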
1973static int skd_chk_read_buf(struct skd_device *skdev,
1974                            struct skd_special_context *skspcl)
1975{
1976        unsigned char *buf = skspcl->data_buf;
1977        int i;
1978
1979        /* check for incrementing byte pattern */
1980        for (i = 0; i < WR_BUF_SIZE; i++)
1981                if (buf[i] != (i & 0xFF))
1982                        return 1;
1983
1984        return 0;
1985}
1986
1987static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
1988                                 u8 code, u8 qual, u8 fruc)
1989{
1990        /* If the check condition is of special interest, log a message */
1991        if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
1992            && (code == 0x04) && (qual == 0x06)) {
1993                pr_err("(%s): *** LOST_WRITE_DATA ERROR *** key/asc/"
1994                       "ascq/fruc %02x/%02x/%02x/%02x\n",
1995                       skd_name(skdev), key, code, qual, fruc);
1996        }
1997}
1998
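/*
 * Completion handler for the driver's internal commands. It steps
 * through TEST UNIT READY -> WRITE BUFFER -> READ BUFFER ->
 * READ CAPACITY -> INQUIRY, retrying on errors, and wakes waiters when
 * SYNCHRONIZE CACHE finishes.
 */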
1999static void skd_complete_internal(struct skd_device *skdev,
2000                                  volatile struct fit_completion_entry_v1
2001                                  *skcomp,
2002                                  volatile struct fit_comp_error_info *skerr,
2003                                  struct skd_special_context *skspcl)
2004{
2005        u8 *buf = skspcl->data_buf;
2006        u8 status;
2007        int i;
2008        struct skd_scsi_request *scsi =
2009                (struct skd_scsi_request *)&skspcl->msg_buf[64];
2010
2011        SKD_ASSERT(skspcl == &skdev->internal_skspcl);
2012
2013        pr_debug("%s:%s:%d complete internal %x\n",
2014                 skdev->name, __func__, __LINE__, scsi->cdb[0]);
2015
2016        skspcl->req.completion = *skcomp;
2017        skspcl->req.state = SKD_REQ_STATE_IDLE;
2018        skspcl->req.id += SKD_ID_INCR;
2019
2020        status = skspcl->req.completion.status;
2021
2022        skd_log_check_status(skdev, status, skerr->key, skerr->code,
2023                             skerr->qual, skerr->fruc);
2024
2025        switch (scsi->cdb[0]) {
2026        case TEST_UNIT_READY:
2027                if (status == SAM_STAT_GOOD)
2028                        skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
2029                else if ((status == SAM_STAT_CHECK_CONDITION) &&
2030                         (skerr->key == MEDIUM_ERROR))
2031                        skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
2032                else {
2033                        if (skdev->state == SKD_DRVR_STATE_STOPPING) {
2034                                pr_debug("%s:%s:%d TUR failed, not sending any more, state 0x%x\n",
2035                                         skdev->name, __func__, __LINE__,
2036                                         skdev->state);
2037                                return;
2038                        }
2039                        pr_debug("%s:%s:%d **** TUR failed, retry skerr\n",
2040                                 skdev->name, __func__, __LINE__);
2041                        skd_send_internal_skspcl(skdev, skspcl, 0x00);
2042                }
2043                break;
2044
2045        case WRITE_BUFFER:
2046                if (status == SAM_STAT_GOOD)
2047                        skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
2048                else {
2049                        if (skdev->state == SKD_DRVR_STATE_STOPPING) {
2050                                pr_debug("%s:%s:%d write buffer failed, not sending any more, state 0x%x\n",
2051                                         skdev->name, __func__, __LINE__,
2052                                         skdev->state);
2053                                return;
2054                        }
2055                        pr_debug("%s:%s:%d **** write buffer failed, retry skerr\n",
2056                                 skdev->name, __func__, __LINE__);
2057                        skd_send_internal_skspcl(skdev, skspcl, 0x00);
2058                }
2059                break;
2060
2061        case READ_BUFFER:
2062                if (status == SAM_STAT_GOOD) {
2063                        if (skd_chk_read_buf(skdev, skspcl) == 0)
2064                                skd_send_internal_skspcl(skdev, skspcl,
2065                                                         READ_CAPACITY);
2066                        else {
2067                                pr_err(
2068                                       "(%s):*** W/R Buffer mismatch %d ***\n",
2069                                       skd_name(skdev), skdev->connect_retries);
2070                                if (skdev->connect_retries <
2071                                    SKD_MAX_CONNECT_RETRIES) {
2072                                        skdev->connect_retries++;
2073                                        skd_soft_reset(skdev);
2074                                } else {
2075                                        pr_err(
2076                                               "(%s): W/R Buffer Connect Error\n",
2077                                               skd_name(skdev));
2078                                        return;
2079                                }
2080                        }
2081
2082                } else {
2083                        if (skdev->state == SKD_DRVR_STATE_STOPPING) {
2084                                pr_debug("%s:%s:%d "
2085                                         "read buffer failed, not sending any more, state 0x%x\n",
2086                                         skdev->name, __func__, __LINE__,
2087                                         skdev->state);
2088                                return;
2089                        }
2090                        pr_debug("%s:%s:%d "
2091                                 "**** read buffer failed, retry skerr\n",
2092                                 skdev->name, __func__, __LINE__);
2093                        skd_send_internal_skspcl(skdev, skspcl, 0x00);
2094                }
2095                break;
2096
2097        case READ_CAPACITY:
2098                skdev->read_cap_is_valid = 0;
2099                if (status == SAM_STAT_GOOD) {
2100                        skdev->read_cap_last_lba =
2101                                (buf[0] << 24) | (buf[1] << 16) |
2102                                (buf[2] << 8) | buf[3];
2103                        skdev->read_cap_blocksize =
2104                                (buf[4] << 24) | (buf[5] << 16) |
2105                                (buf[6] << 8) | buf[7];
2106
2107                        pr_debug("%s:%s:%d last lba %d, bs %d\n",
2108                                 skdev->name, __func__, __LINE__,
2109                                 skdev->read_cap_last_lba,
2110                                 skdev->read_cap_blocksize);
2111
2112                        set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
2113
2114                        skdev->read_cap_is_valid = 1;
2115
2116                        skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
2117                } else if ((status == SAM_STAT_CHECK_CONDITION) &&
2118                           (skerr->key == MEDIUM_ERROR)) {
2119                        skdev->read_cap_last_lba = ~0;
2120                        set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
2121                        pr_debug("%s:%s:%d "
2122                                 "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n",
2123                                 skdev->name, __func__, __LINE__);
2124                        skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
2125                } else {
2126                        pr_debug("%s:%s:%d **** READCAP failed, retry TUR\n",
2127                                 skdev->name, __func__, __LINE__);
2128                        skd_send_internal_skspcl(skdev, skspcl,
2129                                                 TEST_UNIT_READY);
2130                }
2131                break;
2132
2133        case INQUIRY:
2134                skdev->inquiry_is_valid = 0;
2135                if (status == SAM_STAT_GOOD) {
2136                        skdev->inquiry_is_valid = 1;
2137
2138                        for (i = 0; i < 12; i++)
2139                                skdev->inq_serial_num[i] = buf[i + 4];
2140                        skdev->inq_serial_num[12] = 0;
2141                }
2142
2143                if (skd_unquiesce_dev(skdev) < 0)
2144                        pr_debug("%s:%s:%d **** failed to ONLINE device\n",
2145                                 skdev->name, __func__, __LINE__);
2146                 /* connection is complete */
2147                skdev->connect_retries = 0;
2148                break;
2149
2150        case SYNCHRONIZE_CACHE:
2151                if (status == SAM_STAT_GOOD)
2152                        skdev->sync_done = 1;
2153                else
2154                        skdev->sync_done = -1;
2155                wake_up_interruptible(&skdev->waitq);
2156                break;
2157
2158        default:
2159                SKD_ASSERT(!"we didn't send this");
2160        }
2161}
2162
2163/*
2164 *****************************************************************************
2165 * FIT MESSAGES
2166 *****************************************************************************
2167 */
2168
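/*
 * Post a coalesced FIT message to the device: the message's DMA address
 * and a size class (64/128/256/512 bytes) are combined into a single
 * write to the FIT_Q_COMMAND register.
 */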
2169static void skd_send_fitmsg(struct skd_device *skdev,
2170                            struct skd_fitmsg_context *skmsg)
2171{
2172        u64 qcmd;
2173        struct fit_msg_hdr *fmh;
2174
2175        pr_debug("%s:%s:%d dma address 0x%llx, busy=%d\n",
2176                 skdev->name, __func__, __LINE__,
2177                 skmsg->mb_dma_address, skdev->in_flight);
2178        pr_debug("%s:%s:%d msg_buf 0x%p, offset %x\n",
2179                 skdev->name, __func__, __LINE__,
2180                 skmsg->msg_buf, skmsg->offset);
2181
2182        qcmd = skmsg->mb_dma_address;
2183        qcmd |= FIT_QCMD_QID_NORMAL;
2184
2185        fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
2186        skmsg->outstanding = fmh->num_protocol_cmds_coalesced;
2187
2188        if (unlikely(skdev->dbg_level > 1)) {
2189                u8 *bp = (u8 *)skmsg->msg_buf;
2190                int i;
2191                for (i = 0; i < skmsg->length; i += 8) {
2192                        pr_debug("%s:%s:%d msg[%2d] %02x %02x %02x %02x "
2193                                 "%02x %02x %02x %02x\n",
2194                                 skdev->name, __func__, __LINE__,
2195                                 i, bp[i + 0], bp[i + 1], bp[i + 2],
2196                                 bp[i + 3], bp[i + 4], bp[i + 5],
2197                                 bp[i + 6], bp[i + 7]);
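                        /* dump the first 8 bytes of the 64-byte FIT
                         * header, then skip ahead to the first command
                         */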
2198                        if (i == 0)
2199                                i = 64 - 8;
2200                }
2201        }
2202
2203        if (skmsg->length > 256)
2204                qcmd |= FIT_QCMD_MSGSIZE_512;
2205        else if (skmsg->length > 128)
2206                qcmd |= FIT_QCMD_MSGSIZE_256;
2207        else if (skmsg->length > 64)
2208                qcmd |= FIT_QCMD_MSGSIZE_128;
2209        else
2210                /*
2211                 * This makes no sense because the FIT msg header is
2212                 * 64 bytes. If the msg is only 64 bytes long it has
2213                 * no payload.
2214                 */
2215                qcmd |= FIT_QCMD_MSGSIZE_64;
2216
2217        SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
2218
2219}
2220
2221static void skd_send_special_fitmsg(struct skd_device *skdev,
2222                                    struct skd_special_context *skspcl)
2223{
2224        u64 qcmd;
2225
2226        if (unlikely(skdev->dbg_level > 1)) {
2227                u8 *bp = (u8 *)skspcl->msg_buf;
2228                int i;
2229
2230                for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
2231                        pr_debug("%s:%s:%d  spcl[%2d] %02x %02x %02x %02x  "
2232                                 "%02x %02x %02x %02x\n",
2233                                 skdev->name, __func__, __LINE__, i,
2234                                 bp[i + 0], bp[i + 1], bp[i + 2], bp[i + 3],
2235                                 bp[i + 4], bp[i + 5], bp[i + 6], bp[i + 7]);
2236                        if (i == 0)
2237                                i = 64 - 8;
2238                }
2239
2240                pr_debug("%s:%s:%d skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
2241                         skdev->name, __func__, __LINE__,
2242                         skspcl, skspcl->req.id, skspcl->req.sksg_list,
2243                         skspcl->req.sksg_dma_address);
2244                for (i = 0; i < skspcl->req.n_sg; i++) {
2245                        struct fit_sg_descriptor *sgd =
2246                                &skspcl->req.sksg_list[i];
2247
2248                        pr_debug("%s:%s:%d   sg[%d] count=%u ctrl=0x%x "
2249                                 "addr=0x%llx next=0x%llx\n",
2250                                 skdev->name, __func__, __LINE__,
2251                                 i, sgd->byte_count, sgd->control,
2252                                 sgd->host_side_addr, sgd->next_desc_ptr);
2253                }
2254        }
2255
2256        /*
2257         * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
2258         * and one 64-byte SSDI command.
2259         */
2260        qcmd = skspcl->mb_dma_address;
2261        qcmd |= FIT_QCMD_QID_NORMAL | FIT_QCMD_MSGSIZE_128;
2262
2263        SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
2264}
2265
2266/*
2267 *****************************************************************************
2268 * COMPLETION QUEUE
2269 *****************************************************************************
2270 */
2271
2272static void skd_complete_other(struct skd_device *skdev,
2273                               volatile struct fit_completion_entry_v1 *skcomp,
2274                               volatile struct fit_comp_error_info *skerr);
2275
2276struct sns_info {
2277        u8 type;
2278        u8 stat;
2279        u8 key;
2280        u8 asc;
2281        u8 ascq;
2282        u8 mask;
2283        enum skd_check_status_action action;
2284};
2285
2286static struct sns_info skd_chkstat_table[] = {
2287        /* Good */
2288        { 0x70, 0x02, RECOVERED_ERROR, 0,    0,    0x1c,
2289          SKD_CHECK_STATUS_REPORT_GOOD },
2290
2291        /* Smart alerts */
2292        { 0x70, 0x02, NO_SENSE,        0x0B, 0x00, 0x1E,        /* warnings */
2293          SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2294        { 0x70, 0x02, NO_SENSE,        0x5D, 0x00, 0x1E,        /* thresholds */
2295          SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2296        { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F,        /* temperature over trigger */
2297          SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2298
2299        /* Retry (with limits) */
2300        { 0x70, 0x02, 0x0B,            0,    0,    0x1C,        /* This one is for DMA ERROR */
2301          SKD_CHECK_STATUS_REQUEUE_REQUEST },
2302        { 0x70, 0x02, 0x06,            0x0B, 0x00, 0x1E,        /* warnings */
2303          SKD_CHECK_STATUS_REQUEUE_REQUEST },
2304        { 0x70, 0x02, 0x06,            0x5D, 0x00, 0x1E,        /* thresholds */
2305          SKD_CHECK_STATUS_REQUEUE_REQUEST },
2306        { 0x70, 0x02, 0x06,            0x80, 0x30, 0x1F,        /* backup power */
2307          SKD_CHECK_STATUS_REQUEUE_REQUEST },
2308
2309        /* Busy (or about to be) */
2310        { 0x70, 0x02, 0x06,            0x3f, 0x01, 0x1F, /* fw changed */
2311          SKD_CHECK_STATUS_BUSY_IMMINENT },
2312};
2313
2314/*
2315 * Look up status and sense data to decide how to handle the error
2316 * from the device.
2317 * mask says which fields must match, e.g. mask=0x18 means check
2318 * type and stat, ignore key, asc, ascq.
2319 */
2320
2321static enum skd_check_status_action
2322skd_check_status(struct skd_device *skdev,
2323                 u8 cmp_status, volatile struct fit_comp_error_info *skerr)
2324{
2325        int i, n;
2326
2327        pr_err("(%s): key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
2328               skd_name(skdev), skerr->key, skerr->code, skerr->qual,
2329               skerr->fruc);
2330
2331        pr_debug("%s:%s:%d stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
2332                 skdev->name, __func__, __LINE__, skerr->type, cmp_status,
2333                 skerr->key, skerr->code, skerr->qual, skerr->fruc);
2334
2335        /* Does the info match an entry in the check status table? */
2336        n = sizeof(skd_chkstat_table) / sizeof(skd_chkstat_table[0]);
2337        for (i = 0; i < n; i++) {
2338                struct sns_info *sns = &skd_chkstat_table[i];
2339
2340                if (sns->mask & 0x10)
2341                        if (skerr->type != sns->type)
2342                                continue;
2343
2344                if (sns->mask & 0x08)
2345                        if (cmp_status != sns->stat)
2346                                continue;
2347
2348                if (sns->mask & 0x04)
2349                        if (skerr->key != sns->key)
2350                                continue;
2351
2352                if (sns->mask & 0x02)
2353                        if (skerr->code != sns->asc)
2354                                continue;
2355
2356                if (sns->mask & 0x01)
2357                        if (skerr->qual != sns->ascq)
2358                                continue;
2359
2360                if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
2361                        pr_err("(%s): SMART Alert: sense key/asc/ascq "
2362                               "%02x/%02x/%02x\n",
2363                               skd_name(skdev), skerr->key,
2364                               skerr->code, skerr->qual);
2365                }
2366                return sns->action;
2367        }
2368
2369        /* No other match, so nonzero status means error,
2370         * zero status means good
2371         */
2372        if (cmp_status) {
2373                pr_debug("%s:%s:%d status check: error\n",
2374                         skdev->name, __func__, __LINE__);
2375                return SKD_CHECK_STATUS_REPORT_ERROR;
2376        }
2377
2378        pr_debug("%s:%s:%d status check good default\n",
2379                 skdev->name, __func__, __LINE__);
2380        return SKD_CHECK_STATUS_REPORT_GOOD;
2381}
2382
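/*
 * A request completed with bad status. Use skd_check_status() to decide
 * whether to complete it as good, requeue it (subject to a retry
 * limit), quiesce ahead of an imminent busy period, or fail it
 * with -EIO.
 */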
2383static void skd_resolve_req_exception(struct skd_device *skdev,
2384                                      struct skd_request_context *skreq)
2385{
2386        u8 cmp_status = skreq->completion.status;
2387
2388        switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
2389        case SKD_CHECK_STATUS_REPORT_GOOD:
2390        case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
2391                skd_end_request(skdev, skreq, 0);
2392                break;
2393
2394        case SKD_CHECK_STATUS_BUSY_IMMINENT:
2395                skd_log_skreq(skdev, skreq, "retry(busy)");
2396                blk_requeue_request(skdev->queue, skreq->req);
2397                pr_info("(%s) drive BUSY imminent\n", skd_name(skdev));
2398                skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
2399                skdev->timer_countdown = SKD_TIMER_MINUTES(20);
2400                skd_quiesce_dev(skdev);
2401                break;
2402
2403        case SKD_CHECK_STATUS_REQUEUE_REQUEST:
2404                if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) {
2405                        skd_log_skreq(skdev, skreq, "retry");
2406                        blk_requeue_request(skdev->queue, skreq->req);
2407                        break;
2408                }
2409        /* fall through to report error */
2410
2411        case SKD_CHECK_STATUS_REPORT_ERROR:
2412        default:
2413                skd_end_request(skdev, skreq, -EIO);
2414                break;
2415        }
2416}
2417
2418/* assume spinlock is already held */
2419static void skd_release_skreq(struct skd_device *skdev,
2420                              struct skd_request_context *skreq)
2421{
2422        u32 msg_slot;
2423        struct skd_fitmsg_context *skmsg;
2424
2425        u32 timo_slot;
2426
2427        /*
2428         * Reclaim the FIT msg buffer if this is
2429         * the first of the requests it carried to
2430         * be completed. The FIT msg buffer used to
2431         * send this request cannot be reused until
2432         * we are sure the s1120 card has copied
2433         * it to its memory. The FIT msg might have
2434         * contained several requests. As soon as
2435         * any of them are completed we know that
2436         * the entire FIT msg was transferred.
2437         * Only the first completed request will
2438         * match the FIT msg buffer id. The FIT
2439         * msg buffer id is immediately updated.
2440         * When subsequent requests complete the FIT
2441         * msg buffer id won't match, so we know
2442         * quite cheaply that it is already done.
2443         */
2444        msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
2445        SKD_ASSERT(msg_slot < skdev->num_fitmsg_context);
2446
2447        skmsg = &skdev->skmsg_table[msg_slot];
2448        if (skmsg->id == skreq->fitmsg_id) {
2449                SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY);
2450                SKD_ASSERT(skmsg->outstanding > 0);
2451                skmsg->outstanding--;
2452                if (skmsg->outstanding == 0) {
2453                        skmsg->state = SKD_MSG_STATE_IDLE;
2454                        skmsg->id += SKD_ID_INCR;
2455                        skmsg->next = skdev->skmsg_free_list;
2456                        skdev->skmsg_free_list = skmsg;
2457                }
2458        }
2459
2460        /*
2461         * Decrease the number of active requests.
2462         * Also decrements the count in the timeout slot.
2463         */
2464        SKD_ASSERT(skdev->in_flight > 0);
2465        skdev->in_flight -= 1;
2466
2467        timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
2468        SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0);
2469        skdev->timeout_slot[timo_slot] -= 1;
2470
2471        /*
2472         * Reset backpointer
2473         */
2474        skreq->req = NULL;
2475
2476        /*
2477         * Reclaim the skd_request_context
2478         */
2479        skreq->state = SKD_REQ_STATE_IDLE;
2480        skreq->id += SKD_ID_INCR;
2481        skreq->next = skdev->skreq_free_list;
2482        skdev->skreq_free_list = skreq;
2483}
2484
2485#define DRIVER_INQ_EVPD_PAGE_CODE   0xDA
2486
2487static void skd_do_inq_page_00(struct skd_device *skdev,
2488                               volatile struct fit_completion_entry_v1 *skcomp,
2489                               volatile struct fit_comp_error_info *skerr,
2490                               uint8_t *cdb, uint8_t *buf)
2491{
2492        uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size;
2493
2494        /* Caller requested "supported pages".  The driver needs to insert
2495         * its page.
2496         */
2497        pr_debug("%s:%s:%d skd_do_driver_inquiry: modify supported pages.\n",
2498                 skdev->name, __func__, __LINE__);
2499
2500        /* If the device rejected the request because the CDB was
2501         * improperly formed, then just leave.
2502         */
2503        if (skcomp->status == SAM_STAT_CHECK_CONDITION &&
2504            skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24)
2505                return;
2506
2507        /* Get the amount of space the caller allocated */
2508        max_bytes = (cdb[3] << 8) | cdb[4];
2509
2510        /* Get the number of pages actually returned by the device */
2511        drive_pages = (buf[2] << 8) | buf[3];
2512        drive_bytes = drive_pages + 4;
2513        new_size = drive_pages + 1;
2514
2515        /* Supported pages must be in numerical order, so find where
2516         * the driver page needs to be inserted into the list of
2517         * pages returned by the device.
2518         */
2519        for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) {
2520                if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE)
2521                        return; /* Device using this page code. abort */
2522                else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE)
2523                        break;
2524        }
2525
2526        if (insert_pt < max_bytes) {
2527                uint16_t u;
2528
2529                /* Shift everything up one byte to make room. */
2530                for (u = new_size + 3; u > insert_pt; u--)
2531                        buf[u] = buf[u - 1];
2532                buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE;
2533
2534                /* SCSI byte order increment of num_returned_bytes by 1 */
2535                skcomp->num_returned_bytes =
2536                        be32_to_cpu(skcomp->num_returned_bytes) + 1;
2537                skcomp->num_returned_bytes =
2538                        cpu_to_be32(skcomp->num_returned_bytes);
2539        }
2540
2541        /* update page length field to reflect the driver's page too */
2542        buf[2] = (uint8_t)((new_size >> 8) & 0xFF);
2543        buf[3] = (uint8_t)((new_size >> 0) & 0xFF);
2544}
2545
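/*
 * Read the PCIe link status register to report the negotiated link
 * speed and width in the driver INQUIRY page. Anything unrecognized is
 * reported as STEC_LINK_UNKNOWN / 0xFF.
 */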
2546static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width)
2547{
2548        int pcie_reg;
2549        u16 pci_bus_speed;
2550        u8 pci_lanes;
2551
2552        pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
2553        if (pcie_reg) {
2554                u16 linksta;
2555                pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta);
2556
2557                pci_bus_speed = linksta & 0xF;
2558                pci_lanes = (linksta & 0x3F0) >> 4;
2559        } else {
2560                *speed = STEC_LINK_UNKNOWN;
2561                *width = 0xFF;
2562                return;
2563        }
2564
2565        switch (pci_bus_speed) {
2566        case 1:
2567                *speed = STEC_LINK_2_5GTS;
2568                break;
2569        case 2:
2570                *speed = STEC_LINK_5GTS;
2571                break;
2572        case 3:
2573                *speed = STEC_LINK_8GTS;
2574                break;
2575        default:
2576                *speed = STEC_LINK_UNKNOWN;
2577                break;
2578        }
2579
2580        if (pci_lanes <= 0x20)
2581                *width = pci_lanes;
2582        else
2583                *width = 0xFF;
2584}
2585
2586static void skd_do_inq_page_da(struct skd_device *skdev,
2587                               volatile struct fit_completion_entry_v1 *skcomp,
2588                               volatile struct fit_comp_error_info *skerr,
2589                               uint8_t *cdb, uint8_t *buf)
2590{
2591        struct pci_dev *pdev = skdev->pdev;
2592        unsigned max_bytes;
2593        struct driver_inquiry_data inq;
2594        u16 val;
2595
2596        pr_debug("%s:%s:%d skd_do_driver_inquiry: return driver page\n",
2597                 skdev->name, __func__, __LINE__);
2598
2599        memset(&inq, 0, sizeof(inq));
2600
2601        inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE;
2602
2603        skd_get_link_info(pdev, &inq.pcie_link_speed, &inq.pcie_link_lanes);
2604        inq.pcie_bus_number = cpu_to_be16(pdev->bus->number);
2605        inq.pcie_device_number = PCI_SLOT(pdev->devfn);
2606        inq.pcie_function_number = PCI_FUNC(pdev->devfn);
2607
2608        pci_read_config_word(pdev, PCI_VENDOR_ID, &val);
2609        inq.pcie_vendor_id = cpu_to_be16(val);
2610
2611        pci_read_config_word(pdev, PCI_DEVICE_ID, &val);
2612        inq.pcie_device_id = cpu_to_be16(val);
2613
2614        pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &val);
2615        inq.pcie_subsystem_vendor_id = cpu_to_be16(val);
2616
2617        pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &val);
2618        inq.pcie_subsystem_device_id = cpu_to_be16(val);
2619
2620        /* Driver version, fixed length, padded with spaces on the right */
2621        inq.driver_version_length = sizeof(inq.driver_version);
2622        memset(&inq.driver_version, ' ', sizeof(inq.driver_version));
2623        memcpy(inq.driver_version, DRV_VER_COMPL,
2624               min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL)));
2625
2626        inq.page_length = cpu_to_be16((sizeof(inq) - 4));
2627
2628        /* Clear the error set by the device */
2629        skcomp->status = SAM_STAT_GOOD;
2630        memset((void *)skerr, 0, sizeof(*skerr));
2631
2632        /* copy response into output buffer */
2633        max_bytes = (cdb[3] << 8) | cdb[4];
2634        memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq)));
2635
2636        skcomp->num_returned_bytes =
2637                cpu_to_be32(min_t(uint16_t, max_bytes, sizeof(inq)));
2638}
2639
2640static void skd_do_driver_inq(struct skd_device *skdev,
2641                              volatile struct fit_completion_entry_v1 *skcomp,
2642                              volatile struct fit_comp_error_info *skerr,
2643                              uint8_t *cdb, uint8_t *buf)
2644{
2645        if (!buf)
2646                return;
2647        else if (cdb[0] != INQUIRY)
2648                return;         /* Not an INQUIRY */
2649        else if ((cdb[1] & 1) == 0)
2650                return;         /* EVPD not set */
2651        else if (cdb[2] == 0)
2652                /* Need to add driver's page to supported pages list */
2653                skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf);
2654        else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE)
2655                /* Caller requested driver's page */
2656                skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf);
2657}
2658
2659static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg)
2660{
2661        if (!sg)
2662                return NULL;
2663        if (!sg_page(sg))
2664                return NULL;
2665        return sg_virt(sg);
2666}
2667
2668static void skd_process_scsi_inq(struct skd_device *skdev,
2669                                 volatile struct fit_completion_entry_v1
2670                                 *skcomp,
2671                                 volatile struct fit_comp_error_info *skerr,
2672                                 struct skd_special_context *skspcl)
2673{
2674        uint8_t *buf;
2675        struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
2676        struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
2677
2678        dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg,
2679                            skspcl->req.sg_data_dir);
2680        buf = skd_sg_1st_page_ptr(skspcl->req.sg);
2681
2682        if (buf)
2683                skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf);
2684}
2685
2686
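/*
 * Drain the completion ring. Each entry carries a cycle bit; when it no
 * longer matches skdev->skcomp_cycle we have caught up with the device.
 * R/W request completions are finished here, everything else is routed
 * to skd_complete_other(). Returns 1 if processing stopped because
 * `limit' entries were handled (a limit of zero means no limit).
 */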
2687static int skd_isr_completion_posted(struct skd_device *skdev,
2688                                        int limit, int *enqueued)
2689{
2690        volatile struct fit_completion_entry_v1 *skcmp = NULL;
2691        volatile struct fit_comp_error_info *skerr;
2692        u16 req_id;
2693        u32 req_slot;
2694        struct skd_request_context *skreq;
2695        u16 cmp_cntxt = 0;
2696        u8 cmp_status = 0;
2697        u8 cmp_cycle = 0;
2698        u32 cmp_bytes = 0;
2699        int rc = 0;
2700        int processed = 0;
2701
2702        for (;;) {
2703                SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
2704
2705                skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
2706                cmp_cycle = skcmp->cycle;
2707                cmp_cntxt = skcmp->tag;
2708                cmp_status = skcmp->status;
2709                cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);
2710
2711                skerr = &skdev->skerr_table[skdev->skcomp_ix];
2712
2713                pr_debug("%s:%s:%d "
2714                         "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d "
2715                         "busy=%d rbytes=0x%x proto=%d\n",
2716                         skdev->name, __func__, __LINE__, skdev->skcomp_cycle,
2717                         skdev->skcomp_ix, cmp_cycle, cmp_cntxt, cmp_status,
2718                         skdev->in_flight, cmp_bytes, skdev->proto_ver);
2719
2720                if (cmp_cycle != skdev->skcomp_cycle) {
2721                        pr_debug("%s:%s:%d end of completions\n",
2722                                 skdev->name, __func__, __LINE__);
2723                        break;
2724                }
2725                /*
2726                 * Update the completion queue head index and possibly
2727                 * the completion cycle count. 8-bit wrap-around.
2728                 */
2729                skdev->skcomp_ix++;
2730                if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
2731                        skdev->skcomp_ix = 0;
2732                        skdev->skcomp_cycle++;
2733                }
2734
2735                /*
2736                 * The command context is a unique 32-bit ID. The low order
2737                 * bits help locate the request. The request is usually a
2738                 * r/w request (see skd_start() above) or a special request.
2739                 */
2740                req_id = cmp_cntxt;
2741                req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
2742
2743                /* Is this other than a r/w request? */
2744                if (req_slot >= skdev->num_req_context) {
2745                        /*
2746                         * This is not a completion for a r/w request.
2747                         */
2748                        skd_complete_other(skdev, skcmp, skerr);
2749                        continue;
2750                }
2751
2752                skreq = &skdev->skreq_table[req_slot];
2753
2754                /*
2755                 * Make sure the request ID for the slot matches.
2756                 */
2757                if (skreq->id != req_id) {
2758                        pr_debug("%s:%s:%d mismatch comp_id=0x%x req_id=0x%x\n",
2759                                 skdev->name, __func__, __LINE__,
2760                                 req_id, skreq->id);
2761                        {
2762                                u16 new_id = cmp_cntxt;
2763                                pr_err("(%s): Completion mismatch "
2764                                       "comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
2765                                       skd_name(skdev), req_id,
2766                                       skreq->id, new_id);
2767
2768                                continue;
2769                        }
2770                }
2771
2772                SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);
2773
2774                if (skreq->state == SKD_REQ_STATE_ABORTED) {
2775                        pr_debug("%s:%s:%d reclaim req %p id=%04x\n",
2776                                 skdev->name, __func__, __LINE__,
2777                                 skreq, skreq->id);
2778                        /* a previously timed out command can
2779                         * now be cleaned up */
2780                        skd_release_skreq(skdev, skreq);
2781                        continue;
2782                }
2783
2784                skreq->completion = *skcmp;
2785                if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
2786                        skreq->err_info = *skerr;
2787                        skd_log_check_status(skdev, cmp_status, skerr->key,
2788                                             skerr->code, skerr->qual,
2789                                             skerr->fruc);
2790                }
2791                /* Release DMA resources for the request. */
2792                if (skreq->n_sg > 0)
2793                        skd_postop_sg_list(skdev, skreq);
2794
2795                if (!skreq->req) {
2796                        pr_debug("%s:%s:%d NULL backptr skdreq %p, "
2797                                 "req=0x%x req_id=0x%x\n",
2798                                 skdev->name, __func__, __LINE__,
2799                                 skreq, skreq->id, req_id);
2800                } else {
2801                        /*
2802                         * Capture the outcome and post it back to the
2803                         * native request.
2804                         */
2805                        if (likely(cmp_status == SAM_STAT_GOOD))
2806                                skd_end_request(skdev, skreq, 0);
2807                        else
2808                                skd_resolve_req_exception(skdev, skreq);
2809                }
2810
2811                /*
2812                 * Release the skreq, its FIT msg (if one), timeout slot,
2813                 * and queue depth.
2814                 */
2815                skd_release_skreq(skdev, skreq);
2816
2817                /* skd_isr_comp_limit equal to zero means no limit */
2818                if (limit) {
2819                        if (++processed >= limit) {
2820                                rc = 1;
2821                                break;
2822                        }
2823                }
2824        }
2825
2826        if ((skdev->state == SKD_DRVR_STATE_PAUSING) &&
2827            skdev->in_flight == 0) {
2828                skdev->state = SKD_DRVR_STATE_PAUSED;
2829                wake_up_interruptible(&skdev->waitq);
2830        }
2831
2832        return rc;
2833}
2834
2835static void skd_complete_other(struct skd_device *skdev,
2836                               volatile struct fit_completion_entry_v1 *skcomp,
2837                               volatile struct fit_comp_error_info *skerr)
2838{
2839        u32 req_id = 0;
2840        u32 req_table;
2841        u32 req_slot;
2842        struct skd_special_context *skspcl;
2843
2844        req_id = skcomp->tag;
2845        req_table = req_id & SKD_ID_TABLE_MASK;
2846        req_slot = req_id & SKD_ID_SLOT_MASK;
2847
2848        pr_debug("%s:%s:%d table=0x%x id=0x%x slot=%d\n",
2849                 skdev->name, __func__, __LINE__,
2850                 req_table, req_id, req_slot);
2851
2852        /*
2853         * Based on the request id, determine how to dispatch this completion.
2854         * This switch/case finds the good cases and forwards the
2855         * completion entry. Errors are reported below the switch.
2856         */
2857        switch (req_table) {
2858        case SKD_ID_RW_REQUEST:
2859                /*
2860                 * The caller, skd_completion_posted_isr() above,
2861                 * handles r/w requests. The only way we get here
2862                 * is if the req_slot is out of bounds.
2863                 */
2864                break;
2865
2866        case SKD_ID_SPECIAL_REQUEST:
2867                /*
2868                 * Make sure the req_slot is in bounds and that the id
2869                 * matches.
2870                 */
2871                if (req_slot < skdev->n_special) {
2872                        skspcl = &skdev->skspcl_table[req_slot];
2873                        if (skspcl->req.id == req_id &&
2874                            skspcl->req.state == SKD_REQ_STATE_BUSY) {
2875                                skd_complete_special(skdev,
2876                                                     skcomp, skerr, skspcl);
2877                                return;
2878                        }
2879                }
2880                break;
2881
2882        case SKD_ID_INTERNAL:
2883                if (req_slot == 0) {
2884                        skspcl = &skdev->internal_skspcl;
2885                        if (skspcl->req.id == req_id &&
2886                            skspcl->req.state == SKD_REQ_STATE_BUSY) {
2887                                skd_complete_internal(skdev,
2888                                                      skcomp, skerr, skspcl);
2889                                return;
2890                        }
2891                }
2892                break;
2893
2894        case SKD_ID_FIT_MSG:
2895                /*
2896                 * These ids should never appear in a completion record.
2897                 */
2898                break;
2899
2900        default:
2901                /*
2902                 * These ids should never appear anywhere.
2903                 */
2904                break;
2905        }
2906
2907        /*
2908         * If we get here it is a bad or stale id.
2909         */
2910}
2911
2912static void skd_complete_special(struct skd_device *skdev,
2913                                 volatile struct fit_completion_entry_v1
2914                                 *skcomp,
2915                                 volatile struct fit_comp_error_info *skerr,
2916                                 struct skd_special_context *skspcl)
2917{
2918        pr_debug("%s:%s:%d  completing special request %p\n",
2919                 skdev->name, __func__, __LINE__, skspcl);
2920        if (skspcl->orphaned) {
2921                /* Discard orphaned request */
2922                /* ?: Can this release directly or does it need
2923                 * to use a worker? */
2924                pr_debug("%s:%s:%d release orphaned %p\n",
2925                         skdev->name, __func__, __LINE__, skspcl);
2926                skd_release_special(skdev, skspcl);
2927                return;
2928        }
2929
2930        skd_process_scsi_inq(skdev, skcomp, skerr, skspcl);
2931
2932        skspcl->req.state = SKD_REQ_STATE_COMPLETED;
2933        skspcl->req.completion = *skcomp;
2934        skspcl->req.err_info = *skerr;
2935
2936        skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key,
2937                             skerr->code, skerr->qual, skerr->fruc);
2938
2939        wake_up_interruptible(&skdev->waitq);
2940}
2941
2942/* assume spinlock is already held */
2943static void skd_release_special(struct skd_device *skdev,
2944                                struct skd_special_context *skspcl)
2945{
2946        int i, was_depleted;
2947
2948        for (i = 0; i < skspcl->req.n_sg; i++) {
2949                struct page *page = sg_page(&skspcl->req.sg[i]);
2950                __free_page(page);
2951        }
2952
2953        was_depleted = (skdev->skspcl_free_list == NULL);
2954
2955        skspcl->req.state = SKD_REQ_STATE_IDLE;
2956        skspcl->req.id += SKD_ID_INCR;
2957        skspcl->req.next =
2958                (struct skd_request_context *)skdev->skspcl_free_list;
2959        skdev->skspcl_free_list = (struct skd_special_context *)skspcl;
2960
2961        if (was_depleted) {
2962                pr_debug("%s:%s:%d skspcl was depleted\n",
2963                         skdev->name, __func__, __LINE__);
2964                /* Free list was depleted. There might be waiters. */
2965                wake_up_interruptible(&skdev->waitq);
2966        }
2967}
2968
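    /*
     * Reset completion queue bookkeeping: zero the completion and error
     * entry tables (they share one allocation) and restart the consumer
     * index and cycle value.
     */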
2969static void skd_reset_skcomp(struct skd_device *skdev)
2970{
2971        u32 nbytes;
2972        struct fit_completion_entry_v1 *skcomp;
2973
2974        nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
2975        nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
2976
2977        memset(skdev->skcomp_table, 0, nbytes);
2978
2979        skdev->skcomp_ix = 0;
2980        skdev->skcomp_cycle = 1;
2981}
2982
2983/*
2984 *****************************************************************************
2985 * INTERRUPTS
2986 *****************************************************************************
2987 */
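    /*
     * Work item scheduled by the interrupt handlers to finish completion
     * processing outside the hard-IRQ path: under skdev->lock it drains
     * the completion queue with no entry limit and then restarts the
     * block request queue.
     */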
2988static void skd_completion_worker(struct work_struct *work)
2989{
2990        struct skd_device *skdev =
2991                container_of(work, struct skd_device, completion_worker);
2992        unsigned long flags;
2993        int flush_enqueued = 0;
2994
2995        spin_lock_irqsave(&skdev->lock, flags);
2996
2997        /*
2998         * Pass in limit=0, which means no limit:
2999         * process everything in the completion queue.
3000         */
3001        skd_isr_completion_posted(skdev, 0, &flush_enqueued);
3002        skd_request_fn(skdev->queue);
3003
3004        spin_unlock_irqrestore(&skdev->lock, flags);
3005}
3006
3007static void skd_isr_msg_from_dev(struct skd_device *skdev);
3008
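    /*
     * Interrupt handler used in legacy INTx and MSI modes. It loops reading
     * FIT_INT_STATUS_HOST, acks the bits in FIT_INT_DEF_MASK, and dispatches
     * completion, firmware-state-change, and message-from-device events.
     * Completion processing beyond the per-call limit is deferred to the
     * completion worker.
     */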
3009static irqreturn_t
3010skd_isr(int irq, void *ptr)
3011{
3012        struct skd_device *skdev;
3013        u32 intstat;
3014        u32 ack;
3015        int rc = 0;
3016        int deferred = 0;
3017        int flush_enqueued = 0;
3018
3019        skdev = (struct skd_device *)ptr;
3020        spin_lock(&skdev->lock);
3021
3022        for (;;) {
3023                intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
3024
3025                ack = FIT_INT_DEF_MASK;
3026                ack &= intstat;
3027
3028                pr_debug("%s:%s:%d intstat=0x%x ack=0x%x\n",
3029                         skdev->name, __func__, __LINE__, intstat, ack);
3030
3031                /* As long as there is an interrupt pending on the device,
3032                 * keep looping.  When there is none, get out; but if we've
3033                 * never done any processing, call the completion handler?
3034                 */
3035                if (ack == 0) {
3036                        /* No interrupts on device, but run the completion
3037                         * processor anyway?
3038                         */
3039                        if (rc == 0)
3040                                if (likely (skdev->state
3041                                        == SKD_DRVR_STATE_ONLINE))
3042                                        deferred = 1;
3043                        break;
3044                }
3045
3046                rc = IRQ_HANDLED;
3047
3048                SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
3049
3050                if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
3051                           (skdev->state != SKD_DRVR_STATE_STOPPING))) {
3052                        if (intstat & FIT_ISH_COMPLETION_POSTED) {
3053                                /*
3054                                 * If we have already deferred completion
3055                                 * processing, don't bother running it again
3056                                 */
3057                                if (deferred == 0)
3058                                        deferred =
3059                                                skd_isr_completion_posted(skdev,
3060                                                skd_isr_comp_limit, &flush_enqueued);
3061                        }
3062
3063                        if (intstat & FIT_ISH_FW_STATE_CHANGE) {
3064                                skd_isr_fwstate(skdev);
3065                                if (skdev->state == SKD_DRVR_STATE_FAULT ||
3066                                    skdev->state ==
3067                                    SKD_DRVR_STATE_DISAPPEARED) {
3068                                        spin_unlock(&skdev->lock);
3069                                        return rc;
3070                                }
3071                        }
3072
3073                        if (intstat & FIT_ISH_MSG_FROM_DEV)
3074                                skd_isr_msg_from_dev(skdev);
3075                }
3076        }
3077
3078        if (unlikely(flush_enqueued))
3079                skd_request_fn(skdev->queue);
3080
3081        if (deferred)
3082                schedule_work(&skdev->completion_worker);
3083        else if (!flush_enqueued)
3084                skd_request_fn(skdev->queue);
3085
3086        spin_unlock(&skdev->lock);
3087
3088        return rc;
3089}
3090
3091static void skd_drive_fault(struct skd_device *skdev)
3092{
3093        skdev->state = SKD_DRVR_STATE_FAULT;
3094        pr_err("(%s): Drive FAULT\n", skd_name(skdev));
3095}
3096
3097static void skd_drive_disappeared(struct skd_device *skdev)
3098{
3099        skdev->state = SKD_DRVR_STATE_DISAPPEARED;
3100        pr_err("(%s): Drive DISAPPEARED\n", skd_name(skdev));
3101}
3102
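    /*
     * Handle a firmware state change: read the drive state from FIT_STATUS
     * and move the driver state machine accordingly (restart, quiesce,
     * busy, fault, disappeared, ...).
     */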
3103static void skd_isr_fwstate(struct skd_device *skdev)
3104{
3105        u32 sense;
3106        u32 state;
3107        u32 mtd;
3108        int prev_driver_state = skdev->state;
3109
3110        sense = SKD_READL(skdev, FIT_STATUS);
3111        state = sense & FIT_SR_DRIVE_STATE_MASK;
3112
3113        pr_err("(%s): s1120 state %s(%d)=>%s(%d)\n",
3114               skd_name(skdev),
3115               skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
3116               skd_drive_state_to_str(state), state);
3117
3118        skdev->drive_state = state;
3119
3120        switch (skdev->drive_state) {
3121        case FIT_SR_DRIVE_INIT:
3122                if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
3123                        skd_disable_interrupts(skdev);
3124                        break;
3125                }
3126                if (skdev->state == SKD_DRVR_STATE_RESTARTING)
3127                        skd_recover_requests(skdev, 0);
3128                if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
3129                        skdev->timer_countdown = SKD_STARTING_TIMO;
3130                        skdev->state = SKD_DRVR_STATE_STARTING;
3131                        skd_soft_reset(skdev);
3132                        break;
3133                }
3134                mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
3135                SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3136                skdev->last_mtd = mtd;
3137                break;
3138
3139        case FIT_SR_DRIVE_ONLINE:
3140                skdev->cur_max_queue_depth = skd_max_queue_depth;
3141                if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
3142                        skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;
3143
3144                skdev->queue_low_water_mark =
3145                        skdev->cur_max_queue_depth * 2 / 3 + 1;
3146                if (skdev->queue_low_water_mark < 1)
3147                        skdev->queue_low_water_mark = 1;
3148                pr_info(
3149                       "(%s): Queue depth limit=%d dev=%d lowat=%d\n",
3150                       skd_name(skdev),
3151                       skdev->cur_max_queue_depth,
3152                       skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
3153
3154                skd_refresh_device_data(skdev);
3155                break;
3156
3157        case FIT_SR_DRIVE_BUSY:
3158                skdev->state = SKD_DRVR_STATE_BUSY;
3159                skdev->timer_countdown = SKD_BUSY_TIMO;
3160                skd_quiesce_dev(skdev);
3161                break;
3162        case FIT_SR_DRIVE_BUSY_SANITIZE:
3163                /* set the timer for 3 seconds; we'll abort any unfinished
3164                 * commands after it expires
3165                 */
3166                skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
3167                skdev->timer_countdown = SKD_TIMER_SECONDS(3);
3168                blk_start_queue(skdev->queue);
3169                break;
3170        case FIT_SR_DRIVE_BUSY_ERASE:
3171                skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
3172                skdev->timer_countdown = SKD_BUSY_TIMO;
3173                break;
3174        case FIT_SR_DRIVE_OFFLINE:
3175                skdev->state = SKD_DRVR_STATE_IDLE;
3176                break;
3177        case FIT_SR_DRIVE_SOFT_RESET:
3178                switch (skdev->state) {
3179                case SKD_DRVR_STATE_STARTING:
3180                case SKD_DRVR_STATE_RESTARTING:
3181                        /* Expected by a caller of skd_soft_reset() */
3182                        break;
3183                default:
3184                        skdev->state = SKD_DRVR_STATE_RESTARTING;
3185                        break;
3186                }
3187                break;
3188        case FIT_SR_DRIVE_FW_BOOTING:
3189                pr_debug("%s:%s:%d ISR FIT_SR_DRIVE_FW_BOOTING %s\n",
3190                         skdev->name, __func__, __LINE__, skdev->name);
3191                skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
3192                skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
3193                break;
3194
3195        case FIT_SR_DRIVE_DEGRADED:
3196        case FIT_SR_PCIE_LINK_DOWN:
3197        case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
3198                break;
3199
3200        case FIT_SR_DRIVE_FAULT:
3201                skd_drive_fault(skdev);
3202                skd_recover_requests(skdev, 0);
3203                blk_start_queue(skdev->queue);
3204                break;
3205
3206        /* PCIe bus returned all Fs? */
3207        case 0xFF:
3208                pr_info("(%s): state=0x%x sense=0x%x\n",
3209                       skd_name(skdev), state, sense);
3210                skd_drive_disappeared(skdev);
3211                skd_recover_requests(skdev, 0);
3212                blk_start_queue(skdev->queue);
3213                break;
3214        default:
3215                /*
3216                 * Unknown FW state. Wait for a state we recognize.
3217                 */
3218                break;
3219        }
3220        pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
3221               skd_name(skdev),
3222               skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
3223               skd_skdev_state_to_str(skdev->state), skdev->state);
3224}
3225
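    /*
     * Abort or reclaim everything that is outstanding. Busy r/w requests are
     * requeued (when requested and below SKD_MAX_RETRIES) or completed with
     * -EIO, busy FIT messages are reclaimed, orphaned special requests are
     * released while busy ones are marked aborted, and the free lists,
     * timeout slots, and in-flight count are reinitialized.
     */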
3226static void skd_recover_requests(struct skd_device *skdev, int requeue)
3227{
3228        int i;
3229
3230        for (i = 0; i < skdev->num_req_context; i++) {
3231                struct skd_request_context *skreq = &skdev->skreq_table[i];
3232
3233                if (skreq->state == SKD_REQ_STATE_BUSY) {
3234                        skd_log_skreq(skdev, skreq, "recover");
3235
3236                        SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
3237                        SKD_ASSERT(skreq->req != NULL);
3238
3239                        /* Release DMA resources for the request. */
3240                        if (skreq->n_sg > 0)
3241                                skd_postop_sg_list(skdev, skreq);
3242
3243                        if (requeue &&
3244                            (unsigned long) ++skreq->req->special <
3245                            SKD_MAX_RETRIES)
3246                                blk_requeue_request(skdev->queue, skreq->req);
3247                        else
3248                                skd_end_request(skdev, skreq, -EIO);
3249
3250                        skreq->req = NULL;
3251
3252                        skreq->state = SKD_REQ_STATE_IDLE;
3253                        skreq->id += SKD_ID_INCR;
3254                }
3255                if (i > 0)
3256                        skreq[-1].next = skreq;
3257                skreq->next = NULL;
3258        }
3259        skdev->skreq_free_list = skdev->skreq_table;
3260
3261        for (i = 0; i < skdev->num_fitmsg_context; i++) {
3262                struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i];
3263
3264                if (skmsg->state == SKD_MSG_STATE_BUSY) {
3265                        skd_log_skmsg(skdev, skmsg, "salvaged");
3266                        SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0);
3267                        skmsg->state = SKD_MSG_STATE_IDLE;
3268                        skmsg->id += SKD_ID_INCR;
3269                }
3270                if (i > 0)
3271                        skmsg[-1].next = skmsg;
3272                skmsg->next = NULL;
3273        }
3274        skdev->skmsg_free_list = skdev->skmsg_table;
3275
3276        for (i = 0; i < skdev->n_special; i++) {
3277                struct skd_special_context *skspcl = &skdev->skspcl_table[i];
3278
3279                /* If orphaned, reclaim it because it has already been reported
3280                 * to the process as an error (it was just waiting for
3281                 * a completion that didn't come, and now it never will).
3282                 * If busy, change to a state that will cause it to error
3283                 * out in the wait routine and let it do the normal
3284                 * reporting and reclaiming.
3285                 */
3286                if (skspcl->req.state == SKD_REQ_STATE_BUSY) {
3287                        if (skspcl->orphaned) {
3288                                pr_debug("%s:%s:%d orphaned %p\n",
3289                                         skdev->name, __func__, __LINE__,
3290                                         skspcl);
3291                                skd_release_special(skdev, skspcl);
3292                        } else {
3293                                pr_debug("%s:%s:%d not orphaned %p\n",
3294                                         skdev->name, __func__, __LINE__,
3295                                         skspcl);
3296                                skspcl->req.state = SKD_REQ_STATE_ABORTED;
3297                        }
3298                }
3299        }
3300        skdev->skspcl_free_list = skdev->skspcl_table;
3301
3302        for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++)
3303                skdev->timeout_slot[i] = 0;
3304
3305        skdev->in_flight = 0;
3306}
3307
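    /*
     * Handle an acknowledgment from the device. Each acked FIT_MTD_* message
     * triggers the next step of the init handshake: protocol version check,
     * command queue depth, completion queue depth and address, host id and
     * time stamps, and finally arming the queue.
     */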
3308static void skd_isr_msg_from_dev(struct skd_device *skdev)
3309{
3310        u32 mfd;
3311        u32 mtd;
3312        u32 data;
3313
3314        mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
3315
3316        pr_debug("%s:%s:%d mfd=0x%x last_mtd=0x%x\n",
3317                 skdev->name, __func__, __LINE__, mfd, skdev->last_mtd);
3318
3319        /* ignore any mtd that is an ack for something we didn't send */
3320        if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
3321                return;
3322
3323        switch (FIT_MXD_TYPE(mfd)) {
3324        case FIT_MTD_FITFW_INIT:
3325                skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
3326
3327                if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
3328                        pr_err("(%s): protocol mismatch\n",
3329                               skdev->name);
3330                        pr_err("(%s):   got=%d support=%d\n",
3331                               skdev->name, skdev->proto_ver,
3332                               FIT_PROTOCOL_VERSION_1);
3333                        pr_err("(%s):   please upgrade driver\n",
3334                               skdev->name);
3335                        skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
3336                        skd_soft_reset(skdev);
3337                        break;
3338                }
3339                mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
3340                SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3341                skdev->last_mtd = mtd;
3342                break;
3343
3344        case FIT_MTD_GET_CMDQ_DEPTH:
3345                skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
3346                mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
3347                                   SKD_N_COMPLETION_ENTRY);
3348                SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3349                skdev->last_mtd = mtd;
3350                break;
3351
3352        case FIT_MTD_SET_COMPQ_DEPTH:
3353                SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
3354                mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
3355                SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3356                skdev->last_mtd = mtd;
3357                break;
3358
3359        case FIT_MTD_SET_COMPQ_ADDR:
3360                skd_reset_skcomp(skdev);
3361                mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
3362                SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3363                skdev->last_mtd = mtd;
3364                break;
3365
3366        case FIT_MTD_CMD_LOG_HOST_ID:
3367                skdev->connect_time_stamp = get_seconds();
3368                data = skdev->connect_time_stamp & 0xFFFF;
3369                mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
3370                SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3371                skdev->last_mtd = mtd;
3372                break;
3373
3374        case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
3375                skdev->drive_jiffies = FIT_MXD_DATA(mfd);
3376                data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
3377                mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
3378                SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3379                skdev->last_mtd = mtd;
3380                break;
3381
3382        case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
3383                skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
3384                mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
3385                SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3386                skdev->last_mtd = mtd;
3387
3388                pr_err("(%s): Time sync driver=0x%x device=0x%x\n",
3389                       skd_name(skdev),
3390                       skdev->connect_time_stamp, skdev->drive_jiffies);
3391                break;
3392
3393        case FIT_MTD_ARM_QUEUE:
3394                skdev->last_mtd = 0;
3395                /*
3396                 * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
3397                 */
3398                break;
3399
3400        default:
3401                break;
3402        }
3403}
3404
3405static void skd_disable_interrupts(struct skd_device *skdev)
3406{
3407        u32 sense;
3408
3409        sense = SKD_READL(skdev, FIT_CONTROL);
3410        sense &= ~FIT_CR_ENABLE_INTERRUPTS;
3411        SKD_WRITEL(skdev, sense, FIT_CONTROL);
3412        pr_debug("%s:%s:%d sense 0x%x\n",
3413                 skdev->name, __func__, __LINE__, sense);
3414
3415        /* Note that all 1s are written. A 1-bit means
3416         * disable, a 0 means enable.
3417         */
3418        SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
3419}
3420
3421static void skd_enable_interrupts(struct skd_device *skdev)
3422{
3423        u32 val;
3424
3425        /* unmask interrupts first */
3426        val = FIT_ISH_FW_STATE_CHANGE +
3427              FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV;
3428
3429        /* Note that the complement of the mask is written. A 1-bit means
3430         * disable, a 0 means enable. */
3431        SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
3432        pr_debug("%s:%s:%d interrupt mask=0x%x\n",
3433                 skdev->name, __func__, __LINE__, ~val);
3434
3435        val = SKD_READL(skdev, FIT_CONTROL);
3436        val |= FIT_CR_ENABLE_INTERRUPTS;
3437        pr_debug("%s:%s:%d control=0x%x\n",
3438                 skdev->name, __func__, __LINE__, val);
3439        SKD_WRITEL(skdev, val, FIT_CONTROL);
3440}
3441
3442/*
3443 *****************************************************************************
3444 * START, STOP, RESTART, QUIESCE, UNQUIESCE
3445 *****************************************************************************
3446 */
3447
3448static void skd_soft_reset(struct skd_device *skdev)
3449{
3450        u32 val;
3451
3452        val = SKD_READL(skdev, FIT_CONTROL);
3453        val |= (FIT_CR_SOFT_RESET);
3454        pr_debug("%s:%s:%d control=0x%x\n",
3455                 skdev->name, __func__, __LINE__, val);
3456        SKD_WRITEL(skdev, val, FIT_CONTROL);
3457}
3458
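    /*
     * Bring the device up: ack any stale interrupts, sample the drive state,
     * enable interrupts, and take the state-specific action (soft reset,
     * wait for boot, fault, ...) needed to get to ONLINE.
     */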
3459static void skd_start_device(struct skd_device *skdev)
3460{
3461        unsigned long flags;
3462        u32 sense;
3463        u32 state;
3464
3465        spin_lock_irqsave(&skdev->lock, flags);
3466
3467        /* ack all ghost interrupts */
3468        SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3469
3470        sense = SKD_READL(skdev, FIT_STATUS);
3471
3472        pr_debug("%s:%s:%d initial status=0x%x\n",
3473                 skdev->name, __func__, __LINE__, sense);
3474
3475        state = sense & FIT_SR_DRIVE_STATE_MASK;
3476        skdev->drive_state = state;
3477        skdev->last_mtd = 0;
3478
3479        skdev->state = SKD_DRVR_STATE_STARTING;
3480        skdev->timer_countdown = SKD_STARTING_TIMO;
3481
3482        skd_enable_interrupts(skdev);
3483
3484        switch (skdev->drive_state) {
3485        case FIT_SR_DRIVE_OFFLINE:
3486                pr_err("(%s): Drive offline...\n", skd_name(skdev));
3487                break;
3488
3489        case FIT_SR_DRIVE_FW_BOOTING:
3490                pr_debug("%s:%s:%d FIT_SR_DRIVE_FW_BOOTING %s\n",
3491                         skdev->name, __func__, __LINE__, skdev->name);
3492                skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
3493                skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
3494                break;
3495
3496        case FIT_SR_DRIVE_BUSY_SANITIZE:
3497                pr_info("(%s): Start: BUSY_SANITIZE\n",
3498                       skd_name(skdev));
3499                skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
3500                skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3501                break;
3502
3503        case FIT_SR_DRIVE_BUSY_ERASE:
3504                pr_info("(%s): Start: BUSY_ERASE\n", skd_name(skdev));
3505                skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
3506                skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3507                break;
3508
3509        case FIT_SR_DRIVE_INIT:
3510        case FIT_SR_DRIVE_ONLINE:
3511                skd_soft_reset(skdev);
3512                break;
3513
3514        case FIT_SR_DRIVE_BUSY:
3515                pr_err("(%s): Drive Busy...\n", skd_name(skdev));
3516                skdev->state = SKD_DRVR_STATE_BUSY;
3517                skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3518                break;
3519
3520        case FIT_SR_DRIVE_SOFT_RESET:
3521                pr_err("(%s) drive soft reset in prog\n",
3522                       skd_name(skdev));
3523                break;
3524
3525        case FIT_SR_DRIVE_FAULT:
3526                /* Fault state is bad... a soft reset won't clear it.
3527                 * A hard reset might, but does the device support one?
3528                 * For now, just fault so the system doesn't hang.
3529                 */
3530                skd_drive_fault(skdev);
3531                /* start the queue so we can respond with errors to requests */
3532                pr_debug("%s:%s:%d starting %s queue\n",
3533                         skdev->name, __func__, __LINE__, skdev->name);
3534                blk_start_queue(skdev->queue);
3535                skdev->gendisk_on = -1;
3536                wake_up_interruptible(&skdev->waitq);
3537                break;
3538
3539        case 0xFF:
3540                /* Most likely the device isn't there or isn't responding
3541                 * to the BAR1 addresses. */
3542                skd_drive_disappeared(skdev);
3543                /* start the queue so we can respond with errors to requests */
3544                pr_debug("%s:%s:%d starting %s queue to error-out reqs\n",
3545                         skdev->name, __func__, __LINE__, skdev->name);
3546                blk_start_queue(skdev->queue);
3547                skdev->gendisk_on = -1;
3548                wake_up_interruptible(&skdev->waitq);
3549                break;
3550
3551        default:
3552                pr_err("(%s) Start: unknown state %x\n",
3553                       skd_name(skdev), skdev->drive_state);
3554                break;
3555        }
3556
3557        state = SKD_READL(skdev, FIT_CONTROL);
3558        pr_debug("%s:%s:%d FIT Control Status=0x%x\n",
3559                 skdev->name, __func__, __LINE__, state);
3560
3561        state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
3562        pr_debug("%s:%s:%d Intr Status=0x%x\n",
3563                 skdev->name, __func__, __LINE__, state);
3564
3565        state = SKD_READL(skdev, FIT_INT_MASK_HOST);
3566        pr_debug("%s:%s:%d Intr Mask=0x%x\n",
3567                 skdev->name, __func__, __LINE__, state);
3568
3569        state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
3570        pr_debug("%s:%s:%d Msg from Dev=0x%x\n",
3571                 skdev->name, __func__, __LINE__, state);
3572
3573        state = SKD_READL(skdev, FIT_HW_VERSION);
3574        pr_debug("%s:%s:%d HW version=0x%x\n",
3575                 skdev->name, __func__, __LINE__, state);
3576
3577        spin_unlock_irqrestore(&skdev->lock, flags);
3578}
3579
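    /*
     * Shut the device down: flush the write cache with an internal
     * SYNCHRONIZE CACHE request (waiting up to 10 seconds), stop the timer,
     * disable interrupts, soft-reset the device, and poll for up to one
     * second for it to return to the INIT state.
     */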
3580static void skd_stop_device(struct skd_device *skdev)
3581{
3582        unsigned long flags;
3583        struct skd_special_context *skspcl = &skdev->internal_skspcl;
3584        u32 dev_state;
3585        int i;
3586
3587        spin_lock_irqsave(&skdev->lock, flags);
3588
3589        if (skdev->state != SKD_DRVR_STATE_ONLINE) {
3590                pr_err("(%s): skd_stop_device not online no sync\n",
3591                       skd_name(skdev));
3592                goto stop_out;
3593        }
3594
3595        if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
3596                pr_err("(%s): skd_stop_device no special\n",
3597                       skd_name(skdev));
3598                goto stop_out;
3599        }
3600
3601        skdev->state = SKD_DRVR_STATE_SYNCING;
3602        skdev->sync_done = 0;
3603
3604        skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
3605
3606        spin_unlock_irqrestore(&skdev->lock, flags);
3607
3608        wait_event_interruptible_timeout(skdev->waitq,
3609                                         (skdev->sync_done), (10 * HZ));
3610
3611        spin_lock_irqsave(&skdev->lock, flags);
3612
3613        switch (skdev->sync_done) {
3614        case 0:
3615                pr_err("(%s): skd_stop_device no sync\n",
3616                       skd_name(skdev));
3617                break;
3618        case 1:
3619                pr_err("(%s): skd_stop_device sync done\n",
3620                       skd_name(skdev));
3621                break;
3622        default:
3623                pr_err("(%s): skd_stop_device sync error\n",
3624                       skd_name(skdev));
3625        }
3626
3627stop_out:
3628        skdev->state = SKD_DRVR_STATE_STOPPING;
3629        spin_unlock_irqrestore(&skdev->lock, flags);
3630
3631        skd_kill_timer(skdev);
3632
3633        spin_lock_irqsave(&skdev->lock, flags);
3634        skd_disable_interrupts(skdev);
3635
3636        /* ensure all ints on device are cleared */
3637        /* soft reset the device to unload with a clean slate */
3638        SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3639        SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
3640
3641        spin_unlock_irqrestore(&skdev->lock, flags);
3642
3643        /* poll every 100ms, 1 second timeout */
3644        for (i = 0; i < 10; i++) {
3645                dev_state =
3646                        SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
3647                if (dev_state == FIT_SR_DRIVE_INIT)
3648                        break;
3649                set_current_state(TASK_INTERRUPTIBLE);
3650                schedule_timeout(msecs_to_jiffies(100));
3651        }
3652
3653        if (dev_state != FIT_SR_DRIVE_INIT)
3654                pr_err("(%s): skd_stop_device state error 0x%02x\n",
3655                       skd_name(skdev), dev_state);
3656}
3657
3658/* assume spinlock is held */
3659static void skd_restart_device(struct skd_device *skdev)
3660{
3661        u32 state;
3662
3663        /* ack all ghost interrupts */
3664        SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3665
3666        state = SKD_READL(skdev, FIT_STATUS);
3667
3668        pr_debug("%s:%s:%d drive status=0x%x\n",
3669                 skdev->name, __func__, __LINE__, state);
3670
3671        state &= FIT_SR_DRIVE_STATE_MASK;
3672        skdev->drive_state = state;
3673        skdev->last_mtd = 0;
3674
3675        skdev->state = SKD_DRVR_STATE_RESTARTING;
3676        skdev->timer_countdown = SKD_RESTARTING_TIMO;
3677
3678        skd_soft_reset(skdev);
3679}
3680
3681/* assume spinlock is held */
3682static int skd_quiesce_dev(struct skd_device *skdev)
3683{
3684        int rc = 0;
3685
3686        switch (skdev->state) {
3687        case SKD_DRVR_STATE_BUSY:
3688        case SKD_DRVR_STATE_BUSY_IMMINENT:
3689                pr_debug("%s:%s:%d stopping %s queue\n",
3690                         skdev->name, __func__, __LINE__, skdev->name);
3691                blk_stop_queue(skdev->queue);
3692                break;
3693        case SKD_DRVR_STATE_ONLINE:
3694        case SKD_DRVR_STATE_STOPPING:
3695        case SKD_DRVR_STATE_SYNCING:
3696        case SKD_DRVR_STATE_PAUSING:
3697        case SKD_DRVR_STATE_PAUSED:
3698        case SKD_DRVR_STATE_STARTING:
3699        case SKD_DRVR_STATE_RESTARTING:
3700        case SKD_DRVR_STATE_RESUMING:
3701        default:
3702                rc = -EINVAL;
3703                pr_debug("%s:%s:%d state [%d] not implemented\n",
3704                         skdev->name, __func__, __LINE__, skdev->state);
3705        }
3706        return rc;
3707}
3708
3709/* assume spinlock is held */
3710static int skd_unquiesce_dev(struct skd_device *skdev)
3711{
3712        int prev_driver_state = skdev->state;
3713
3714        skd_log_skdev(skdev, "unquiesce");
3715        if (skdev->state == SKD_DRVR_STATE_ONLINE) {
3716                pr_debug("%s:%s:%d **** device already ONLINE\n",
3717                         skdev->name, __func__, __LINE__);
3718                return 0;
3719        }
3720        if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
3721                /*
3722                 * If there has been a state change to other than
3723                 * ONLINE, we will rely on a controller state change
3724                 * to come back online and restart the queue.
3725                 * The BUSY state means the driver is ready to
3726                 * continue normal processing but is waiting for the
3727                 * controller to become available.
3728                 */
3729                skdev->state = SKD_DRVR_STATE_BUSY;
3730                pr_debug("%s:%s:%d drive BUSY state\n",
3731                         skdev->name, __func__, __LINE__);
3732                return 0;
3733        }
3734
3735        /*
3736         * The drive has just come online; the driver is either in startup,
3737         * paused performing a task, or busy waiting for hardware.
3738         */
3739        switch (skdev->state) {
3740        case SKD_DRVR_STATE_PAUSED:
3741        case SKD_DRVR_STATE_BUSY:
3742        case SKD_DRVR_STATE_BUSY_IMMINENT:
3743        case SKD_DRVR_STATE_BUSY_ERASE:
3744        case SKD_DRVR_STATE_STARTING:
3745        case SKD_DRVR_STATE_RESTARTING:
3746        case SKD_DRVR_STATE_FAULT:
3747        case SKD_DRVR_STATE_IDLE:
3748        case SKD_DRVR_STATE_LOAD:
3749                skdev->state = SKD_DRVR_STATE_ONLINE;
3750                pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
3751                       skd_name(skdev),
3752                       skd_skdev_state_to_str(prev_driver_state),
3753                       prev_driver_state, skd_skdev_state_to_str(skdev->state),
3754                       skdev->state);
3755                pr_debug("%s:%s:%d **** device ONLINE...starting block queue\n",
3756                         skdev->name, __func__, __LINE__);
3757                pr_debug("%s:%s:%d starting %s queue\n",
3758                         skdev->name, __func__, __LINE__, skdev->name);
3759                pr_info("(%s): STEC s1120 ONLINE\n", skd_name(skdev));
3760                blk_start_queue(skdev->queue);
3761                skdev->gendisk_on = 1;
3762                wake_up_interruptible(&skdev->waitq);
3763                break;
3764
3765        case SKD_DRVR_STATE_DISAPPEARED:
3766        default:
3767                pr_debug("%s:%s:%d **** driver state %d, not implemented\n",
3768                         skdev->name, __func__, __LINE__,
3769                         skdev->state);
3770                return -EBUSY;
3771        }
3772        return 0;
3773}
3774
3775/*
3776 *****************************************************************************
3777 * PCIe MSI/MSI-X INTERRUPT HANDLERS
3778 *****************************************************************************
3779 */
3780
3781static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
3782{
3783        struct skd_device *skdev = skd_host_data;
3784        unsigned long flags;
3785
3786        spin_lock_irqsave(&skdev->lock, flags);
3787        pr_debug("%s:%s:%d MSIX = 0x%x\n",
3788                 skdev->name, __func__, __LINE__,
3789                 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3790        pr_err("(%s): MSIX reserved irq %d = 0x%x\n", skd_name(skdev),
3791               irq, SKD_READL(skdev, FIT_INT_STATUS_HOST));
3792        SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
3793        spin_unlock_irqrestore(&skdev->lock, flags);
3794        return IRQ_HANDLED;
3795}
3796
3797static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
3798{
3799        struct skd_device *skdev = skd_host_data;
3800        unsigned long flags;
3801
3802        spin_lock_irqsave(&skdev->lock, flags);
3803        pr_debug("%s:%s:%d MSIX = 0x%x\n",
3804                 skdev->name, __func__, __LINE__,
3805                 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3806        SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
3807        skd_isr_fwstate(skdev);
3808        spin_unlock_irqrestore(&skdev->lock, flags);
3809        return IRQ_HANDLED;
3810}
3811
3812static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
3813{
3814        struct skd_device *skdev = skd_host_data;
3815        unsigned long flags;
3816        int flush_enqueued = 0;
3817        int deferred;
3818
3819        spin_lock_irqsave(&skdev->lock, flags);
3820        pr_debug("%s:%s:%d MSIX = 0x%x\n",
3821                 skdev->name, __func__, __LINE__,
3822                 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3823        SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
3824        deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
3825                                                &flush_enqueued);
3826        if (flush_enqueued)
3827                skd_request_fn(skdev->queue);
3828
3829        if (deferred)
3830                schedule_work(&skdev->completion_worker);
3831        else if (!flush_enqueued)
3832                skd_request_fn(skdev->queue);
3833
3834        spin_unlock_irqrestore(&skdev->lock, flags);
3835
3836        return IRQ_HANDLED;
3837}
3838
3839static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
3840{
3841        struct skd_device *skdev = skd_host_data;
3842        unsigned long flags;
3843
3844        spin_lock_irqsave(&skdev->lock, flags);
3845        pr_debug("%s:%s:%d MSIX = 0x%x\n",
3846                 skdev->name, __func__, __LINE__,
3847                 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3848        SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
3849        skd_isr_msg_from_dev(skdev);
3850        spin_unlock_irqrestore(&skdev->lock, flags);
3851        return IRQ_HANDLED;
3852}
3853
3854static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
3855{
3856        struct skd_device *skdev = skd_host_data;
3857        unsigned long flags;
3858
3859        spin_lock_irqsave(&skdev->lock, flags);
3860        pr_debug("%s:%s:%d MSIX = 0x%x\n",
3861                 skdev->name, __func__, __LINE__,
3862                 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3863        SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
3864        spin_unlock_irqrestore(&skdev->lock, flags);
3865        return IRQ_HANDLED;
3866}
3867
3868/*
3869 *****************************************************************************
3870 * PCIe MSI/MSI-X SETUP
3871 *****************************************************************************
3872 */
3873
3874struct skd_msix_entry {
3875        int have_irq;
3876        u32 vector;
3877        u32 entry;
3878        struct skd_device *rsp;
3879        char isr_name[30];
3880};
3881
3882struct skd_init_msix_entry {
3883        const char *name;
3884        irq_handler_t handler;
3885};
3886
3887#define SKD_MAX_MSIX_COUNT              13
3888#define SKD_MIN_MSIX_COUNT              7
3889#define SKD_BASE_MSIX_IRQ               4
3890
3891static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
3892        { "(DMA 0)",        skd_reserved_isr },
3893        { "(DMA 1)",        skd_reserved_isr },
3894        { "(DMA 2)",        skd_reserved_isr },
3895        { "(DMA 3)",        skd_reserved_isr },
3896        { "(State Change)", skd_statec_isr   },
3897        { "(COMPL_Q)",      skd_comp_q       },
3898        { "(MSG)",          skd_msg_isr      },
3899        { "(Reserved)",     skd_reserved_isr },
3900        { "(Reserved)",     skd_reserved_isr },
3901        { "(Queue Full 0)", skd_qfull_isr    },
3902        { "(Queue Full 1)", skd_qfull_isr    },
3903        { "(Queue Full 2)", skd_qfull_isr    },
3904        { "(Queue Full 3)", skd_qfull_isr    },
3905};
3906
3907static void skd_release_msix(struct skd_device *skdev)
3908{
3909        struct skd_msix_entry *qentry;
3910        int i;
3911
3912        if (skdev->msix_entries) {
3913                for (i = 0; i < skdev->msix_count; i++) {
3914                        qentry = &skdev->msix_entries[i];
3916
3917                        if (qentry->have_irq)
3918                                devm_free_irq(&skdev->pdev->dev,
3919                                              qentry->vector, qentry->rsp);
3920                }
3921
3922                kfree(skdev->msix_entries);
3923        }
3924
3925        if (skdev->msix_count)
3926                pci_disable_msix(skdev->pdev);
3927
3928        skdev->msix_count = 0;
3929        skdev->msix_entries = NULL;
3930}
3931
3932static int skd_acquire_msix(struct skd_device *skdev)
3933{
3934        int i, rc;
3935        struct pci_dev *pdev = skdev->pdev;
3936        struct msix_entry *entries;
3937        struct skd_msix_entry *qentry;
3938
3939        entries = kzalloc(sizeof(struct msix_entry) * SKD_MAX_MSIX_COUNT,
3940                          GFP_KERNEL);
3941        if (!entries)
3942                return -ENOMEM;
3943
3944        for (i = 0; i < SKD_MAX_MSIX_COUNT; i++)
3945                entries[i].entry = i;
3946
3947        rc = pci_enable_msix_exact(pdev, entries, SKD_MAX_MSIX_COUNT);
3948        if (rc) {
3949                pr_err("(%s): failed to enable MSI-X %d\n",
3950                       skd_name(skdev), rc);
3951                goto msix_out;
3952        }
3953
3954        skdev->msix_count = SKD_MAX_MSIX_COUNT;
3955        skdev->msix_entries = kzalloc(sizeof(struct skd_msix_entry) *
3956                                      skdev->msix_count, GFP_KERNEL);
3957        if (!skdev->msix_entries) {
3958                rc = -ENOMEM;
3959                pr_err("(%s): msix table allocation error\n",
3960                       skd_name(skdev));
3961                goto msix_out;
3962        }
3963
3964        for (i = 0; i < skdev->msix_count; i++) {
3965                qentry = &skdev->msix_entries[i];
3966                qentry->vector = entries[i].vector;
3967                qentry->entry = entries[i].entry;
3968                qentry->rsp = NULL;
3969                qentry->have_irq = 0;
3970                pr_debug("%s:%s:%d %s: <%s> msix (%d) vec %d, entry %x\n",
3971                         skdev->name, __func__, __LINE__,
3972                         pci_name(pdev), skdev->name,
3973                         i, qentry->vector, qentry->entry);
3974        }
3975
3976        /* Request an interrupt handler for each MSI-X vector */
3977        for (i = 0; i < skdev->msix_count; i++) {
3978                qentry = &skdev->msix_entries[i];
3979                snprintf(qentry->isr_name, sizeof(qentry->isr_name),
3980                         "%s%d-msix %s", DRV_NAME, skdev->devno,
3981                         msix_entries[i].name);
3982                rc = devm_request_irq(&skdev->pdev->dev, qentry->vector,
3983                                      msix_entries[i].handler, 0,
3984                                      qentry->isr_name, skdev);
3985                if (rc) {
3986                        pr_err("(%s): Unable to register(%d) MSI-X "
3987                               "handler %d: %s\n",
3988                               skd_name(skdev), rc, i, qentry->isr_name);
3989                        goto msix_out;
3990                } else {
3991                        qentry->have_irq = 1;
3992                        qentry->rsp = skdev;
3993                }
3994        }
3995        pr_debug("%s:%s:%d %s: <%s> msix %d irq(s) enabled\n",
3996                 skdev->name, __func__, __LINE__,
3997                 pci_name(pdev), skdev->name, skdev->msix_count);
            kfree(entries);
3998        return 0;
3999
4000msix_out:
4001        kfree(entries);
4003        skd_release_msix(skdev);
4004        return rc;
4005}
4006
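    /*
     * Set up the device interrupt according to skdev->irq_type, falling
     * back from MSI-X to MSI to legacy INTx when a mode cannot be enabled.
     */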
4007static int skd_acquire_irq(struct skd_device *skdev)
4008{
4009        int rc;
4010        struct pci_dev *pdev;
4011
4012        pdev = skdev->pdev;
4013        skdev->msix_count = 0;
4014
4015RETRY_IRQ_TYPE:
4016        switch (skdev->irq_type) {
4017        case SKD_IRQ_MSIX:
4018                rc = skd_acquire_msix(skdev);
4019                if (!rc)
4020                        pr_info("(%s): MSI-X %d irqs enabled\n",
4021                               skd_name(skdev), skdev->msix_count);
4022                else {
4023                        pr_err(
4024                               "(%s): failed to enable MSI-X, re-trying with MSI %d\n",
4025                               skd_name(skdev), rc);
4026                        skdev->irq_type = SKD_IRQ_MSI;
4027                        goto RETRY_IRQ_TYPE;
4028                }
4029                break;
4030        case SKD_IRQ_MSI:
4031                snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d-msi",
4032                         DRV_NAME, skdev->devno);
4033                rc = pci_enable_msi_range(pdev, 1, 1);
4034                if (rc > 0) {
4035                        rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr, 0,
4036                                              skdev->isr_name, skdev);
4037                        if (rc) {
4038                                pci_disable_msi(pdev);
4039                                pr_err(
4040                                       "(%s): failed to allocate the MSI interrupt %d\n",
4041                                       skd_name(skdev), rc);
4042                                goto RETRY_IRQ_LEGACY;
4043                        }
4044                        pr_info("(%s): MSI irq %d enabled\n",
4045                               skd_name(skdev), pdev->irq);
4046                } else {
4047RETRY_IRQ_LEGACY:
4048                        pr_err(
4049                               "(%s): failed to enable MSI, re-trying with LEGACY %d\n",
4050                               skd_name(skdev), rc);
4051                        skdev->irq_type = SKD_IRQ_LEGACY;
4052                        goto RETRY_IRQ_TYPE;
4053                }
4054                break;
4055        case SKD_IRQ_LEGACY:
4056                snprintf(skdev->isr_name, sizeof(skdev->isr_name),
4057                         "%s%d-legacy", DRV_NAME, skdev->devno);
4058                rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
4059                                      IRQF_SHARED, skdev->isr_name, skdev);
4060                if (!rc)
4061                        pr_info("(%s): LEGACY irq %d enabled\n",
4062                               skd_name(skdev), pdev->irq);
4063                else
4064                        pr_err("(%s): request LEGACY irq error %d\n",
4065                               skd_name(skdev), rc);
4066                break;
4067        default:
4068                pr_info("(%s): irq_type %d invalid, re-set to %d\n",
4069                       skd_name(skdev), skdev->irq_type, SKD_IRQ_DEFAULT);
4070                skdev->irq_type = SKD_IRQ_LEGACY;
4071                goto RETRY_IRQ_TYPE;
4072        }
4073        return rc;
4074}
4075
4076static void skd_release_irq(struct skd_device *skdev)
4077{
4078        switch (skdev->irq_type) {
4079        case SKD_IRQ_MSIX:
4080                skd_release_msix(skdev);
4081                break;
4082        case SKD_IRQ_MSI:
4083                devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
4084                pci_disable_msi(skdev->pdev);
4085                break;
4086        case SKD_IRQ_LEGACY:
4087                devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
4088                break;
4089        default:
4090                pr_err("(%s): wrong irq type %d!\n",
4091                       skd_name(skdev), skdev->irq_type);
4092                break;
4093        }
4094}
4095
4096/*
4097 *****************************************************************************
4098 * CONSTRUCT
4099 *****************************************************************************
4100 */
4101
4102static int skd_cons_skcomp(struct skd_device *skdev)
4103{
4104        int rc = 0;
4105        struct fit_completion_entry_v1 *skcomp;
4106        u32 nbytes;
4107
4108        nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
4109        nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
4110
4111        pr_debug("%s:%s:%d comp pci_alloc, total bytes %d entries %d\n",
4112                 skdev->name, __func__, __LINE__,
4113                 nbytes, SKD_N_COMPLETION_ENTRY);
4114
4115        skcomp = pci_zalloc_consistent(skdev->pdev, nbytes,
4116                                       &skdev->cq_dma_address);
4117
4118        if (skcomp == NULL) {
4119                rc = -ENOMEM;
4120                goto err_out;
4121        }
4122
4123        skdev->skcomp_table = skcomp;
4124        skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
4125                                                           sizeof(*skcomp) *
4126                                                           SKD_N_COMPLETION_ENTRY);
4127
4128err_out:
4129        return rc;
4130}
4131
4132static int skd_cons_skmsg(struct skd_device *skdev)
4133{
4134        int rc = 0;
4135        u32 i;
4136
4137        pr_debug("%s:%s:%d skmsg_table kzalloc, struct %lu, count %u total %lu\n",
4138                 skdev->name, __func__, __LINE__,
4139                 sizeof(struct skd_fitmsg_context),
4140                 skdev->num_fitmsg_context,
4141                 sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
4142
4143        skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context)
4144                                     *skdev->num_fitmsg_context, GFP_KERNEL);
4145        if (skdev->skmsg_table == NULL) {
4146                rc = -ENOMEM;
4147                goto err_out;
4148        }
4149
4150        for (i = 0; i < skdev->num_fitmsg_context; i++) {
4151                struct skd_fitmsg_context *skmsg;
4152
4153                skmsg = &skdev->skmsg_table[i];
4154
4155                skmsg->id = i + SKD_ID_FIT_MSG;
4156
4157                skmsg->state = SKD_MSG_STATE_IDLE;
4158                skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
4159                                                      SKD_N_FITMSG_BYTES + 64,
4160                                                      &skmsg->mb_dma_address);
4161
4162                if (skmsg->msg_buf == NULL) {
4163                        rc = -ENOMEM;
4164                        goto err_out;
4165                }
4166
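                    /*
                     * The buffer was allocated SKD_N_FITMSG_BYTES + 64 so it
                     * can be aligned here: remember the original low-order
                     * bits in ->offset, then round the CPU pointer and the
                     * DMA address up to the FIT_QCMD_BASE_ADDRESS_MASK
                     * boundary.
                     */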
4167                skmsg->offset = (u32)((u64)skmsg->msg_buf &
4168                                      (~FIT_QCMD_BASE_ADDRESS_MASK));
4169                skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK;
4170                skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf &
4171                                       FIT_QCMD_BASE_ADDRESS_MASK);
4172                skmsg->mb_dma_address += ~FIT_QCMD_BASE_ADDRESS_MASK;
4173                skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK;
4174                memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
4175
4176                skmsg->next = &skmsg[1];
4177        }
4178
4179        /* Free list is in order starting with the 0th entry. */
4180        skdev->skmsg_table[i - 1].next = NULL;
4181        skdev->skmsg_free_list = skdev->skmsg_table;
4182
4183err_out:
4184        return rc;
4185}
4186
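    /*
     * Allocate a DMA-coherent array of n_sg FIT SG descriptors and pre-link
     * them through next_desc_ptr so the device can walk the chain; the last
     * descriptor's next pointer is left at 0.
     */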
4187static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
4188                                                  u32 n_sg,
4189                                                  dma_addr_t *ret_dma_addr)
4190{
4191        struct fit_sg_descriptor *sg_list;
4192        u32 nbytes;
4193
4194        nbytes = sizeof(*sg_list) * n_sg;
4195
4196        sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr);
4197
4198        if (sg_list != NULL) {
4199                uint64_t dma_address = *ret_dma_addr;
4200                u32 i;
4201
4202                memset(sg_list, 0, nbytes);
4203
4204                for (i = 0; i < n_sg - 1; i++) {
4205                        uint64_t ndp_off;
4206                        ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
4207
4208                        sg_list[i].next_desc_ptr = dma_address + ndp_off;
4209                }
4210                sg_list[i].next_desc_ptr = 0LL;
4211        }
4212
4213        return sg_list;
4214}
4215
4216static int skd_cons_skreq(struct skd_device *skdev)
4217{
4218        int rc = 0;
4219        u32 i;
4220
4221        pr_debug("%s:%s:%d skreq_table kzalloc, struct %lu, count %u total %lu\n",
4222                 skdev->name, __func__, __LINE__,
4223                 sizeof(struct skd_request_context),
4224                 skdev->num_req_context,
4225                 sizeof(struct skd_request_context) * skdev->num_req_context);
4226
4227        skdev->skreq_table = kzalloc(sizeof(struct skd_request_context)
4228                                     * skdev->num_req_context, GFP_KERNEL);
4229        if (skdev->skreq_table == NULL) {
4230                rc = -ENOMEM;
4231                goto err_out;
4232        }
4233
4234        pr_debug("%s:%s:%d alloc sg_table sg_per_req %u scatlist %lu total %lu\n",
4235                 skdev->name, __func__, __LINE__,
4236                 skdev->sgs_per_request, sizeof(struct scatterlist),
4237                 skdev->sgs_per_request * sizeof(struct scatterlist));
4238
4239        for (i = 0; i < skdev->num_req_context; i++) {
4240                struct skd_request_context *skreq;
4241
4242                skreq = &skdev->skreq_table[i];
4243
4244                skreq->id = i + SKD_ID_RW_REQUEST;
4245                skreq->state = SKD_REQ_STATE_IDLE;
4246
4247                skreq->sg = kzalloc(sizeof(struct scatterlist) *
4248                                    skdev->sgs_per_request, GFP_KERNEL);
4249                if (skreq->sg == NULL) {
4250                        rc = -ENOMEM;
4251                        goto err_out;
4252                }
4253                sg_init_table(skreq->sg, skdev->sgs_per_request);
4254
4255                skreq->sksg_list = skd_cons_sg_list(skdev,
4256                                                    skdev->sgs_per_request,
4257                                                    &skreq->sksg_dma_address);
4258
4259                if (skreq->sksg_list == NULL) {
4260                        rc = -ENOMEM;
4261                        goto err_out;
4262                }
4263
4264                skreq->next = &skreq[1];
4265        }
4266
4267        /* Free list is in order starting with the 0th entry. */
4268        skdev->skreq_table[i - 1].next = NULL;
4269        skdev->skreq_free_list = skdev->skreq_table;
4270
4271err_out:
4272        return rc;
4273}
4274
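    /*
     * Build the table of special (pass-through) request contexts.  Each one
     * carries its own DMA-coherent FIT message buffer, a scatterlist and a
     * FIT SG descriptor list sized for SKD_N_SG_PER_SPECIAL entries.
     */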
4275static int skd_cons_skspcl(struct skd_device *skdev)
4276{
4277        int rc = 0;
4278        u32 i, nbytes;
4279
4280        pr_debug("%s:%s:%d skspcl_table kzalloc, struct %lu, count %u total %lu\n",
4281                 skdev->name, __func__, __LINE__,
4282                 sizeof(struct skd_special_context),
4283                 skdev->n_special,
4284                 sizeof(struct skd_special_context) * skdev->n_special);
4285
4286        skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context)
4287                                      * skdev->n_special, GFP_KERNEL);
4288        if (skdev->skspcl_table == NULL) {
4289                rc = -ENOMEM;
4290                goto err_out;
4291        }
4292
4293        for (i = 0; i < skdev->n_special; i++) {
4294                struct skd_special_context *skspcl;
4295
4296                skspcl = &skdev->skspcl_table[i];
4297
4298                skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST;
4299                skspcl->req.state = SKD_REQ_STATE_IDLE;
4300
4301                skspcl->req.next = &skspcl[1].req;
4302
4303                nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4304
4305                skspcl->msg_buf =
4306                        pci_zalloc_consistent(skdev->pdev, nbytes,
4307                                              &skspcl->mb_dma_address);
4308                if (skspcl->msg_buf == NULL) {
4309                        rc = -ENOMEM;
4310                        goto err_out;
4311                }
4312
4313                skspcl->req.sg = kzalloc(sizeof(struct scatterlist) *
4314                                         SKD_N_SG_PER_SPECIAL, GFP_KERNEL);
4315                if (skspcl->req.sg == NULL) {
4316                        rc = -ENOMEM;
4317                        goto err_out;
4318                }
4319
4320                skspcl->req.sksg_list = skd_cons_sg_list(skdev,
4321                                                         SKD_N_SG_PER_SPECIAL,
4322                                                         &skspcl->req.sksg_dma_address);
4323

4324                if (skspcl->req.sksg_list == NULL) {
4325                        rc = -ENOMEM;
4326                        goto err_out;
4327                }
4328        }
4329
4330        /* Free list is in order starting with the 0th entry. */
4331        skdev->skspcl_table[i - 1].req.next = NULL;
4332        skdev->skspcl_free_list = skdev->skspcl_table;
4333
4334        return rc;
4335
4336err_out:
4337        return rc;
4338}
4339
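    /*
     * Set up the single internal special context used by the driver for its
     * own commands: a data buffer, a FIT message buffer and a one-entry SG
     * descriptor list, formatted once by skd_format_internal_skspcl().
     */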
4340static int skd_cons_sksb(struct skd_device *skdev)
4341{
4342        int rc = 0;
4343        struct skd_special_context *skspcl;
4344        u32 nbytes;
4345
4346        skspcl = &skdev->internal_skspcl;
4347
4348        skspcl->req.id = 0 + SKD_ID_INTERNAL;
4349        skspcl->req.state = SKD_REQ_STATE_IDLE;
4350
4351        nbytes = SKD_N_INTERNAL_BYTES;
4352
4353        skspcl->data_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
4354                                                 &skspcl->db_dma_address);
4355        if (skspcl->data_buf == NULL) {
4356                rc = -ENOMEM;
4357                goto err_out;
4358        }
4359
4360        nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4361        skspcl->msg_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
4362                                                &skspcl->mb_dma_address);
4363        if (skspcl->msg_buf == NULL) {
4364                rc = -ENOMEM;
4365                goto err_out;
4366        }
4367
4368        skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
4369                                                 &skspcl->req.sksg_dma_address);
4370        if (skspcl->req.sksg_list == NULL) {
4371                rc = -ENOMEM;
4372                goto err_out;
4373        }
4374
4375        if (!skd_format_internal_skspcl(skdev)) {
4376                rc = -EINVAL;
4377                goto err_out;
4378        }
4379
4380err_out:
4381        return rc;
4382}
4383
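    /*
     * Allocate the gendisk and request queue, apply the queue limits
     * (flush/FUA, segment and sector caps, optimal I/O size, discard
     * parameters) and leave the queue stopped for now.
     */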
4384static int skd_cons_disk(struct skd_device *skdev)
4385{
4386        int rc = 0;
4387        struct gendisk *disk;
4388        struct request_queue *q;
4389        unsigned long flags;
4390
4391        disk = alloc_disk(SKD_MINORS_PER_DEVICE);
4392        if (!disk) {
4393                rc = -ENOMEM;
4394                goto err_out;
4395        }
4396
4397        skdev->disk = disk;
4398        sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);
4399
4400        disk->major = skdev->major;
4401        disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
4402        disk->fops = &skd_blockdev_ops;
4403        disk->private_data = skdev;
4404
4405        q = blk_init_queue(skd_request_fn, &skdev->lock);
4406        if (!q) {
4407                rc = -ENOMEM;
4408                goto err_out;
4409        }
4410
4411        skdev->queue = q;
4412        disk->queue = q;
4413        q->queuedata = skdev;
4414
4415        blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
4416        blk_queue_max_segments(q, skdev->sgs_per_request);
4417        blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
4418
4419        /* set sysfs optimal_io_size to 8K */
4420        blk_queue_io_opt(q, 8192);
4421
4422        /* DISCARD Flag initialization. */
4423        q->limits.discard_granularity = 8192;
4424        q->limits.discard_alignment = 0;
4425        blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
4426        q->limits.discard_zeroes_data = 1;
4427        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
4428        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
4429        queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
4430
4431        spin_lock_irqsave(&skdev->lock, flags);
4432        pr_debug("%s:%s:%d stopping %s queue\n",
4433                 skdev->name, __func__, __LINE__, skdev->name);
4434        blk_stop_queue(skdev->queue);
4435        spin_unlock_irqrestore(&skdev->lock, flags);
4436
4437err_out:
4438        return rc;
4439}
4440
4441#define SKD_N_DEV_TABLE         16u
4442static u32 skd_next_devno;
4443
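    /*
     * Allocate and initialize a skd_device for the given PCI function,
     * constructing each resource pool in turn (completion queue, FIT
     * messages, requests, specials, internal buffer, disk).  Any failure
     * tears down whatever was built via skd_destruct() and returns NULL.
     */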
4444static struct skd_device *skd_construct(struct pci_dev *pdev)
4445{
4446        struct skd_device *skdev;
4447        int blk_major = skd_major;
4448        int rc;
4449
4450        skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
4451
4452        if (!skdev) {
4453                pr_err(PFX "(%s): memory alloc failure\n",
4454                       pci_name(pdev));
4455                return NULL;
4456        }
4457
4458        skdev->state = SKD_DRVR_STATE_LOAD;
4459        skdev->pdev = pdev;
4460        skdev->devno = skd_next_devno++;
4461        skdev->major = blk_major;
4462        skdev->irq_type = skd_isr_type;
4463        sprintf(skdev->name, DRV_NAME "%u", skdev->devno);
4464        skdev->dev_max_queue_depth = 0;
4465
4466        skdev->num_req_context = skd_max_queue_depth;
4467        skdev->num_fitmsg_context = skd_max_queue_depth;
4468        skdev->n_special = skd_max_pass_thru;
4469        skdev->cur_max_queue_depth = 1;
4470        skdev->queue_low_water_mark = 1;
4471        skdev->proto_ver = 99;
4472        skdev->sgs_per_request = skd_sgs_per_request;
4473        skdev->dbg_level = skd_dbg_level;
4474
4475        atomic_set(&skdev->device_count, 0);
4476
4477        spin_lock_init(&skdev->lock);
4478
4479        INIT_WORK(&skdev->completion_worker, skd_completion_worker);
4480
4481        pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
4482        rc = skd_cons_skcomp(skdev);
4483        if (rc < 0)
4484                goto err_out;
4485
4486        pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
4487        rc = skd_cons_skmsg(skdev);
4488        if (rc < 0)
4489                goto err_out;
4490
4491        pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
4492        rc = skd_cons_skreq(skdev);
4493        if (rc < 0)
4494                goto err_out;
4495
4496        pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
4497        rc = skd_cons_skspcl(skdev);
4498        if (rc < 0)
4499                goto err_out;
4500
4501        pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
4502        rc = skd_cons_sksb(skdev);
4503        if (rc < 0)
4504                goto err_out;
4505
4506        pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
4507        rc = skd_cons_disk(skdev);
4508        if (rc < 0)
4509                goto err_out;
4510
4511        pr_debug("%s:%s:%d VICTORY\n", skdev->name, __func__, __LINE__);
4512        return skdev;
4513
4514err_out:
4515        pr_debug("%s:%s:%d construct failed\n",
4516                 skdev->name, __func__, __LINE__);
4517        skd_destruct(skdev);
4518        return NULL;
4519}
4520
4521/*
4522 *****************************************************************************
4523 * DESTRUCT (FREE)
4524 *****************************************************************************
4525 */
4526
4527static void skd_free_skcomp(struct skd_device *skdev)
4528{
4529        if (skdev->skcomp_table != NULL) {
4530                u32 nbytes;
4531
4532                nbytes = sizeof(skdev->skcomp_table[0]) *
4533                         SKD_N_COMPLETION_ENTRY;
4534                pci_free_consistent(skdev->pdev, nbytes,
4535                                    skdev->skcomp_table, skdev->cq_dma_address);
4536        }
4537
4538        skdev->skcomp_table = NULL;
4539        skdev->cq_dma_address = 0;
4540}
4541
4542static void skd_free_skmsg(struct skd_device *skdev)
4543{
4544        u32 i;
4545
4546        if (skdev->skmsg_table == NULL)
4547                return;
4548
4549        for (i = 0; i < skdev->num_fitmsg_context; i++) {
4550                struct skd_fitmsg_context *skmsg;
4551
4552                skmsg = &skdev->skmsg_table[i];
4553
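                    /*
                     * Add the alignment offset saved in skd_cons_skmsg() back
                     * to the pointer and DMA address before handing the
                     * buffer back to the DMA API.
                     */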
4554                if (skmsg->msg_buf != NULL) {
4555                        skmsg->msg_buf += skmsg->offset;
4556                        skmsg->mb_dma_address += skmsg->offset;
4557                        pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
4558                                            skmsg->msg_buf,
4559                                            skmsg->mb_dma_address);
4560                }
4561                skmsg->msg_buf = NULL;
4562                skmsg->mb_dma_address = 0;
4563        }
4564
4565        kfree(skdev->skmsg_table);
4566        skdev->skmsg_table = NULL;
4567}
4568
4569static void skd_free_sg_list(struct skd_device *skdev,
4570                             struct fit_sg_descriptor *sg_list,
4571                             u32 n_sg, dma_addr_t dma_addr)
4572{
4573        if (sg_list != NULL) {
4574                u32 nbytes;
4575
4576                nbytes = sizeof(*sg_list) * n_sg;
4577
4578                pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
4579        }
4580}
4581
4582static void skd_free_skreq(struct skd_device *skdev)
4583{
4584        u32 i;
4585
4586        if (skdev->skreq_table == NULL)
4587                return;
4588
4589        for (i = 0; i < skdev->num_req_context; i++) {
4590                struct skd_request_context *skreq;
4591
4592                skreq = &skdev->skreq_table[i];
4593
4594                skd_free_sg_list(skdev, skreq->sksg_list,
4595                                 skdev->sgs_per_request,
4596                                 skreq->sksg_dma_address);
4597
4598                skreq->sksg_list = NULL;
4599                skreq->sksg_dma_address = 0;
4600
4601                kfree(skreq->sg);
4602        }
4603
4604        kfree(skdev->skreq_table);
4605        skdev->skreq_table = NULL;
4606}
4607
4608static void skd_free_skspcl(struct skd_device *skdev)
4609{
4610        u32 i;
4611        u32 nbytes;
4612
4613        if (skdev->skspcl_table == NULL)
4614                return;
4615
4616        for (i = 0; i < skdev->n_special; i++) {
4617                struct skd_special_context *skspcl;
4618
4619                skspcl = &skdev->skspcl_table[i];
4620
4621                if (skspcl->msg_buf != NULL) {
4622                        nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4623                        pci_free_consistent(skdev->pdev, nbytes,
4624                                            skspcl->msg_buf,
4625                                            skspcl->mb_dma_address);
4626                }
4627
4628                skspcl->msg_buf = NULL;
4629                skspcl->mb_dma_address = 0;
4630
4631                skd_free_sg_list(skdev, skspcl->req.sksg_list,
4632                                 SKD_N_SG_PER_SPECIAL,
4633                                 skspcl->req.sksg_dma_address);
4634
4635                skspcl->req.sksg_list = NULL;
4636                skspcl->req.sksg_dma_address = 0;
4637
4638                kfree(skspcl->req.sg);
4639        }
4640
4641        kfree(skdev->skspcl_table);
4642        skdev->skspcl_table = NULL;
4643}
4644
4645static void skd_free_sksb(struct skd_device *skdev)
4646{
4647        struct skd_special_context *skspcl;
4648        u32 nbytes;
4649
4650        skspcl = &skdev->internal_skspcl;
4651
4652        if (skspcl->data_buf != NULL) {
4653                nbytes = SKD_N_INTERNAL_BYTES;
4654
4655                pci_free_consistent(skdev->pdev, nbytes,
4656                                    skspcl->data_buf, skspcl->db_dma_address);
4657        }
4658
4659        skspcl->data_buf = NULL;
4660        skspcl->db_dma_address = 0;
4661
4662        if (skspcl->msg_buf != NULL) {
4663                nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4664                pci_free_consistent(skdev->pdev, nbytes,
4665                                    skspcl->msg_buf, skspcl->mb_dma_address);
4666        }
4667
4668        skspcl->msg_buf = NULL;
4669        skspcl->mb_dma_address = 0;
4670
4671        skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
4672                         skspcl->req.sksg_dma_address);
4673
4674        skspcl->req.sksg_list = NULL;
4675        skspcl->req.sksg_dma_address = 0;
4676}
4677
4678static void skd_free_disk(struct skd_device *skdev)
4679{
4680        struct gendisk *disk = skdev->disk;
4681
4682        if (disk != NULL) {
4683                struct request_queue *q = disk->queue;
4684
4685                if (disk->flags & GENHD_FL_UP)
4686                        del_gendisk(disk);
4687                if (q)
4688                        blk_cleanup_queue(q);
4689                put_disk(disk);
4690        }
4691        skdev->disk = NULL;
4692}
4693
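    /*
     * Free everything skd_construct() built, in reverse order.  Each
     * skd_free_*() helper tolerates NULL tables and buffers, so this also
     * serves as the unwind path for a partially failed construct.
     */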
4694static void skd_destruct(struct skd_device *skdev)
4695{
4696        if (skdev == NULL)
4697                return;
4698
4700        pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
4701        skd_free_disk(skdev);
4702
4703        pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
4704        skd_free_sksb(skdev);
4705
4706        pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
4707        skd_free_skspcl(skdev);
4708
4709        pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
4710        skd_free_skreq(skdev);
4711
4712        pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
4713        skd_free_skmsg(skdev);
4714
4715        pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
4716        skd_free_skcomp(skdev);
4717
4718        pr_debug("%s:%s:%d skdev\n", skdev->name, __func__, __LINE__);
4719        kfree(skdev);
4720}
4721
4722/*
4723 *****************************************************************************
4724 * BLOCK DEVICE (BDEV) GLUE
4725 *****************************************************************************
4726 */
4727
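    /*
     * HDIO_GETGEO support: synthesize a legacy CHS geometry (64 heads,
     * 255 sectors per track) from the reported capacity; fails with -EIO
     * until the capacity is known.
     */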
4728static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
4729{
4730        struct skd_device *skdev;
4731        u64 capacity;
4732
4733        skdev = bdev->bd_disk->private_data;
4734
4735        pr_debug("%s:%s:%d %s: CMD[%s] getgeo device\n",
4736                 skdev->name, __func__, __LINE__,
4737                 bdev->bd_disk->disk_name, current->comm);
4738
4739        if (skdev->read_cap_is_valid) {
4740                capacity = get_capacity(skdev->disk);
4741                geo->heads = 64;
4742                geo->sectors = 255;
4743                geo->cylinders = (capacity) / (255 * 64);
4744
4745                return 0;
4746        }
4747        return -EIO;
4748}
4749
4750static int skd_bdev_attach(struct skd_device *skdev)
4751{
4752        pr_debug("%s:%s:%d add_disk\n", skdev->name, __func__, __LINE__);
4753        add_disk(skdev->disk);
4754        return 0;
4755}
4756
4757static const struct block_device_operations skd_blockdev_ops = {
4758        .owner          = THIS_MODULE,
4759        .ioctl          = skd_bdev_ioctl,
4760        .getgeo         = skd_bdev_getgeo,
4761};
4762
4763
4764/*
4765 *****************************************************************************
4766 * PCIe DRIVER GLUE
4767 *****************************************************************************
4768 */
4769
4770static const struct pci_device_id skd_pci_tbl[] = {
4771        { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
4772          PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4773        { 0 }                     /* terminate list */
4774};
4775
4776MODULE_DEVICE_TABLE(pci, skd_pci_tbl);
4777
4778static char *skd_pci_info(struct skd_device *skdev, char *str)
4779{
4780        int pcie_reg;
4781
4782        strcpy(str, "PCIe (");
4783        pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);
4784
4785        if (pcie_reg) {
4786
4787                char lwstr[6];
4788                uint16_t pcie_lstat, lspeed, lwidth;
4789
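                    /*
                     * 0x12 is the offset of the Link Status register
                     * (PCI_EXP_LNKSTA) within the PCIe capability; the low
                     * nibble encodes the link speed and bits 9:4 the
                     * negotiated link width.
                     */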
4790                pcie_reg += 0x12;
4791                pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
4792                lspeed = pcie_lstat & (0xF);
4793                lwidth = (pcie_lstat & 0x3F0) >> 4;
4794
4795                if (lspeed == 1)
4796                        strcat(str, "2.5GT/s ");
4797                else if (lspeed == 2)
4798                        strcat(str, "5.0GT/s ");
4799                else
4800                        strcat(str, "<unknown> ");
4801                snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
4802                strcat(str, lwstr);
4803        }
4804        return str;
4805}
4806
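    /*
     * PCI probe: enable the device, request its regions, set a 64-bit DMA
     * mask (falling back to 32-bit), register the block major on first use,
     * construct the skd_device, map the BARs, acquire interrupts, start the
     * device and wait for it to come online before attaching the gendisk.
     */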
4807static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4808{
4809        int i;
4810        int rc = 0;
4811        char pci_str[32];
4812        struct skd_device *skdev;
4813
4814        pr_info("STEC s1120 Driver(%s) version %s-b%s\n",
4815               DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
4816        pr_info("(skd?:??:[%s]): vendor=%04X device=%04x\n",
4817               pci_name(pdev), pdev->vendor, pdev->device);
4818
4819        rc = pci_enable_device(pdev);
4820        if (rc)
4821                return rc;
4822        rc = pci_request_regions(pdev, DRV_NAME);
4823        if (rc)
4824                goto err_out;
4825        rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
4826        if (!rc) {
4827                rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4828                if (rc)
4829                        pr_err("(%s): consistent DMA mask error %d\n",
4830                               pci_name(pdev), rc);
4832        } else {
4833                rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4834                if (rc) {
4835
4836                        pr_err("(%s): DMA mask error %d\n",
4837                               pci_name(pdev), rc);
4838                        goto err_out_regions;
4839                }
4840        }
4841
4842        if (!skd_major) {
4843                rc = register_blkdev(0, DRV_NAME);
4844                if (rc < 0)
4845                        goto err_out_regions;
4846                BUG_ON(!rc);
4847                skd_major = rc;
4848        }
4849
4850        skdev = skd_construct(pdev);
4851        if (skdev == NULL) {
4852                rc = -ENOMEM;
4853                goto err_out_regions;
4854        }
4855
4856        skd_pci_info(skdev, pci_str);
4857        pr_info("(%s): %s 64bit\n", skd_name(skdev), pci_str);
4858
4859        pci_set_master(pdev);
4860        rc = pci_enable_pcie_error_reporting(pdev);
4861        if (rc) {
4862                pr_err(
4863                       "(%s): bad enable of PCIe error reporting rc=%d\n",
4864                       skd_name(skdev), rc);
4865                skdev->pcie_error_reporting_is_enabled = 0;
4866        } else {
4867                skdev->pcie_error_reporting_is_enabled = 1;
4868        }
4869
4870        pci_set_drvdata(pdev, skdev);
4871
4872        skdev->disk->driverfs_dev = &pdev->dev;
4873
4874        for (i = 0; i < SKD_MAX_BARS; i++) {
4875                skdev->mem_phys[i] = pci_resource_start(pdev, i);
4876                skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
4877                skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
4878                                            skdev->mem_size[i]);
4879                if (!skdev->mem_map[i]) {
4880                        pr_err("(%s): Unable to map adapter memory!\n",
4881                               skd_name(skdev));
4882                        rc = -ENODEV;
4883                        goto err_out_iounmap;
4884                }
4885                pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
4886                         skdev->name, __func__, __LINE__,
4887                         skdev->mem_map[i],
4888                         (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
4889        }
4890
4891        rc = skd_acquire_irq(skdev);
4892        if (rc) {
4893                pr_err("(%s): interrupt resource error %d\n",
4894                       skd_name(skdev), rc);
4895                goto err_out_iounmap;
4896        }
4897
4898        rc = skd_start_timer(skdev);
4899        if (rc)
4900                goto err_out_timer;
4901
4902        init_waitqueue_head(&skdev->waitq);
4903
4904        skd_start_device(skdev);
4905
4906        rc = wait_event_interruptible_timeout(skdev->waitq,
4907                                              (skdev->gendisk_on),
4908                                              (SKD_START_WAIT_SECONDS * HZ));
4909        if (skdev->gendisk_on > 0) {
4910                /* device came on-line after reset */
4911                skd_bdev_attach(skdev);
4912                rc = 0;
4913        } else {
4914                /* we timed out, something is wrong with the device,
4915                 * so don't add the disk structure */
4916                pr_err(
4917                       "(%s): error: waiting for s1120 timed out %d!\n",
4918                       skd_name(skdev), rc);
4919                /* if no error was returned by the wait, report the timeout as -ENXIO */
4920                if (!rc)
4921                        rc = -ENXIO;
4922                goto err_out_timer;
4923        }
4924
4925
4926#ifdef SKD_VMK_POLL_HANDLER
4927        if (skdev->irq_type == SKD_IRQ_MSIX) {
4928                /* MSIX completion handler is being used for coredump */
4929                vmklnx_scsi_register_poll_handler(skdev->scsi_host,
4930                                                  skdev->msix_entries[5].vector,
4931                                                  skd_comp_q, skdev);
4932        } else {
4933                vmklnx_scsi_register_poll_handler(skdev->scsi_host,
4934                                                  skdev->pdev->irq, skd_isr,
4935                                                  skdev);
4936        }
4937#endif  /* SKD_VMK_POLL_HANDLER */
4938
4939        return rc;
4940
4941err_out_timer:
4942        skd_stop_device(skdev);
4943        skd_release_irq(skdev);
4944
4945err_out_iounmap:
4946        for (i = 0; i < SKD_MAX_BARS; i++)
4947                if (skdev->mem_map[i])
4948                        iounmap(skdev->mem_map[i]);
4949
4950        if (skdev->pcie_error_reporting_is_enabled)
4951                pci_disable_pcie_error_reporting(pdev);
4952
4953        skd_destruct(skdev);
4954
4955err_out_regions:
4956        pci_release_regions(pdev);
4957
4958err_out:
4959        pci_disable_device(pdev);
4960        pci_set_drvdata(pdev, NULL);
4961        return rc;
4962}
4963
4964static void skd_pci_remove(struct pci_dev *pdev)
4965{
4966        int i;
4967        struct skd_device *skdev;
4968
4969        skdev = pci_get_drvdata(pdev);
4970        if (!skdev) {
4971                pr_err("%s: no device data for PCI\n", pci_name(pdev));
4972                return;
4973        }
4974        skd_stop_device(skdev);
4975        skd_release_irq(skdev);
4976
4977        for (i = 0; i < SKD_MAX_BARS; i++)
4978                if (skdev->mem_map[i])
4979                        iounmap(skdev->mem_map[i]);
4980
4981        if (skdev->pcie_error_reporting_is_enabled)
4982                pci_disable_pcie_error_reporting(pdev);
4983
4984        skd_destruct(skdev);
4985
4986        pci_release_regions(pdev);
4987        pci_disable_device(pdev);
4988        pci_set_drvdata(pdev, NULL);
4989
4992
4993static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
4994{
4995        int i;
4996        struct skd_device *skdev;
4997
4998        skdev = pci_get_drvdata(pdev);
4999        if (!skdev) {
5000                pr_err("%s: no device data for PCI\n", pci_name(pdev));
5001                return -EIO;
5002        }
5003
5004        skd_stop_device(skdev);
5005
5006        skd_release_irq(skdev);
5007
5008        for (i = 0; i < SKD_MAX_BARS; i++)
5009                if (skdev->mem_map[i])
5010                        iounmap(skdev->mem_map[i]);
5011
5012        if (skdev->pcie_error_reporting_is_enabled)
5013                pci_disable_pcie_error_reporting(pdev);
5014
5015        pci_release_regions(pdev);
5016        pci_save_state(pdev);
5017        pci_disable_device(pdev);
5018        pci_set_power_state(pdev, pci_choose_state(pdev, state));
5019        return 0;
5020}
5021
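    /*
     * Resume: restore PCI state and redo the probe-time bring-up (DMA
     * masks, BAR mappings, IRQs, timer) before restarting the device.
     */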
5022static int skd_pci_resume(struct pci_dev *pdev)
5023{
5024        int i;
5025        int rc = 0;
5026        struct skd_device *skdev;
5027
5028        skdev = pci_get_drvdata(pdev);
5029        if (!skdev) {
5030                pr_err("%s: no device data for PCI\n", pci_name(pdev));
5031                return -EIO;
5032        }
5033
5034        pci_set_power_state(pdev, PCI_D0);
5035        pci_enable_wake(pdev, PCI_D0, 0);
5036        pci_restore_state(pdev);
5037
5038        rc = pci_enable_device(pdev);
5039        if (rc)
5040                return rc;
5041        rc = pci_request_regions(pdev, DRV_NAME);
5042        if (rc)
5043                goto err_out;
5044        rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
5045        if (!rc) {
5046                rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
5047                if (rc)
5048                        pr_err("(%s): consistent DMA mask error %d\n",
5049                               pci_name(pdev), rc);
5051        } else {
5052                rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5053                if (rc) {
5054
5055                        pr_err("(%s): DMA mask error %d\n",
5056                               pci_name(pdev), rc);
5057                        goto err_out_regions;
5058                }
5059        }
5060
5061        pci_set_master(pdev);
5062        rc = pci_enable_pcie_error_reporting(pdev);
5063        if (rc) {
5064                pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
5065                       skdev->name, rc);
5066                skdev->pcie_error_reporting_is_enabled = 0;
5067        } else {
5068                skdev->pcie_error_reporting_is_enabled = 1;
5069        }

5070        for (i = 0; i < SKD_MAX_BARS; i++) {
5071
5072                skdev->mem_phys[i] = pci_resource_start(pdev, i);
5073                skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
5074                skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
5075                                            skdev->mem_size[i]);
5076                if (!skdev->mem_map[i]) {
5077                        pr_err("(%s): Unable to map adapter memory!\n",
5078                               skd_name(skdev));
5079                        rc = -ENODEV;
5080                        goto err_out_iounmap;
5081                }
5082                pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
5083                         skdev->name, __func__, __LINE__,
5084                         skdev->mem_map[i],
5085                         (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
5086        }
5087        rc = skd_acquire_irq(skdev);
5088        if (rc) {
5089
5090                pr_err("(%s): interrupt resource error %d\n",
5091                       pci_name(pdev), rc);
5092                goto err_out_iounmap;
5093        }
5094
5095        rc = skd_start_timer(skdev);
5096        if (rc)
5097                goto err_out_timer;
5098
5099        init_waitqueue_head(&skdev->waitq);
5100
5101        skd_start_device(skdev);
5102
5103        return rc;
5104
5105err_out_timer:
5106        skd_stop_device(skdev);
5107        skd_release_irq(skdev);
5108
5109err_out_iounmap:
5110        for (i = 0; i < SKD_MAX_BARS; i++)
5111                if (skdev->mem_map[i])
5112                        iounmap(skdev->mem_map[i]);
5113
5114        if (skdev->pcie_error_reporting_is_enabled)
5115                pci_disable_pcie_error_reporting(pdev);
5116
5117err_out_regions:
5118        pci_release_regions(pdev);
5119
5120err_out:
5121        pci_disable_device(pdev);
5122        return rc;
5123}
5124
5125static void skd_pci_shutdown(struct pci_dev *pdev)
5126{
5127        struct skd_device *skdev;
5128
5129        pr_err("skd_pci_shutdown called\n");
5130
5131        skdev = pci_get_drvdata(pdev);
5132        if (!skdev) {
5133                pr_err("%s: no device data for PCI\n", pci_name(pdev));
5134                return;
5135        }
5136
5137        pr_err("%s: calling stop\n", skd_name(skdev));
5138        skd_stop_device(skdev);
5139}
5140
5141static struct pci_driver skd_driver = {
5142        .name           = DRV_NAME,
5143        .id_table       = skd_pci_tbl,
5144        .probe          = skd_pci_probe,
5145        .remove         = skd_pci_remove,
5146        .suspend        = skd_pci_suspend,
5147        .resume         = skd_pci_resume,
5148        .shutdown       = skd_pci_shutdown,
5149};
5150
5151/*
5152 *****************************************************************************
5153 * LOGGING SUPPORT
5154 *****************************************************************************
5155 */
5156
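    /*
     * Format the "<name>:<serial>:[<pci-address>]" identifier used in log
     * messages; the serial number is shown as "??" until the INQUIRY data
     * is valid.
     */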
5157static const char *skd_name(struct skd_device *skdev)
5158{
5159        memset(skdev->id_str, 0, sizeof(skdev->id_str));
5160
5161        if (skdev->inquiry_is_valid)
5162                snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:%s:[%s]",
5163                         skdev->name, skdev->inq_serial_num,
5164                         pci_name(skdev->pdev));
5165        else
5166                snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:??:[%s]",
5167                         skdev->name, pci_name(skdev->pdev));
5168
5169        return skdev->id_str;
5170}
5171
5172const char *skd_drive_state_to_str(int state)
5173{
5174        switch (state) {
5175        case FIT_SR_DRIVE_OFFLINE:
5176                return "OFFLINE";
5177        case FIT_SR_DRIVE_INIT:
5178                return "INIT";
5179        case FIT_SR_DRIVE_ONLINE:
5180                return "ONLINE";
5181        case FIT_SR_DRIVE_BUSY:
5182                return "BUSY";
5183        case FIT_SR_DRIVE_FAULT:
5184                return "FAULT";
5185        case FIT_SR_DRIVE_DEGRADED:
5186                return "DEGRADED";
5187        case FIT_SR_PCIE_LINK_DOWN:
5188                return "LINK_DOWN";
5189        case FIT_SR_DRIVE_SOFT_RESET:
5190                return "SOFT_RESET";
5191        case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
5192                return "NEED_FW";
5193        case FIT_SR_DRIVE_INIT_FAULT:
5194                return "INIT_FAULT";
5195        case FIT_SR_DRIVE_BUSY_SANITIZE:
5196                return "BUSY_SANITIZE";
5197        case FIT_SR_DRIVE_BUSY_ERASE:
5198                return "BUSY_ERASE";
5199        case FIT_SR_DRIVE_FW_BOOTING:
5200                return "FW_BOOTING";
5201        default:
5202                return "???";
5203        }
5204}
5205
5206const char *skd_skdev_state_to_str(enum skd_drvr_state state)
5207{
5208        switch (state) {
5209        case SKD_DRVR_STATE_LOAD:
5210                return "LOAD";
5211        case SKD_DRVR_STATE_IDLE:
5212                return "IDLE";
5213        case SKD_DRVR_STATE_BUSY:
5214                return "BUSY";
5215        case SKD_DRVR_STATE_STARTING:
5216                return "STARTING";
5217        case SKD_DRVR_STATE_ONLINE:
5218                return "ONLINE";
5219        case SKD_DRVR_STATE_PAUSING:
5220                return "PAUSING";
5221        case SKD_DRVR_STATE_PAUSED:
5222                return "PAUSED";
5223        case SKD_DRVR_STATE_DRAINING_TIMEOUT:
5224                return "DRAINING_TIMEOUT";
5225        case SKD_DRVR_STATE_RESTARTING:
5226                return "RESTARTING";
5227        case SKD_DRVR_STATE_RESUMING:
5228                return "RESUMING";
5229        case SKD_DRVR_STATE_STOPPING:
5230                return "STOPPING";
5231        case SKD_DRVR_STATE_SYNCING:
5232                return "SYNCING";
5233        case SKD_DRVR_STATE_FAULT:
5234                return "FAULT";
5235        case SKD_DRVR_STATE_DISAPPEARED:
5236                return "DISAPPEARED";
5237        case SKD_DRVR_STATE_BUSY_ERASE:
5238                return "BUSY_ERASE";
5239        case SKD_DRVR_STATE_BUSY_SANITIZE:
5240                return "BUSY_SANITIZE";
5241        case SKD_DRVR_STATE_BUSY_IMMINENT:
5242                return "BUSY_IMMINENT";
5243        case SKD_DRVR_STATE_WAIT_BOOT:
5244                return "WAIT_BOOT";
5245
5246        default:
5247                return "???";
5248        }
5249}
5250
5251static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
5252{
5253        switch (state) {
5254        case SKD_MSG_STATE_IDLE:
5255                return "IDLE";
5256        case SKD_MSG_STATE_BUSY:
5257                return "BUSY";
5258        default:
5259                return "???";
5260        }
5261}
5262
5263static const char *skd_skreq_state_to_str(enum skd_req_state state)
5264{
5265        switch (state) {
5266        case SKD_REQ_STATE_IDLE:
5267                return "IDLE";
5268        case SKD_REQ_STATE_SETUP:
5269                return "SETUP";
5270        case SKD_REQ_STATE_BUSY:
5271                return "BUSY";
5272        case SKD_REQ_STATE_COMPLETED:
5273                return "COMPLETED";
5274        case SKD_REQ_STATE_TIMEOUT:
5275                return "TIMEOUT";
5276        case SKD_REQ_STATE_ABORTED:
5277                return "ABORTED";
5278        default:
5279                return "???";
5280        }
5281}
5282
5283static void skd_log_skdev(struct skd_device *skdev, const char *event)
5284{
5285        pr_debug("%s:%s:%d (%s) skdev=%p event='%s'\n",
5286                 skdev->name, __func__, __LINE__, skdev->name, skdev, event);
5287        pr_debug("%s:%s:%d   drive_state=%s(%d) driver_state=%s(%d)\n",
5288                 skdev->name, __func__, __LINE__,
5289                 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
5290                 skd_skdev_state_to_str(skdev->state), skdev->state);
5291        pr_debug("%s:%s:%d   busy=%d limit=%d dev=%d lowat=%d\n",
5292                 skdev->name, __func__, __LINE__,
5293                 skdev->in_flight, skdev->cur_max_queue_depth,
5294                 skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
5295        pr_debug("%s:%s:%d   timestamp=0x%x cycle=%d cycle_ix=%d\n",
5296                 skdev->name, __func__, __LINE__,
5297                 skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
5298}
5299
5300static void skd_log_skmsg(struct skd_device *skdev,
5301                          struct skd_fitmsg_context *skmsg, const char *event)
5302{
5303        pr_debug("%s:%s:%d (%s) skmsg=%p event='%s'\n",
5304                 skdev->name, __func__, __LINE__, skdev->name, skmsg, event);
5305        pr_debug("%s:%s:%d   state=%s(%d) id=0x%04x length=%d\n",
5306                 skdev->name, __func__, __LINE__,
5307                 skd_skmsg_state_to_str(skmsg->state), skmsg->state,
5308                 skmsg->id, skmsg->length);
5309}
5310
5311static void skd_log_skreq(struct skd_device *skdev,
5312                          struct skd_request_context *skreq, const char *event)
5313{
5314        pr_debug("%s:%s:%d (%s) skreq=%p event='%s'\n",
5315                 skdev->name, __func__, __LINE__, skdev->name, skreq, event);
5316        pr_debug("%s:%s:%d   state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
5317                 skdev->name, __func__, __LINE__,
5318                 skd_skreq_state_to_str(skreq->state), skreq->state,
5319                 skreq->id, skreq->fitmsg_id);
5320        pr_debug("%s:%s:%d   timo=0x%x sg_dir=%d n_sg=%d\n",
5321                 skdev->name, __func__, __LINE__,
5322                 skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);
5323
5324        if (skreq->req != NULL) {
5325                struct request *req = skreq->req;
5326                u32 lba = (u32)blk_rq_pos(req);
5327                u32 count = blk_rq_sectors(req);
5328
5329                pr_debug("%s:%s:%d "
5330                         "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
5331                         skdev->name, __func__, __LINE__,
5332                         req, lba, lba, count, count,
5333                         (int)rq_data_dir(req));
5334        } else
5335                pr_debug("%s:%s:%d req=NULL\n",
5336                         skdev->name, __func__, __LINE__);
5337}
5338
5339/*
5340 *****************************************************************************
5341 * MODULE GLUE
5342 *****************************************************************************
5343 */
5344
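    /*
     * Module init: sanity-check the module parameters, clamping anything
     * out of range back to its default, then register the PCI driver.
     */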
5345static int __init skd_init(void)
5346{
5347        pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);
5348
5349        switch (skd_isr_type) {
5350        case SKD_IRQ_LEGACY:
5351        case SKD_IRQ_MSI:
5352        case SKD_IRQ_MSIX:
5353                break;
5354        default:
5355                pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
5356                       skd_isr_type, SKD_IRQ_DEFAULT);
5357                skd_isr_type = SKD_IRQ_DEFAULT;
5358        }
5359
5360        if (skd_max_queue_depth < 1 ||
5361            skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
5362                pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
5363                       skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
5364                skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
5365        }
5366
5367        if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) {
5368                pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
5369                       skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
5370                skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
5371        }
5372
5373        if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
5374                pr_err(PFX "skd_sgs_per_request %d invalid, re-set to %d\n",
5375                       skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
5376                skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
5377        }
5378
5379        if (skd_dbg_level < 0 || skd_dbg_level > 2) {
5380                pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
5381                       skd_dbg_level, 0);
5382                skd_dbg_level = 0;
5383        }
5384
5385        if (skd_isr_comp_limit < 0) {
5386                pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n",
5387                       skd_isr_comp_limit, 0);
5388                skd_isr_comp_limit = 0;
5389        }
5390
5391        if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) {
5392                pr_err(PFX "skd_max_pass_thru %d invalid, re-set to %d\n",
5393                       skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT);
5394                skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
5395        }
5396
5397        return pci_register_driver(&skd_driver);
5398}
5399
5400static void __exit skd_exit(void)
5401{
5402        pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID);
5403
5404        pci_unregister_driver(&skd_driver);
5405
5406        if (skd_major)
5407                unregister_blkdev(skd_major, DRV_NAME);
5408}
5409
5410module_init(skd_init);
5411module_exit(skd_exit);
5412