linux/drivers/block/skd_main.c
   1/* Copyright 2012 STEC, Inc.
   2 *
   3 * This file is licensed under the terms of the 3-clause
   4 * BSD License (http://opensource.org/licenses/BSD-3-Clause)
   5 * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html),
   6 * at your option. Both licenses are also available in the LICENSE file
   7 * distributed with this project. This file may not be copied, modified,
   8 * or distributed except in accordance with those terms.
   9 * Gordoni Waidhofer <gwaidhofer@stec-inc.com>
  10 * Initial Driver Design!
  11 * Thomas Swann <tswann@stec-inc.com>
  12 * Interrupt handling.
  13 * Ramprasad Chinthekindi <rchinthekindi@stec-inc.com>
  14 * biomode implementation.
  15 * Akhil Bhansali <abhansali@stec-inc.com>
  16 * Added support for DISCARD / FLUSH and FUA.
  17 */
  18
  19#include <linux/kernel.h>
  20#include <linux/module.h>
  21#include <linux/init.h>
  22#include <linux/pci.h>
  23#include <linux/slab.h>
  24#include <linux/spinlock.h>
  25#include <linux/blkdev.h>
  26#include <linux/sched.h>
  27#include <linux/interrupt.h>
  28#include <linux/compiler.h>
  29#include <linux/workqueue.h>
  30#include <linux/bitops.h>
  31#include <linux/delay.h>
  32#include <linux/time.h>
  33#include <linux/hdreg.h>
  34#include <linux/dma-mapping.h>
  35#include <linux/completion.h>
  36#include <linux/scatterlist.h>
  37#include <linux/version.h>
  38#include <linux/err.h>
  40#include <linux/aer.h>
  41#include <linux/ctype.h>
  42#include <linux/wait.h>
  43#include <linux/uio.h>
  44#include <scsi/scsi.h>
  45#include <scsi/sg.h>
  46#include <linux/io.h>
  47#include <linux/uaccess.h>
  48#include <asm/unaligned.h>
  49
  50#include "skd_s1120.h"
  51
  52static int skd_dbg_level;
  53static int skd_isr_comp_limit = 4;
  54
  55enum {
  56        STEC_LINK_2_5GTS = 0,
  57        STEC_LINK_5GTS = 1,
  58        STEC_LINK_8GTS = 2,
  59        STEC_LINK_UNKNOWN = 0xFF
  60};
  61
  62enum {
  63        SKD_FLUSH_INITIALIZER,
  64        SKD_FLUSH_ZERO_SIZE_FIRST,
  65        SKD_FLUSH_DATA_SECOND,
  66};
  67
  68#define SKD_ASSERT(expr) \
  69        do { \
  70                if (unlikely(!(expr))) { \
  71                        pr_err("Assertion failed! %s,%s,%s,line=%d\n",  \
  72                               # expr, __FILE__, __func__, __LINE__); \
  73                } \
  74        } while (0)
  75
  76#define DRV_NAME "skd"
  77#define DRV_VERSION "2.2.1"
  78#define DRV_BUILD_ID "0260"
  79#define PFX DRV_NAME ": "
  80#define DRV_BIN_VERSION 0x100
  81#define DRV_VER_COMPL   "2.2.1." DRV_BUILD_ID
  82
  83MODULE_AUTHOR("bug-reports: support@stec-inc.com");
  84MODULE_LICENSE("Dual BSD/GPL");
  85
  86MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")");
  87MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
  88
  89#define PCI_VENDOR_ID_STEC      0x1B39
  90#define PCI_DEVICE_ID_S1120     0x0001
  91
  92#define SKD_FUA_NV              (1 << 1)
  93#define SKD_MINORS_PER_DEVICE   16
  94
  95#define SKD_MAX_QUEUE_DEPTH     200u
  96
  97#define SKD_PAUSE_TIMEOUT       (5 * 1000)
  98
  99#define SKD_N_FITMSG_BYTES      (512u)
 100
 101#define SKD_N_SPECIAL_CONTEXT   32u
 102#define SKD_N_SPECIAL_FITMSG_BYTES      (128u)
 103
 104/* SG elements are 32 bytes, so we can make this 4096 and still be under the
 105 * 128KB limit.  That allows 4096*4K = 16M xfer size
 106 */
 107#define SKD_N_SG_PER_REQ_DEFAULT 256u
 108#define SKD_N_SG_PER_SPECIAL    256u
 109
 110#define SKD_N_COMPLETION_ENTRY  256u
 111#define SKD_N_READ_CAP_BYTES    (8u)
 112
 113#define SKD_N_INTERNAL_BYTES    (512u)
 114
  115/* 5 bits of uniquifier, 0xF800 */
 116#define SKD_ID_INCR             (0x400)
 117#define SKD_ID_TABLE_MASK       (3u << 8u)
 118#define  SKD_ID_RW_REQUEST      (0u << 8u)
 119#define  SKD_ID_INTERNAL        (1u << 8u)
 120#define  SKD_ID_SPECIAL_REQUEST (2u << 8u)
 121#define  SKD_ID_FIT_MSG         (3u << 8u)
 122#define SKD_ID_SLOT_MASK        0x00FFu
 123#define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu
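/*
 * A context id thus decomposes as: bits 0-7 select the slot within its
 * table, bits 8-9 name the table (r/w request, internal, special request
 * or FIT message), and the bits above them form a uniquifier that is
 * bumped by SKD_ID_INCR each time the context is reused, so a recycled
 * id can be told apart from the one it replaces.
 */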
 124
 125#define SKD_N_TIMEOUT_SLOT      4u
 126#define SKD_TIMEOUT_SLOT_MASK   3u
 127
 128#define SKD_N_MAX_SECTORS 2048u
 129
 130#define SKD_MAX_RETRIES 2u
 131
 132#define SKD_TIMER_SECONDS(seconds) (seconds)
 133#define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))
 134
 135#define INQ_STD_NBYTES 36
 136#define SKD_DISCARD_CDB_LENGTH  24
 137
 138enum skd_drvr_state {
 139        SKD_DRVR_STATE_LOAD,
 140        SKD_DRVR_STATE_IDLE,
 141        SKD_DRVR_STATE_BUSY,
 142        SKD_DRVR_STATE_STARTING,
 143        SKD_DRVR_STATE_ONLINE,
 144        SKD_DRVR_STATE_PAUSING,
 145        SKD_DRVR_STATE_PAUSED,
 146        SKD_DRVR_STATE_DRAINING_TIMEOUT,
 147        SKD_DRVR_STATE_RESTARTING,
 148        SKD_DRVR_STATE_RESUMING,
 149        SKD_DRVR_STATE_STOPPING,
 150        SKD_DRVR_STATE_FAULT,
 151        SKD_DRVR_STATE_DISAPPEARED,
 152        SKD_DRVR_STATE_PROTOCOL_MISMATCH,
 153        SKD_DRVR_STATE_BUSY_ERASE,
 154        SKD_DRVR_STATE_BUSY_SANITIZE,
 155        SKD_DRVR_STATE_BUSY_IMMINENT,
 156        SKD_DRVR_STATE_WAIT_BOOT,
 157        SKD_DRVR_STATE_SYNCING,
 158};
 159
 160#define SKD_WAIT_BOOT_TIMO      SKD_TIMER_SECONDS(90u)
 161#define SKD_STARTING_TIMO       SKD_TIMER_SECONDS(8u)
 162#define SKD_RESTARTING_TIMO     SKD_TIMER_MINUTES(4u)
 163#define SKD_DRAINING_TIMO       SKD_TIMER_SECONDS(6u)
 164#define SKD_BUSY_TIMO           SKD_TIMER_MINUTES(20u)
 165#define SKD_STARTED_BUSY_TIMO   SKD_TIMER_SECONDS(60u)
 166#define SKD_START_WAIT_SECONDS  90u
 167
 168enum skd_req_state {
 169        SKD_REQ_STATE_IDLE,
 170        SKD_REQ_STATE_SETUP,
 171        SKD_REQ_STATE_BUSY,
 172        SKD_REQ_STATE_COMPLETED,
 173        SKD_REQ_STATE_TIMEOUT,
 174        SKD_REQ_STATE_ABORTED,
 175};
 176
 177enum skd_fit_msg_state {
 178        SKD_MSG_STATE_IDLE,
 179        SKD_MSG_STATE_BUSY,
 180};
 181
 182enum skd_check_status_action {
 183        SKD_CHECK_STATUS_REPORT_GOOD,
 184        SKD_CHECK_STATUS_REPORT_SMART_ALERT,
 185        SKD_CHECK_STATUS_REQUEUE_REQUEST,
 186        SKD_CHECK_STATUS_REPORT_ERROR,
 187        SKD_CHECK_STATUS_BUSY_IMMINENT,
 188};
 189
 190struct skd_fitmsg_context {
 191        enum skd_fit_msg_state state;
 192
 193        struct skd_fitmsg_context *next;
 194
 195        u32 id;
 196        u16 outstanding;
 197
 198        u32 length;
 199        u32 offset;
 200
 201        u8 *msg_buf;
 202        dma_addr_t mb_dma_address;
 203};
 204
 205struct skd_request_context {
 206        enum skd_req_state state;
 207
 208        struct skd_request_context *next;
 209
 210        u16 id;
 211        u32 fitmsg_id;
 212
 213        struct request *req;
 214        u8 flush_cmd;
 215        u8 discard_page;
 216
 217        u32 timeout_stamp;
 218        u8 sg_data_dir;
 219        struct scatterlist *sg;
 220        u32 n_sg;
 221        u32 sg_byte_count;
 222
 223        struct fit_sg_descriptor *sksg_list;
 224        dma_addr_t sksg_dma_address;
 225
 226        struct fit_completion_entry_v1 completion;
 227
 228        struct fit_comp_error_info err_info;
 229
 230};
 231#define SKD_DATA_DIR_HOST_TO_CARD       1
 232#define SKD_DATA_DIR_CARD_TO_HOST       2
 233#define SKD_DATA_DIR_NONE               3       /* especially for DISCARD requests. */
 234
 235struct skd_special_context {
 236        struct skd_request_context req;
 237
 238        u8 orphaned;
 239
 240        void *data_buf;
 241        dma_addr_t db_dma_address;
 242
 243        u8 *msg_buf;
 244        dma_addr_t mb_dma_address;
 245};
 246
 247struct skd_sg_io {
 248        fmode_t mode;
 249        void __user *argp;
 250
 251        struct sg_io_hdr sg;
 252
 253        u8 cdb[16];
 254
 255        u32 dxfer_len;
 256        u32 iovcnt;
 257        struct sg_iovec *iov;
 258        struct sg_iovec no_iov_iov;
 259
 260        struct skd_special_context *skspcl;
 261};
 262
 263typedef enum skd_irq_type {
 264        SKD_IRQ_LEGACY,
 265        SKD_IRQ_MSI,
 266        SKD_IRQ_MSIX
 267} skd_irq_type_t;
 268
 269#define SKD_MAX_BARS                    2
 270
 271struct skd_device {
 272        volatile void __iomem *mem_map[SKD_MAX_BARS];
 273        resource_size_t mem_phys[SKD_MAX_BARS];
 274        u32 mem_size[SKD_MAX_BARS];
 275
 276        skd_irq_type_t irq_type;
 277        u32 msix_count;
 278        struct skd_msix_entry *msix_entries;
 279
 280        struct pci_dev *pdev;
 281        int pcie_error_reporting_is_enabled;
 282
 283        spinlock_t lock;
 284        struct gendisk *disk;
 285        struct request_queue *queue;
 286        struct device *class_dev;
 287        int gendisk_on;
 288        int sync_done;
 289
 290        atomic_t device_count;
 291        u32 devno;
 292        u32 major;
 293        char name[32];
 294        char isr_name[30];
 295
 296        enum skd_drvr_state state;
 297        u32 drive_state;
 298
 299        u32 in_flight;
 300        u32 cur_max_queue_depth;
 301        u32 queue_low_water_mark;
 302        u32 dev_max_queue_depth;
 303
 304        u32 num_fitmsg_context;
 305        u32 num_req_context;
 306
 307        u32 timeout_slot[SKD_N_TIMEOUT_SLOT];
 308        u32 timeout_stamp;
 309        struct skd_fitmsg_context *skmsg_free_list;
 310        struct skd_fitmsg_context *skmsg_table;
 311
 312        struct skd_request_context *skreq_free_list;
 313        struct skd_request_context *skreq_table;
 314
 315        struct skd_special_context *skspcl_free_list;
 316        struct skd_special_context *skspcl_table;
 317
 318        struct skd_special_context internal_skspcl;
 319        u32 read_cap_blocksize;
 320        u32 read_cap_last_lba;
 321        int read_cap_is_valid;
 322        int inquiry_is_valid;
 323        u8 inq_serial_num[13];  /*12 chars plus null term */
 324        u8 id_str[80];          /* holds a composite name (pci + sernum) */
 325
 326        u8 skcomp_cycle;
 327        u32 skcomp_ix;
 328        struct fit_completion_entry_v1 *skcomp_table;
 329        struct fit_comp_error_info *skerr_table;
 330        dma_addr_t cq_dma_address;
 331
 332        wait_queue_head_t waitq;
 333
 334        struct timer_list timer;
 335        u32 timer_countdown;
 336        u32 timer_substate;
 337
 338        int n_special;
 339        int sgs_per_request;
 340        u32 last_mtd;
 341
 342        u32 proto_ver;
 343
 344        int dbg_level;
 345        u32 connect_time_stamp;
 346        int connect_retries;
 347#define SKD_MAX_CONNECT_RETRIES 16
 348        u32 drive_jiffies;
 349
 350        u32 timo_slot;
 351
 352
 353        struct work_struct completion_worker;
 354};
 355
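/*
 * All register accesses go through mem_map[1] (the second mapped BAR).
 * With dbg_level < 2 the accessors below take the fast path; otherwise
 * each access is bracketed with barriers and traced via pr_debug().
 */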
 356#define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
 357#define SKD_READL(DEV, OFF)      skd_reg_read32(DEV, OFF)
 358#define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)
 359
 360static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
 361{
 362        u32 val;
 363
 364        if (likely(skdev->dbg_level < 2))
 365                return readl(skdev->mem_map[1] + offset);
 366        else {
 367                barrier();
 368                val = readl(skdev->mem_map[1] + offset);
 369                barrier();
 370                pr_debug("%s:%s:%d offset %x = %x\n",
 371                         skdev->name, __func__, __LINE__, offset, val);
 372                return val;
 373        }
 374
 375}
 376
 377static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
 378                                   u32 offset)
 379{
 380        if (likely(skdev->dbg_level < 2)) {
 381                writel(val, skdev->mem_map[1] + offset);
 382                barrier();
 383        } else {
 384                barrier();
 385                writel(val, skdev->mem_map[1] + offset);
 386                barrier();
 387                pr_debug("%s:%s:%d offset %x = %x\n",
 388                         skdev->name, __func__, __LINE__, offset, val);
 389        }
 390}
 391
 392static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
 393                                   u32 offset)
 394{
 395        if (likely(skdev->dbg_level < 2)) {
 396                writeq(val, skdev->mem_map[1] + offset);
 397                barrier();
 398        } else {
 399                barrier();
 400                writeq(val, skdev->mem_map[1] + offset);
 401                barrier();
 402                pr_debug("%s:%s:%d offset %x = %016llx\n",
 403                         skdev->name, __func__, __LINE__, offset, val);
 404        }
 405}
 406
 407
 408#define SKD_IRQ_DEFAULT SKD_IRQ_MSI
 409static int skd_isr_type = SKD_IRQ_DEFAULT;
 410
 411module_param(skd_isr_type, int, 0444);
 412MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
 413                 " (0==legacy, 1==MSI, 2==MSI-X, default==1)");
 414
 415#define SKD_MAX_REQ_PER_MSG_DEFAULT 1
 416static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
 417
 418module_param(skd_max_req_per_msg, int, 0444);
 419MODULE_PARM_DESC(skd_max_req_per_msg,
 420                 "Maximum SCSI requests packed in a single message."
 421                 " (1-14, default==1)");
 422
 423#define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
 424#define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
 425static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
 426
 427module_param(skd_max_queue_depth, int, 0444);
 428MODULE_PARM_DESC(skd_max_queue_depth,
 429                 "Maximum SCSI requests issued to s1120."
 430                 " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");
 431
 432static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
 433module_param(skd_sgs_per_request, int, 0444);
 434MODULE_PARM_DESC(skd_sgs_per_request,
 435                 "Maximum SG elements per block request."
 436                 " (1-4096, default==256)");
 437
 438static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
 439module_param(skd_max_pass_thru, int, 0444);
 440MODULE_PARM_DESC(skd_max_pass_thru,
 441                 "Maximum SCSI pass-thru at a time." " (1-50, default==32)");
 442
 443module_param(skd_dbg_level, int, 0444);
 444MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");
 445
 446module_param(skd_isr_comp_limit, int, 0444);
 447MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");
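/*
 * Illustrative only: since all of the parameters above are 0444 they can
 * only be set at load time, e.g.
 *   modprobe skd skd_isr_type=2 skd_max_queue_depth=128
 */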
 448
 449/* Major device number dynamically assigned. */
 450static u32 skd_major;
 451
 452static void skd_destruct(struct skd_device *skdev);
 453static const struct block_device_operations skd_blockdev_ops;
 454static void skd_send_fitmsg(struct skd_device *skdev,
 455                            struct skd_fitmsg_context *skmsg);
 456static void skd_send_special_fitmsg(struct skd_device *skdev,
 457                                    struct skd_special_context *skspcl);
 458static void skd_request_fn(struct request_queue *rq);
 459static void skd_end_request(struct skd_device *skdev,
 460                            struct skd_request_context *skreq, int error);
 461static int skd_preop_sg_list(struct skd_device *skdev,
 462                             struct skd_request_context *skreq);
 463static void skd_postop_sg_list(struct skd_device *skdev,
 464                               struct skd_request_context *skreq);
 465
 466static void skd_restart_device(struct skd_device *skdev);
 467static int skd_quiesce_dev(struct skd_device *skdev);
 468static int skd_unquiesce_dev(struct skd_device *skdev);
 469static void skd_release_special(struct skd_device *skdev,
 470                                struct skd_special_context *skspcl);
 471static void skd_disable_interrupts(struct skd_device *skdev);
 472static void skd_isr_fwstate(struct skd_device *skdev);
 473static void skd_recover_requests(struct skd_device *skdev, int requeue);
 474static void skd_soft_reset(struct skd_device *skdev);
 475
 476static const char *skd_name(struct skd_device *skdev);
 477const char *skd_drive_state_to_str(int state);
 478const char *skd_skdev_state_to_str(enum skd_drvr_state state);
 479static void skd_log_skdev(struct skd_device *skdev, const char *event);
 480static void skd_log_skmsg(struct skd_device *skdev,
 481                          struct skd_fitmsg_context *skmsg, const char *event);
 482static void skd_log_skreq(struct skd_device *skdev,
 483                          struct skd_request_context *skreq, const char *event);
 484
 485/*
 486 *****************************************************************************
 487 * READ/WRITE REQUESTS
 488 *****************************************************************************
 489 */
 490static void skd_fail_all_pending(struct skd_device *skdev)
 491{
 492        struct request_queue *q = skdev->queue;
 493        struct request *req;
 494
  495        for (;;) {
 496                req = blk_peek_request(q);
 497                if (req == NULL)
 498                        break;
 499                blk_start_request(req);
 500                __blk_end_request_all(req, -EIO);
 501        }
 502}
 503
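/*
 * Build a 10-byte READ(10)/WRITE(10) CDB. For example, a write with
 * lba == 0x12345678 and count == 8 is encoded as:
 *   2a 00 12 34 56 78 00 00 08 00
 */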
 504static void
 505skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
 506                int data_dir, unsigned lba,
 507                unsigned count)
 508{
 509        if (data_dir == READ)
 510                scsi_req->cdb[0] = 0x28;
 511        else
 512                scsi_req->cdb[0] = 0x2a;
 513
 514        scsi_req->cdb[1] = 0;
 515        scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
 516        scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
 517        scsi_req->cdb[4] = (lba & 0xff00) >> 8;
 518        scsi_req->cdb[5] = (lba & 0xff);
 519        scsi_req->cdb[6] = 0;
 520        scsi_req->cdb[7] = (count & 0xff00) >> 8;
 521        scsi_req->cdb[8] = count & 0xff;
 522        scsi_req->cdb[9] = 0;
 523}
 524
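/*
 * 0x35 is SYNCHRONIZE CACHE(10); with the LBA and block-count fields left
 * at zero the drive is asked to flush its entire write cache.
 */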
 525static void
 526skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
 527                            struct skd_request_context *skreq)
 528{
 529        skreq->flush_cmd = 1;
 530
 531        scsi_req->cdb[0] = 0x35;
 532        scsi_req->cdb[1] = 0;
 533        scsi_req->cdb[2] = 0;
 534        scsi_req->cdb[3] = 0;
 535        scsi_req->cdb[4] = 0;
 536        scsi_req->cdb[5] = 0;
 537        scsi_req->cdb[6] = 0;
 538        scsi_req->cdb[7] = 0;
 539        scsi_req->cdb[8] = 0;
 540        scsi_req->cdb[9] = 0;
 541}
 542
 543static void
 544skd_prep_discard_cdb(struct skd_scsi_request *scsi_req,
 545                     struct skd_request_context *skreq,
 546                     struct page *page,
 547                     u32 lba, u32 count)
 548{
 549        char *buf;
 550        unsigned long len;
 551        struct request *req;
 552
 553        buf = page_address(page);
 554        len = SKD_DISCARD_CDB_LENGTH;
 555
 556        scsi_req->cdb[0] = UNMAP;
 557        scsi_req->cdb[8] = len;
 558
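        /*
         * Lay out the UNMAP parameter list: bytes 0-1 carry the data length,
         * bytes 2-3 the block descriptor data length, and the single block
         * descriptor holds the starting LBA (8 bytes at offset 8) and the
         * number of blocks (4 bytes at offset 16).
         */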
 559        put_unaligned_be16(6 + 16, &buf[0]);
 560        put_unaligned_be16(16, &buf[2]);
 561        put_unaligned_be64(lba, &buf[8]);
 562        put_unaligned_be32(count, &buf[16]);
 563
 564        req = skreq->req;
 565        blk_add_request_payload(req, page, len);
 566}
 567
 568static void skd_request_fn_not_online(struct request_queue *q);
 569
 570static void skd_request_fn(struct request_queue *q)
 571{
 572        struct skd_device *skdev = q->queuedata;
 573        struct skd_fitmsg_context *skmsg = NULL;
 574        struct fit_msg_hdr *fmh = NULL;
 575        struct skd_request_context *skreq;
 576        struct request *req = NULL;
 577        struct skd_scsi_request *scsi_req;
 578        struct page *page;
 579        unsigned long io_flags;
 580        int error;
 581        u32 lba;
 582        u32 count;
 583        int data_dir;
 584        u32 be_lba;
 585        u32 be_count;
 586        u64 be_dmaa;
 587        u64 cmdctxt;
 588        u32 timo_slot;
 589        void *cmd_ptr;
 590        int flush, fua;
 591
 592        if (skdev->state != SKD_DRVR_STATE_ONLINE) {
 593                skd_request_fn_not_online(q);
 594                return;
 595        }
 596
 597        if (blk_queue_stopped(skdev->queue)) {
 598                if (skdev->skmsg_free_list == NULL ||
 599                    skdev->skreq_free_list == NULL ||
 600                    skdev->in_flight >= skdev->queue_low_water_mark)
 601                        /* There is still some kind of shortage */
 602                        return;
 603
 604                queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
 605        }
 606
 607        /*
 608         * Stop conditions:
 609         *  - There are no more native requests
 610         *  - There are already the maximum number of requests in progress
 611         *  - There are no more skd_request_context entries
 612         *  - There are no more FIT msg buffers
 613         */
  614        for (;;) {
 615
 616                flush = fua = 0;
 617
 618                req = blk_peek_request(q);
 619
 620                /* Are there any native requests to start? */
 621                if (req == NULL)
 622                        break;
 623
 624                lba = (u32)blk_rq_pos(req);
 625                count = blk_rq_sectors(req);
 626                data_dir = rq_data_dir(req);
 627                io_flags = req->cmd_flags;
 628
 629                if (io_flags & REQ_FLUSH)
 630                        flush++;
 631
 632                if (io_flags & REQ_FUA)
 633                        fua++;
 634
 635                pr_debug("%s:%s:%d new req=%p lba=%u(0x%x) "
 636                         "count=%u(0x%x) dir=%d\n",
 637                         skdev->name, __func__, __LINE__,
 638                         req, lba, lba, count, count, data_dir);
 639
 640                /* At this point we know there is a request */
 641
  642                /* Are too many requests already in progress? */
 643                if (skdev->in_flight >= skdev->cur_max_queue_depth) {
 644                        pr_debug("%s:%s:%d qdepth %d, limit %d\n",
 645                                 skdev->name, __func__, __LINE__,
 646                                 skdev->in_flight, skdev->cur_max_queue_depth);
 647                        break;
 648                }
 649
 650                /* Is a skd_request_context available? */
 651                skreq = skdev->skreq_free_list;
 652                if (skreq == NULL) {
 653                        pr_debug("%s:%s:%d Out of req=%p\n",
 654                                 skdev->name, __func__, __LINE__, q);
 655                        break;
 656                }
 657                SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
 658                SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);
 659
 660                /* Now we check to see if we can get a fit msg */
 661                if (skmsg == NULL) {
 662                        if (skdev->skmsg_free_list == NULL) {
 663                                pr_debug("%s:%s:%d Out of msg\n",
 664                                         skdev->name, __func__, __LINE__);
 665                                break;
 666                        }
 667                }
 668
 669                skreq->flush_cmd = 0;
 670                skreq->n_sg = 0;
 671                skreq->sg_byte_count = 0;
 672                skreq->discard_page = 0;
 673
 674                /*
 675                 * OK to now dequeue request from q.
 676                 *
  677                 * At this point we are committed to either start or reject
 678                 * the native request. Note that skd_request_context is
 679                 * available but is still at the head of the free list.
 680                 */
 681                blk_start_request(req);
 682                skreq->req = req;
 683                skreq->fitmsg_id = 0;
 684
 685                /* Either a FIT msg is in progress or we have to start one. */
 686                if (skmsg == NULL) {
 687                        /* Are there any FIT msg buffers available? */
 688                        skmsg = skdev->skmsg_free_list;
 689                        if (skmsg == NULL) {
 690                                pr_debug("%s:%s:%d Out of msg skdev=%p\n",
 691                                         skdev->name, __func__, __LINE__,
 692                                         skdev);
 693                                break;
 694                        }
 695                        SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
 696                        SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0);
 697
 698                        skdev->skmsg_free_list = skmsg->next;
 699
 700                        skmsg->state = SKD_MSG_STATE_BUSY;
 701                        skmsg->id += SKD_ID_INCR;
 702
 703                        /* Initialize the FIT msg header */
 704                        fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
 705                        memset(fmh, 0, sizeof(*fmh));
 706                        fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
 707                        skmsg->length = sizeof(*fmh);
 708                }
 709
 710                skreq->fitmsg_id = skmsg->id;
 711
 712                /*
 713                 * Note that a FIT msg may have just been started
 714                 * but contains no SoFIT requests yet.
 715                 */
 716
 717                /*
 718                 * Transcode the request, checking as we go. The outcome of
 719                 * the transcoding is represented by the error variable.
 720                 */
 721                cmd_ptr = &skmsg->msg_buf[skmsg->length];
 722                memset(cmd_ptr, 0, 32);
 723
 724                be_lba = cpu_to_be32(lba);
 725                be_count = cpu_to_be32(count);
 726                be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address);
 727                cmdctxt = skreq->id + SKD_ID_INCR;
 728
 729                scsi_req = cmd_ptr;
 730                scsi_req->hdr.tag = cmdctxt;
 731                scsi_req->hdr.sg_list_dma_address = be_dmaa;
 732
 733                if (data_dir == READ)
 734                        skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
 735                else
 736                        skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;
 737
 738                if (io_flags & REQ_DISCARD) {
 739                        page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
 740                        if (!page) {
 741                                pr_err("request_fn:Page allocation failed.\n");
 742                                skd_end_request(skdev, skreq, -ENOMEM);
 743                                break;
 744                        }
 745                        skreq->discard_page = 1;
 746                        req->completion_data = page;
 747                        skd_prep_discard_cdb(scsi_req, skreq, page, lba, count);
 748
 749                } else if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
 750                        skd_prep_zerosize_flush_cdb(scsi_req, skreq);
 751                        SKD_ASSERT(skreq->flush_cmd == 1);
 752
 753                } else {
 754                        skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
 755                }
 756
 757                if (fua)
 758                        scsi_req->cdb[1] |= SKD_FUA_NV;
 759
 760                if (!req->bio)
 761                        goto skip_sg;
 762
 763                error = skd_preop_sg_list(skdev, skreq);
 764
 765                if (error != 0) {
 766                        /*
 767                         * Complete the native request with error.
 768                         * Note that the request context is still at the
 769                         * head of the free list, and that the SoFIT request
 770                         * was encoded into the FIT msg buffer but the FIT
 771                         * msg length has not been updated. In short, the
 772                         * only resource that has been allocated but might
 773                         * not be used is that the FIT msg could be empty.
 774                         */
 775                        pr_debug("%s:%s:%d error Out\n",
 776                                 skdev->name, __func__, __LINE__);
 777                        skd_end_request(skdev, skreq, error);
 778                        continue;
 779                }
 780
 781skip_sg:
 782                scsi_req->hdr.sg_list_len_bytes =
 783                        cpu_to_be32(skreq->sg_byte_count);
 784
 785                /* Complete resource allocations. */
 786                skdev->skreq_free_list = skreq->next;
 787                skreq->state = SKD_REQ_STATE_BUSY;
 788                skreq->id += SKD_ID_INCR;
 789
 790                skmsg->length += sizeof(struct skd_scsi_request);
 791                fmh->num_protocol_cmds_coalesced++;
 792
 793                /*
 794                 * Update the active request counts.
 795                 * Capture the timeout timestamp.
 796                 */
 797                skreq->timeout_stamp = skdev->timeout_stamp;
 798                timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
 799                skdev->timeout_slot[timo_slot]++;
 800                skdev->in_flight++;
 801                pr_debug("%s:%s:%d req=0x%x busy=%d\n",
 802                         skdev->name, __func__, __LINE__,
 803                         skreq->id, skdev->in_flight);
 804
 805                /*
 806                 * If the FIT msg buffer is full send it.
 807                 */
 808                if (skmsg->length >= SKD_N_FITMSG_BYTES ||
 809                    fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
 810                        skd_send_fitmsg(skdev, skmsg);
 811                        skmsg = NULL;
 812                        fmh = NULL;
 813                }
 814        }
 815
 816        /*
 817         * Is a FIT msg in progress? If it is empty put the buffer back
 818         * on the free list. If it is non-empty send what we got.
 819         * This minimizes latency when there are fewer requests than
 820         * what fits in a FIT msg.
 821         */
 822        if (skmsg != NULL) {
 823                /* Bigger than just a FIT msg header? */
 824                if (skmsg->length > sizeof(struct fit_msg_hdr)) {
 825                        pr_debug("%s:%s:%d sending msg=%p, len %d\n",
 826                                 skdev->name, __func__, __LINE__,
 827                                 skmsg, skmsg->length);
 828                        skd_send_fitmsg(skdev, skmsg);
 829                } else {
 830                        /*
 831                         * The FIT msg is empty. It means we got started
 832                         * on the msg, but the requests were rejected.
 833                         */
 834                        skmsg->state = SKD_MSG_STATE_IDLE;
 835                        skmsg->id += SKD_ID_INCR;
 836                        skmsg->next = skdev->skmsg_free_list;
 837                        skdev->skmsg_free_list = skmsg;
 838                }
 839                skmsg = NULL;
 840                fmh = NULL;
 841        }
 842
 843        /*
 844         * If req is non-NULL it means there is something to do but
 845         * we are out of a resource.
 846         */
 847        if (req)
 848                blk_stop_queue(skdev->queue);
 849}
 850
 851static void skd_end_request(struct skd_device *skdev,
 852                            struct skd_request_context *skreq, int error)
 853{
 854        struct request *req = skreq->req;
 855        unsigned int io_flags = req->cmd_flags;
 856
 857        if ((io_flags & REQ_DISCARD) &&
 858                (skreq->discard_page == 1)) {
  859                pr_debug("%s:%s:%d, free the page!\n",
 860                         skdev->name, __func__, __LINE__);
 861                __free_page(req->completion_data);
 862        }
 863
 864        if (unlikely(error)) {
 865                struct request *req = skreq->req;
 866                char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
 867                u32 lba = (u32)blk_rq_pos(req);
 868                u32 count = blk_rq_sectors(req);
 869
 870                pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n",
 871                       skd_name(skdev), cmd, lba, count, skreq->id);
 872        } else
 873                pr_debug("%s:%s:%d id=0x%x error=%d\n",
 874                         skdev->name, __func__, __LINE__, skreq->id, error);
 875
 876        __blk_end_request_all(skreq->req, error);
 877}
 878
 879static int skd_preop_sg_list(struct skd_device *skdev,
 880                             struct skd_request_context *skreq)
 881{
 882        struct request *req = skreq->req;
 883        int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
 884        int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
 885        struct scatterlist *sg = &skreq->sg[0];
 886        int n_sg;
 887        int i;
 888
 889        skreq->sg_byte_count = 0;
 890
 891        /* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
 892                   skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */
 893
 894        n_sg = blk_rq_map_sg(skdev->queue, req, sg);
 895        if (n_sg <= 0)
 896                return -EINVAL;
 897
 898        /*
 899         * Map scatterlist to PCI bus addresses.
 900         * Note PCI might change the number of entries.
 901         */
 902        n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);
 903        if (n_sg <= 0)
 904                return -EINVAL;
 905
 906        SKD_ASSERT(n_sg <= skdev->sgs_per_request);
 907
 908        skreq->n_sg = n_sg;
 909
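        /*
         * Mirror each mapped scatterlist entry into the device's FIT SG
         * descriptor list, accumulating the total byte count as we go.
         */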
 910        for (i = 0; i < n_sg; i++) {
 911                struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
 912                u32 cnt = sg_dma_len(&sg[i]);
 913                uint64_t dma_addr = sg_dma_address(&sg[i]);
 914
 915                sgd->control = FIT_SGD_CONTROL_NOT_LAST;
 916                sgd->byte_count = cnt;
 917                skreq->sg_byte_count += cnt;
 918                sgd->host_side_addr = dma_addr;
 919                sgd->dev_side_addr = 0;
 920        }
 921
 922        skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
 923        skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;
 924
 925        if (unlikely(skdev->dbg_level > 1)) {
 926                pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
 927                         skdev->name, __func__, __LINE__,
 928                         skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
 929                for (i = 0; i < n_sg; i++) {
 930                        struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
 931                        pr_debug("%s:%s:%d   sg[%d] count=%u ctrl=0x%x "
 932                                 "addr=0x%llx next=0x%llx\n",
 933                                 skdev->name, __func__, __LINE__,
 934                                 i, sgd->byte_count, sgd->control,
 935                                 sgd->host_side_addr, sgd->next_desc_ptr);
 936                }
 937        }
 938
 939        return 0;
 940}
 941
 942static void skd_postop_sg_list(struct skd_device *skdev,
 943                               struct skd_request_context *skreq)
 944{
 945        int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
 946        int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
 947
 948        /*
 949         * restore the next ptr for next IO request so we
 950         * don't have to set it every time.
 951         */
 952        skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
 953                skreq->sksg_dma_address +
 954                ((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
 955        pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
 956}
 957
 958static void skd_request_fn_not_online(struct request_queue *q)
 959{
 960        struct skd_device *skdev = q->queuedata;
 961        int error;
 962
 963        SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
 964
 965        skd_log_skdev(skdev, "req_not_online");
 966        switch (skdev->state) {
 967        case SKD_DRVR_STATE_PAUSING:
 968        case SKD_DRVR_STATE_PAUSED:
 969        case SKD_DRVR_STATE_STARTING:
 970        case SKD_DRVR_STATE_RESTARTING:
 971        case SKD_DRVR_STATE_WAIT_BOOT:
 972        /* In case of starting, we haven't started the queue,
 973         * so we can't get here... but requests are
 974         * possibly hanging out waiting for us because we
  975         * reported /dev/skd0 already.  They'll wait
 976         * forever if connect doesn't complete.
  977         * What to do? Delay creating /dev/skd0?
 978         */
 979        case SKD_DRVR_STATE_BUSY:
 980        case SKD_DRVR_STATE_BUSY_IMMINENT:
 981        case SKD_DRVR_STATE_BUSY_ERASE:
 982        case SKD_DRVR_STATE_DRAINING_TIMEOUT:
 983                return;
 984
 985        case SKD_DRVR_STATE_BUSY_SANITIZE:
 986        case SKD_DRVR_STATE_STOPPING:
 987        case SKD_DRVR_STATE_SYNCING:
 988        case SKD_DRVR_STATE_FAULT:
 989        case SKD_DRVR_STATE_DISAPPEARED:
 990        default:
 991                error = -EIO;
 992                break;
 993        }
 994
  995        /* If we get here, terminate all pending block requests
  996         * with EIO and any SCSI pass-thru with appropriate sense
 997         */
 998
 999        skd_fail_all_pending(skdev);
1000}
1001
1002/*
1003 *****************************************************************************
1004 * TIMER
1005 *****************************************************************************
1006 */
1007
1008static void skd_timer_tick_not_online(struct skd_device *skdev);
1009
1010static void skd_timer_tick(ulong arg)
1011{
1012        struct skd_device *skdev = (struct skd_device *)arg;
1013
1014        u32 timo_slot;
1015        u32 overdue_timestamp;
1016        unsigned long reqflags;
1017        u32 state;
1018
1019        if (skdev->state == SKD_DRVR_STATE_FAULT)
1020                /* The driver has declared fault, and we want it to
1021                 * stay that way until driver is reloaded.
1022                 */
1023                return;
1024
1025        spin_lock_irqsave(&skdev->lock, reqflags);
1026
1027        state = SKD_READL(skdev, FIT_STATUS);
1028        state &= FIT_SR_DRIVE_STATE_MASK;
1029        if (state != skdev->drive_state)
1030                skd_isr_fwstate(skdev);
1031
1032        if (skdev->state != SKD_DRVR_STATE_ONLINE) {
1033                skd_timer_tick_not_online(skdev);
1034                goto timer_func_out;
1035        }
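        /*
         * Each one-second tick advances timeout_stamp. Requests record the
         * stamp when they are issued and are counted in
         * timeout_slot[stamp & SKD_TIMEOUT_SLOT_MASK], so a non-zero count in
         * the slot about to be reused means those requests have been
         * outstanding for at least SKD_N_TIMEOUT_SLOT ticks.
         */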
1036        skdev->timeout_stamp++;
1037        timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
1038
 1039        /*
 1040         * All requests that happened during the previous use of
 1041         * this slot should be done by now. The previous use was
 1042         * SKD_N_TIMEOUT_SLOT ticks ago.
 1043         */
1044        if (skdev->timeout_slot[timo_slot] == 0)
1045                goto timer_func_out;
1046
1047        /* Something is overdue */
1048        overdue_timestamp = skdev->timeout_stamp - SKD_N_TIMEOUT_SLOT;
1049
1050        pr_debug("%s:%s:%d found %d timeouts, draining busy=%d\n",
1051                 skdev->name, __func__, __LINE__,
1052                 skdev->timeout_slot[timo_slot], skdev->in_flight);
1053        pr_err("(%s): Overdue IOs (%d), busy %d\n",
1054               skd_name(skdev), skdev->timeout_slot[timo_slot],
1055               skdev->in_flight);
1056
1057        skdev->timer_countdown = SKD_DRAINING_TIMO;
1058        skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
1059        skdev->timo_slot = timo_slot;
1060        blk_stop_queue(skdev->queue);
1061
1062timer_func_out:
1063        mod_timer(&skdev->timer, (jiffies + HZ));
1064
1065        spin_unlock_irqrestore(&skdev->lock, reqflags);
1066}
1067
1068static void skd_timer_tick_not_online(struct skd_device *skdev)
1069{
1070        switch (skdev->state) {
1071        case SKD_DRVR_STATE_IDLE:
1072        case SKD_DRVR_STATE_LOAD:
1073                break;
1074        case SKD_DRVR_STATE_BUSY_SANITIZE:
1075                pr_debug("%s:%s:%d drive busy sanitize[%x], driver[%x]\n",
1076                         skdev->name, __func__, __LINE__,
1077                         skdev->drive_state, skdev->state);
1078                /* If we've been in sanitize for 3 seconds, we figure we're not
 1079                 * going to get any more completions, so recover requests now
1080                 */
1081                if (skdev->timer_countdown > 0) {
1082                        skdev->timer_countdown--;
1083                        return;
1084                }
1085                skd_recover_requests(skdev, 0);
1086                break;
1087
1088        case SKD_DRVR_STATE_BUSY:
1089        case SKD_DRVR_STATE_BUSY_IMMINENT:
1090        case SKD_DRVR_STATE_BUSY_ERASE:
1091                pr_debug("%s:%s:%d busy[%x], countdown=%d\n",
1092                         skdev->name, __func__, __LINE__,
1093                         skdev->state, skdev->timer_countdown);
1094                if (skdev->timer_countdown > 0) {
1095                        skdev->timer_countdown--;
1096                        return;
1097                }
1098                pr_debug("%s:%s:%d busy[%x], timedout=%d, restarting device.",
1099                         skdev->name, __func__, __LINE__,
1100                         skdev->state, skdev->timer_countdown);
1101                skd_restart_device(skdev);
1102                break;
1103
1104        case SKD_DRVR_STATE_WAIT_BOOT:
1105        case SKD_DRVR_STATE_STARTING:
1106                if (skdev->timer_countdown > 0) {
1107                        skdev->timer_countdown--;
1108                        return;
1109                }
1110                /* For now, we fault the drive.  Could attempt resets to
 1111                 * recover at some point. */
1112                skdev->state = SKD_DRVR_STATE_FAULT;
1113
1114                pr_err("(%s): DriveFault Connect Timeout (%x)\n",
1115                       skd_name(skdev), skdev->drive_state);
1116
 1117                /* start the queue so we can respond with error to requests */
 1118                /* wake up anyone waiting for startup complete */
1119                blk_start_queue(skdev->queue);
1120                skdev->gendisk_on = -1;
1121                wake_up_interruptible(&skdev->waitq);
1122                break;
1123
1124        case SKD_DRVR_STATE_ONLINE:
1125                /* shouldn't get here. */
1126                break;
1127
1128        case SKD_DRVR_STATE_PAUSING:
1129        case SKD_DRVR_STATE_PAUSED:
1130                break;
1131
1132        case SKD_DRVR_STATE_DRAINING_TIMEOUT:
1133                pr_debug("%s:%s:%d "
1134                         "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
1135                         skdev->name, __func__, __LINE__,
1136                         skdev->timo_slot,
1137                         skdev->timer_countdown,
1138                         skdev->in_flight,
1139                         skdev->timeout_slot[skdev->timo_slot]);
1140                /* if the slot has cleared we can let the I/O continue */
1141                if (skdev->timeout_slot[skdev->timo_slot] == 0) {
1142                        pr_debug("%s:%s:%d Slot drained, starting queue.\n",
1143                                 skdev->name, __func__, __LINE__);
1144                        skdev->state = SKD_DRVR_STATE_ONLINE;
1145                        blk_start_queue(skdev->queue);
1146                        return;
1147                }
1148                if (skdev->timer_countdown > 0) {
1149                        skdev->timer_countdown--;
1150                        return;
1151                }
1152                skd_restart_device(skdev);
1153                break;
1154
1155        case SKD_DRVR_STATE_RESTARTING:
1156                if (skdev->timer_countdown > 0) {
1157                        skdev->timer_countdown--;
1158                        return;
1159                }
1160                /* For now, we fault the drive. Could attempt resets to
 1161                 * recover at some point. */
1162                skdev->state = SKD_DRVR_STATE_FAULT;
1163                pr_err("(%s): DriveFault Reconnect Timeout (%x)\n",
1164                       skd_name(skdev), skdev->drive_state);
1165
1166                /*
1167                 * Recovering does two things:
1168                 * 1. completes IO with error
1169                 * 2. reclaims dma resources
1170                 * When is it safe to recover requests?
1171                 * - if the drive state is faulted
 1172                 * - if the state is still soft reset after our timeout
1173                 * - if the drive registers are dead (state = FF)
1174                 * If it is "unsafe", we still need to recover, so we will
1175                 * disable pci bus mastering and disable our interrupts.
1176                 */
1177
1178                if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
1179                    (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
1180                    (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
1181                        /* It never came out of soft reset. Try to
1182                         * recover the requests and then let them
1183                         * fail. This is to mitigate hung processes. */
1184                        skd_recover_requests(skdev, 0);
1185                else {
1186                        pr_err("(%s): Disable BusMaster (%x)\n",
1187                               skd_name(skdev), skdev->drive_state);
1188                        pci_disable_device(skdev->pdev);
1189                        skd_disable_interrupts(skdev);
1190                        skd_recover_requests(skdev, 0);
1191                }
1192
 1193                /* start the queue so we can respond with error to requests */
 1194                /* wake up anyone waiting for startup complete */
1195                blk_start_queue(skdev->queue);
1196                skdev->gendisk_on = -1;
1197                wake_up_interruptible(&skdev->waitq);
1198                break;
1199
1200        case SKD_DRVR_STATE_RESUMING:
1201        case SKD_DRVR_STATE_STOPPING:
1202        case SKD_DRVR_STATE_SYNCING:
1203        case SKD_DRVR_STATE_FAULT:
1204        case SKD_DRVR_STATE_DISAPPEARED:
1205        default:
1206                break;
1207        }
1208}
1209
1210static int skd_start_timer(struct skd_device *skdev)
1211{
1212        int rc;
1213
1214        init_timer(&skdev->timer);
1215        setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);
1216
1217        rc = mod_timer(&skdev->timer, (jiffies + HZ));
1218        if (rc)
1219                pr_err("%s: failed to start timer %d\n",
1220                       __func__, rc);
1221        return rc;
1222}
1223
1224static void skd_kill_timer(struct skd_device *skdev)
1225{
1226        del_timer_sync(&skdev->timer);
1227}
1228
1229/*
1230 *****************************************************************************
1231 * IOCTL
1232 *****************************************************************************
1233 */
1234static int skd_ioctl_sg_io(struct skd_device *skdev,
1235                           fmode_t mode, void __user *argp);
1236static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
1237                                        struct skd_sg_io *sksgio);
1238static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
1239                                   struct skd_sg_io *sksgio);
1240static int skd_sg_io_prep_buffering(struct skd_device *skdev,
1241                                    struct skd_sg_io *sksgio);
1242static int skd_sg_io_copy_buffer(struct skd_device *skdev,
1243                                 struct skd_sg_io *sksgio, int dxfer_dir);
1244static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
1245                                 struct skd_sg_io *sksgio);
1246static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio);
1247static int skd_sg_io_release_skspcl(struct skd_device *skdev,
1248                                    struct skd_sg_io *sksgio);
1249static int skd_sg_io_put_status(struct skd_device *skdev,
1250                                struct skd_sg_io *sksgio);
1251
1252static void skd_complete_special(struct skd_device *skdev,
1253                                 volatile struct fit_completion_entry_v1
1254                                 *skcomp,
1255                                 volatile struct fit_comp_error_info *skerr,
1256                                 struct skd_special_context *skspcl);
1257
1258static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
1259                          uint cmd_in, ulong arg)
1260{
1261        int rc = 0;
1262        struct gendisk *disk = bdev->bd_disk;
1263        struct skd_device *skdev = disk->private_data;
1264        void __user *p = (void *)arg;
1265
1266        pr_debug("%s:%s:%d %s: CMD[%s] ioctl  mode 0x%x, cmd 0x%x arg %0lx\n",
1267                 skdev->name, __func__, __LINE__,
1268                 disk->disk_name, current->comm, mode, cmd_in, arg);
1269
1270        if (!capable(CAP_SYS_ADMIN))
1271                return -EPERM;
1272
1273        switch (cmd_in) {
1274        case SG_SET_TIMEOUT:
1275        case SG_GET_TIMEOUT:
1276        case SG_GET_VERSION_NUM:
1277                rc = scsi_cmd_ioctl(disk->queue, disk, mode, cmd_in, p);
1278                break;
1279        case SG_IO:
1280                rc = skd_ioctl_sg_io(skdev, mode, p);
1281                break;
1282
1283        default:
1284                rc = -ENOTTY;
1285                break;
1286        }
1287
1288        pr_debug("%s:%s:%d %s:  completion rc %d\n",
1289                 skdev->name, __func__, __LINE__, disk->disk_name, rc);
1290        return rc;
1291}
1292
1293static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode,
1294                           void __user *argp)
1295{
1296        int rc;
1297        struct skd_sg_io sksgio;
1298
1299        memset(&sksgio, 0, sizeof(sksgio));
1300        sksgio.mode = mode;
1301        sksgio.argp = argp;
1302        sksgio.iov = &sksgio.no_iov_iov;
1303
1304        switch (skdev->state) {
1305        case SKD_DRVR_STATE_ONLINE:
1306        case SKD_DRVR_STATE_BUSY_IMMINENT:
1307                break;
1308
1309        default:
1310                pr_debug("%s:%s:%d drive not online\n",
1311                         skdev->name, __func__, __LINE__);
1312                rc = -ENXIO;
1313                goto out;
1314        }
1315
1316        rc = skd_sg_io_get_and_check_args(skdev, &sksgio);
1317        if (rc)
1318                goto out;
1319
1320        rc = skd_sg_io_obtain_skspcl(skdev, &sksgio);
1321        if (rc)
1322                goto out;
1323
1324        rc = skd_sg_io_prep_buffering(skdev, &sksgio);
1325        if (rc)
1326                goto out;
1327
1328        rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV);
1329        if (rc)
1330                goto out;
1331
1332        rc = skd_sg_io_send_fitmsg(skdev, &sksgio);
1333        if (rc)
1334                goto out;
1335
1336        rc = skd_sg_io_await(skdev, &sksgio);
1337        if (rc)
1338                goto out;
1339
1340        rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV);
1341        if (rc)
1342                goto out;
1343
1344        rc = skd_sg_io_put_status(skdev, &sksgio);
1345        if (rc)
1346                goto out;
1347
1348        rc = 0;
1349
1350out:
1351        skd_sg_io_release_skspcl(skdev, &sksgio);
1352
1353        if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov)
1354                kfree(sksgio.iov);
1355        return rc;
1356}
1357
1358static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
1359                                        struct skd_sg_io *sksgio)
1360{
1361        struct sg_io_hdr *sgp = &sksgio->sg;
1362        int i, acc;
1363
1364        if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) {
1365                pr_debug("%s:%s:%d access sg failed %p\n",
1366                         skdev->name, __func__, __LINE__, sksgio->argp);
1367                return -EFAULT;
1368        }
1369
1370        if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) {
1371                pr_debug("%s:%s:%d copy_from_user sg failed %p\n",
1372                         skdev->name, __func__, __LINE__, sksgio->argp);
1373                return -EFAULT;
1374        }
1375
1376        if (sgp->interface_id != SG_INTERFACE_ID_ORIG) {
1377                pr_debug("%s:%s:%d interface_id invalid 0x%x\n",
1378                         skdev->name, __func__, __LINE__, sgp->interface_id);
1379                return -EINVAL;
1380        }
1381
1382        if (sgp->cmd_len > sizeof(sksgio->cdb)) {
1383                pr_debug("%s:%s:%d cmd_len invalid %d\n",
1384                         skdev->name, __func__, __LINE__, sgp->cmd_len);
1385                return -EINVAL;
1386        }
1387
1388        if (sgp->iovec_count > 256) {
1389                pr_debug("%s:%s:%d iovec_count invalid %d\n",
1390                         skdev->name, __func__, __LINE__, sgp->iovec_count);
1391                return -EINVAL;
1392        }
1393
1394        if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) {
1395                pr_debug("%s:%s:%d dxfer_len invalid %d\n",
1396                         skdev->name, __func__, __LINE__, sgp->dxfer_len);
1397                return -EINVAL;
1398        }
1399
1400        switch (sgp->dxfer_direction) {
1401        case SG_DXFER_NONE:
1402                acc = -1;
1403                break;
1404
1405        case SG_DXFER_TO_DEV:
1406                acc = VERIFY_READ;
1407                break;
1408
1409        case SG_DXFER_FROM_DEV:
1410        case SG_DXFER_TO_FROM_DEV:
1411                acc = VERIFY_WRITE;
1412                break;
1413
1414        default:
1415                pr_debug("%s:%s:%d dxfer_dir invalid %d\n",
1416                         skdev->name, __func__, __LINE__, sgp->dxfer_direction);
1417                return -EINVAL;
1418        }
1419
1420        if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) {
1421                pr_debug("%s:%s:%d copy_from_user cmdp failed %p\n",
1422                         skdev->name, __func__, __LINE__, sgp->cmdp);
1423                return -EFAULT;
1424        }
1425
1426        if (sgp->mx_sb_len != 0) {
1427                if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) {
1428                        pr_debug("%s:%s:%d access sbp failed %p\n",
1429                                 skdev->name, __func__, __LINE__, sgp->sbp);
1430                        return -EFAULT;
1431                }
1432        }
1433
1434        if (sgp->iovec_count == 0) {
1435                sksgio->iov[0].iov_base = sgp->dxferp;
1436                sksgio->iov[0].iov_len = sgp->dxfer_len;
1437                sksgio->iovcnt = 1;
1438                sksgio->dxfer_len = sgp->dxfer_len;
1439        } else {
1440                struct sg_iovec *iov;
1441                uint nbytes = sizeof(*iov) * sgp->iovec_count;
1442                size_t iov_data_len;
1443
1444                iov = kmalloc(nbytes, GFP_KERNEL);
1445                if (iov == NULL) {
1446                        pr_debug("%s:%s:%d alloc iovec failed %d\n",
1447                                 skdev->name, __func__, __LINE__,
1448                                 sgp->iovec_count);
1449                        return -ENOMEM;
1450                }
1451                sksgio->iov = iov;
1452                sksgio->iovcnt = sgp->iovec_count;
1453
1454                if (copy_from_user(iov, sgp->dxferp, nbytes)) {
1455                        pr_debug("%s:%s:%d copy_from_user iovec failed %p\n",
1456                                 skdev->name, __func__, __LINE__, sgp->dxferp);
1457                        return -EFAULT;
1458                }
1459
1460                /*
1461                 * Sum up the vecs, making sure they don't overflow
1462                 */
1463                iov_data_len = 0;
1464                for (i = 0; i < sgp->iovec_count; i++) {
1465                        if (iov_data_len + iov[i].iov_len < iov_data_len)
1466                                return -EINVAL;
1467                        iov_data_len += iov[i].iov_len;
1468                }
1469
1470                /* SG_IO howto says that the shorter of the two wins */
1471                if (sgp->dxfer_len < iov_data_len) {
1472                        sksgio->iovcnt = iov_shorten((struct iovec *)iov,
1473                                                     sgp->iovec_count,
1474                                                     sgp->dxfer_len);
1475                        sksgio->dxfer_len = sgp->dxfer_len;
1476                } else
1477                        sksgio->dxfer_len = iov_data_len;
1478        }
1479
1480        if (sgp->dxfer_direction != SG_DXFER_NONE) {
1481                struct sg_iovec *iov = sksgio->iov;
1482                for (i = 0; i < sksgio->iovcnt; i++, iov++) {
1483                        if (!access_ok(acc, iov->iov_base, iov->iov_len)) {
1484                                pr_debug("%s:%s:%d access data failed %p/%d\n",
1485                                         skdev->name, __func__, __LINE__,
1486                                         iov->iov_base, (int)iov->iov_len);
1487                                return -EFAULT;
1488                        }
1489                }
1490        }
1491
1492        return 0;
1493}
1494
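/*
 * Take a special request context off the free list, or block until one
 * becomes available. The wait uses the caller-supplied SG_IO timeout,
 * so a saturated free list turns into -ETIMEDOUT rather than an
 * indefinite sleep; a signal while waiting maps to -EINTR.
 */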
1495static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
1496                                   struct skd_sg_io *sksgio)
1497{
1498        struct skd_special_context *skspcl = NULL;
1499        int rc;
1500
1501        for (;;) {
1502                ulong flags;
1503
1504                spin_lock_irqsave(&skdev->lock, flags);
1505                skspcl = skdev->skspcl_free_list;
1506                if (skspcl != NULL) {
1507                        skdev->skspcl_free_list =
1508                                (struct skd_special_context *)skspcl->req.next;
1509                        skspcl->req.id += SKD_ID_INCR;
1510                        skspcl->req.state = SKD_REQ_STATE_SETUP;
1511                        skspcl->orphaned = 0;
1512                        skspcl->req.n_sg = 0;
1513                }
1514                spin_unlock_irqrestore(&skdev->lock, flags);
1515
1516                if (skspcl != NULL) {
1517                        rc = 0;
1518                        break;
1519                }
1520
1521                pr_debug("%s:%s:%d blocking\n",
1522                         skdev->name, __func__, __LINE__);
1523
1524                rc = wait_event_interruptible_timeout(
1525                                skdev->waitq,
1526                                (skdev->skspcl_free_list != NULL),
1527                                msecs_to_jiffies(sksgio->sg.timeout));
1528
1529                pr_debug("%s:%s:%d unblocking, rc=%d\n",
1530                         skdev->name, __func__, __LINE__, rc);
1531
1532                if (rc <= 0) {
1533                        if (rc == 0)
1534                                rc = -ETIMEDOUT;
1535                        else
1536                                rc = -EINTR;
1537                        break;
1538                }
1539                /*
1540         * If we get here, rc > 0, meaning
1541         * wait_event_interruptible_timeout() returned with time to spare
1542         * because the awaited event -- a non-empty free list -- occurred.
1543                 * Retry the allocation.
1544                 */
1545        }
1546        sksgio->skspcl = skspcl;
1547
1548        return rc;
1549}
1550
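/*
 * Build a kernel bounce buffer for the data phase: one freshly allocated
 * page per PAGE_SIZE chunk, recorded both in the request's scatterlist
 * and in the FIT SG descriptor list the device walks. The byte count is
 * rounded up to a multiple of 4 because the DMA engine requires aligned
 * addresses and counts. Each descriptor is marked NOT_LAST and chained
 * to the next by its bus address; the final descriptor is fixed up
 * afterwards with FIT_SGD_CONTROL_LAST and a null next pointer.
 */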
1551static int skd_skreq_prep_buffering(struct skd_device *skdev,
1552                                    struct skd_request_context *skreq,
1553                                    u32 dxfer_len)
1554{
1555        u32 resid = dxfer_len;
1556
1557        /*
1558         * The DMA engine must have aligned addresses and byte counts.
1559         */
1560        resid += (-resid) & 3;
1561        skreq->sg_byte_count = resid;
1562
1563        skreq->n_sg = 0;
1564
1565        while (resid > 0) {
1566                u32 nbytes = PAGE_SIZE;
1567                u32 ix = skreq->n_sg;
1568                struct scatterlist *sg = &skreq->sg[ix];
1569                struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
1570                struct page *page;
1571
1572                if (nbytes > resid)
1573                        nbytes = resid;
1574
1575                page = alloc_page(GFP_KERNEL);
1576                if (page == NULL)
1577                        return -ENOMEM;
1578
1579                sg_set_page(sg, page, nbytes, 0);
1580
1581                /* TODO: This should be going through a pci_???()
1582                 * routine to do proper mapping. */
1583                sksg->control = FIT_SGD_CONTROL_NOT_LAST;
1584                sksg->byte_count = nbytes;
1585
1586                sksg->host_side_addr = sg_phys(sg);
1587
1588                sksg->dev_side_addr = 0;
1589                sksg->next_desc_ptr = skreq->sksg_dma_address +
1590                                      (ix + 1) * sizeof(*sksg);
1591
1592                skreq->n_sg++;
1593                resid -= nbytes;
1594        }
1595
1596        if (skreq->n_sg > 0) {
1597                u32 ix = skreq->n_sg - 1;
1598                struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
1599
1600                sksg->control = FIT_SGD_CONTROL_LAST;
1601                sksg->next_desc_ptr = 0;
1602        }
1603
1604        if (unlikely(skdev->dbg_level > 1)) {
1605                u32 i;
1606
1607                pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
1608                         skdev->name, __func__, __LINE__,
1609                         skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
1610                for (i = 0; i < skreq->n_sg; i++) {
1611                        struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
1612
1613                        pr_debug("%s:%s:%d   sg[%d] count=%u ctrl=0x%x "
1614                                 "addr=0x%llx next=0x%llx\n",
1615                                 skdev->name, __func__, __LINE__,
1616                                 i, sgd->byte_count, sgd->control,
1617                                 sgd->host_side_addr, sgd->next_desc_ptr);
1618                }
1619        }
1620
1621        return 0;
1622}
1623
1624static int skd_sg_io_prep_buffering(struct skd_device *skdev,
1625                                    struct skd_sg_io *sksgio)
1626{
1627        struct skd_special_context *skspcl = sksgio->skspcl;
1628        struct skd_request_context *skreq = &skspcl->req;
1629        u32 dxfer_len = sksgio->dxfer_len;
1630        int rc;
1631
1632        rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len);
1633        /*
1634         * Eventually, errors or not, skd_release_special() is called
1635         * to recover allocations including partial allocations.
1636         */
1637        return rc;
1638}
1639
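/*
 * Copy user data to or from the bounce pages set up above. Three
 * cursors advance together: the overall residual, the current user
 * iovec segment and the current bounce page; each pass moves
 * min(resid, iovec bytes left, page bytes left). For
 * SG_DXFER_TO_FROM_DEV the copy-to-device pass runs as well, matching
 * the SG_IO convention of writing the buffer out and reading it back.
 */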
1640static int skd_sg_io_copy_buffer(struct skd_device *skdev,
1641                                 struct skd_sg_io *sksgio, int dxfer_dir)
1642{
1643        struct skd_special_context *skspcl = sksgio->skspcl;
1644        u32 iov_ix = 0;
1645        struct sg_iovec curiov;
1646        u32 sksg_ix = 0;
1647        u8 *bufp = NULL;
1648        u32 buf_len = 0;
1649        u32 resid = sksgio->dxfer_len;
1650        int rc;
1651
1652        curiov.iov_len = 0;
1653        curiov.iov_base = NULL;
1654
1655        if (dxfer_dir != sksgio->sg.dxfer_direction) {
1656                if (dxfer_dir != SG_DXFER_TO_DEV ||
1657                    sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV)
1658                        return 0;
1659        }
1660
1661        while (resid > 0) {
1662                u32 nbytes = PAGE_SIZE;
1663
1664                if (curiov.iov_len == 0) {
1665                        curiov = sksgio->iov[iov_ix++];
1666                        continue;
1667                }
1668
1669                if (buf_len == 0) {
1670                        struct page *page;
1671                        page = sg_page(&skspcl->req.sg[sksg_ix++]);
1672                        bufp = page_address(page);
1673                        buf_len = PAGE_SIZE;
1674                }
1675
1676                nbytes = min_t(u32, nbytes, resid);
1677                nbytes = min_t(u32, nbytes, curiov.iov_len);
1678                nbytes = min_t(u32, nbytes, buf_len);
1679
1680                if (dxfer_dir == SG_DXFER_TO_DEV)
1681                        rc = __copy_from_user(bufp, curiov.iov_base, nbytes);
1682                else
1683                        rc = __copy_to_user(curiov.iov_base, bufp, nbytes);
1684
1685                if (rc)
1686                        return -EFAULT;
1687
1688                resid -= nbytes;
1689                curiov.iov_len -= nbytes;
1690                curiov.iov_base += nbytes;
1691                buf_len -= nbytes;
1692        }
1693
1694        return 0;
1695}
1696
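/*
 * Package the user CDB as a special FIT message: a fit_msg_hdr followed
 * immediately by a single skd_scsi_request carrying the CDB, the bus
 * address of the SG descriptor list and its total byte count. The
 * request is marked BUSY before the doorbell is rung so the completion
 * path can distinguish it from an idle context.
 */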
1697static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
1698                                 struct skd_sg_io *sksgio)
1699{
1700        struct skd_special_context *skspcl = sksgio->skspcl;
1701        struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
1702        struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
1703
1704        memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES);
1705
1706        /* Initialize the FIT msg header */
1707        fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
1708        fmh->num_protocol_cmds_coalesced = 1;
1709
1710        /* Initialize the SCSI request */
1711        if (sksgio->sg.dxfer_direction != SG_DXFER_NONE)
1712                scsi_req->hdr.sg_list_dma_address =
1713                        cpu_to_be64(skspcl->req.sksg_dma_address);
1714        scsi_req->hdr.tag = skspcl->req.id;
1715        scsi_req->hdr.sg_list_len_bytes =
1716                cpu_to_be32(skspcl->req.sg_byte_count);
1717        memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb));
1718
1719        skspcl->req.state = SKD_REQ_STATE_BUSY;
1720        skd_send_special_fitmsg(skdev, skspcl);
1721
1722        return 0;
1723}
1724
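/*
 * Wait for the special request to leave the BUSY state. An ABORTED
 * request is completed locally with a fabricated CHECK CONDITION and
 * sense data. If the request is still BUSY when the wait ends (timeout
 * or signal) it cannot simply be freed -- the adapter still owns it --
 * so it is marked orphaned and reclaimed later by the completion path.
 */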
1725static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio)
1726{
1727        unsigned long flags;
1728        int rc;
1729
1730        rc = wait_event_interruptible_timeout(skdev->waitq,
1731                                              (sksgio->skspcl->req.state !=
1732                                               SKD_REQ_STATE_BUSY),
1733                                              msecs_to_jiffies(sksgio->sg.
1734                                                               timeout));
1735
1736        spin_lock_irqsave(&skdev->lock, flags);
1737
1738        if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) {
1739                pr_debug("%s:%s:%d skspcl %p aborted\n",
1740                         skdev->name, __func__, __LINE__, sksgio->skspcl);
1741
1742                /* Build a check condition with sense data and let the
1743                 * command finish. For a timeout we must fabricate the
1744                 * completion and sense data ourselves. */
1745                sksgio->skspcl->req.completion.status =
1746                        SAM_STAT_CHECK_CONDITION;
1747
1748                memset(&sksgio->skspcl->req.err_info, 0,
1749                       sizeof(sksgio->skspcl->req.err_info));
1750                sksgio->skspcl->req.err_info.type = 0x70;
1751                sksgio->skspcl->req.err_info.key = ABORTED_COMMAND;
1752                sksgio->skspcl->req.err_info.code = 0x44;
1753                sksgio->skspcl->req.err_info.qual = 0;
1754                rc = 0;
1755        } else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY)
1756                /* No longer on the adapter. We finish. */
1757                rc = 0;
1758        else {
1759                /* Something's gone wrong. Still busy. Timeout or
1760                 * user interrupted (control-C). Mark as an orphan
1761                 * so it will be disposed of when it completes. */
1762                sksgio->skspcl->orphaned = 1;
1763                sksgio->skspcl = NULL;
1764                if (rc == 0) {
1765                        pr_debug("%s:%s:%d timed out %p (%u ms)\n",
1766                                 skdev->name, __func__, __LINE__,
1767                                 sksgio, sksgio->sg.timeout);
1768                        rc = -ETIMEDOUT;
1769                } else {
1770                        pr_debug("%s:%s:%d cntlc %p\n",
1771                                 skdev->name, __func__, __LINE__, sksgio);
1772                        rc = -EINTR;
1773                }
1774        }
1775
1776        spin_unlock_irqrestore(&skdev->lock, flags);
1777
1778        return rc;
1779}
1780
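/*
 * Translate the FIT completion into the sg_io_hdr the caller passed in:
 * SCSI status, a residual count derived from num_returned_bytes and, on
 * CHECK CONDITION, up to mx_sb_len bytes of sense data copied from the
 * fit_comp_error_info. The updated header is then copied back to user
 * space.
 */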
1781static int skd_sg_io_put_status(struct skd_device *skdev,
1782                                struct skd_sg_io *sksgio)
1783{
1784        struct sg_io_hdr *sgp = &sksgio->sg;
1785        struct skd_special_context *skspcl = sksgio->skspcl;
1786        int resid = 0;
1787
1788        u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes);
1789
1790        sgp->status = skspcl->req.completion.status;
1791        resid = sksgio->dxfer_len - nb;
1792
1793        sgp->masked_status = sgp->status & STATUS_MASK;
1794        sgp->msg_status = 0;
1795        sgp->host_status = 0;
1796        sgp->driver_status = 0;
1797        sgp->resid = resid;
1798        if (sgp->masked_status || sgp->host_status || sgp->driver_status)
1799                sgp->info |= SG_INFO_CHECK;
1800
1801        pr_debug("%s:%s:%d status %x masked %x resid 0x%x\n",
1802                 skdev->name, __func__, __LINE__,
1803                 sgp->status, sgp->masked_status, sgp->resid);
1804
1805        if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) {
1806                if (sgp->mx_sb_len > 0) {
1807                        struct fit_comp_error_info *ei = &skspcl->req.err_info;
1808                        u32 nbytes = sizeof(*ei);
1809
1810                        nbytes = min_t(u32, nbytes, sgp->mx_sb_len);
1811
1812                        sgp->sb_len_wr = nbytes;
1813
1814                        if (__copy_to_user(sgp->sbp, ei, nbytes)) {
1815                                pr_debug("%s:%s:%d copy_to_user sense failed %p\n",
1816                                         skdev->name, __func__, __LINE__,
1817                                         sgp->sbp);
1818                                return -EFAULT;
1819                        }
1820                }
1821        }
1822
1823        if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) {
1824                pr_debug("%s:%s:%d copy_to_user sg failed %p\n",
1825                         skdev->name, __func__, __LINE__, sksgio->argp);
1826                return -EFAULT;
1827        }
1828
1829        return 0;
1830}
1831
1832static int skd_sg_io_release_skspcl(struct skd_device *skdev,
1833                                    struct skd_sg_io *sksgio)
1834{
1835        struct skd_special_context *skspcl = sksgio->skspcl;
1836
1837        if (skspcl != NULL) {
1838                ulong flags;
1839
1840                sksgio->skspcl = NULL;
1841
1842                spin_lock_irqsave(&skdev->lock, flags);
1843                skd_release_special(skdev, skspcl);
1844                spin_unlock_irqrestore(&skdev->lock, flags);
1845        }
1846
1847        return 0;
1848}
1849
1850/*
1851 *****************************************************************************
1852 * INTERNAL REQUESTS -- generated by driver itself
1853 *****************************************************************************
1854 */
1855
1856static int skd_format_internal_skspcl(struct skd_device *skdev)
1857{
1858        struct skd_special_context *skspcl = &skdev->internal_skspcl;
1859        struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
1860        struct fit_msg_hdr *fmh;
1861        uint64_t dma_address;
1862        struct skd_scsi_request *scsi;
1863
1864        fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0];
1865        fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
1866        fmh->num_protocol_cmds_coalesced = 1;
1867
1868        scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
1869        memset(scsi, 0, sizeof(*scsi));
1870        dma_address = skspcl->req.sksg_dma_address;
1871        scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
1872        sgd->control = FIT_SGD_CONTROL_LAST;
1873        sgd->byte_count = 0;
1874        sgd->host_side_addr = skspcl->db_dma_address;
1875        sgd->dev_side_addr = 0;
1876        sgd->next_desc_ptr = 0LL;
1877
1878        return 1;
1879}
1880
1881#define WR_BUF_SIZE SKD_N_INTERNAL_BYTES
1882
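/*
 * Issue one step of the driver's internal command sequence using the
 * dedicated internal special context. The context is taken only if it
 * is IDLE, so overlapping refresh attempts simply wait for the one in
 * flight. The CDB is built here per opcode; WRITE_BUFFER fills the data
 * buffer with an incrementing byte pattern that READ_BUFFER later
 * verifies via skd_chk_read_buf().
 */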
1883static void skd_send_internal_skspcl(struct skd_device *skdev,
1884                                     struct skd_special_context *skspcl,
1885                                     u8 opcode)
1886{
1887        struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
1888        struct skd_scsi_request *scsi;
1889        unsigned char *buf = skspcl->data_buf;
1890        int i;
1891
1892        if (skspcl->req.state != SKD_REQ_STATE_IDLE)
1893                /*
1894                 * A refresh is already in progress.
1895                 * Just wait for it to finish.
1896                 */
1897                return;
1898
1899        SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
1900        skspcl->req.state = SKD_REQ_STATE_BUSY;
1901        skspcl->req.id += SKD_ID_INCR;
1902
1903        scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
1904        scsi->hdr.tag = skspcl->req.id;
1905
1906        memset(scsi->cdb, 0, sizeof(scsi->cdb));
1907
1908        switch (opcode) {
1909        case TEST_UNIT_READY:
1910                scsi->cdb[0] = TEST_UNIT_READY;
1911                sgd->byte_count = 0;
1912                scsi->hdr.sg_list_len_bytes = 0;
1913                break;
1914
1915        case READ_CAPACITY:
1916                scsi->cdb[0] = READ_CAPACITY;
1917                sgd->byte_count = SKD_N_READ_CAP_BYTES;
1918                scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1919                break;
1920
1921        case INQUIRY:
1922                scsi->cdb[0] = INQUIRY;
1923                scsi->cdb[1] = 0x01;    /* evpd */
1924                scsi->cdb[2] = 0x80;    /* serial number page */
1925                scsi->cdb[4] = 0x10;
1926                sgd->byte_count = 16;
1927                scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1928                break;
1929
1930        case SYNCHRONIZE_CACHE:
1931                scsi->cdb[0] = SYNCHRONIZE_CACHE;
1932                sgd->byte_count = 0;
1933                scsi->hdr.sg_list_len_bytes = 0;
1934                break;
1935
1936        case WRITE_BUFFER:
1937                scsi->cdb[0] = WRITE_BUFFER;
1938                scsi->cdb[1] = 0x02;
1939                scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
1940                scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
1941                sgd->byte_count = WR_BUF_SIZE;
1942                scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1943                /* fill incrementing byte pattern */
1944                for (i = 0; i < sgd->byte_count; i++)
1945                        buf[i] = i & 0xFF;
1946                break;
1947
1948        case READ_BUFFER:
1949                scsi->cdb[0] = READ_BUFFER;
1950                scsi->cdb[1] = 0x02;
1951                scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
1952                scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
1953                sgd->byte_count = WR_BUF_SIZE;
1954                scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1955                memset(skspcl->data_buf, 0, sgd->byte_count);
1956                break;
1957
1958        default:
1959                SKD_ASSERT("Don't know what to send");
1960                return;
1961
1962        }
1963        skd_send_special_fitmsg(skdev, skspcl);
1964}
1965
1966static void skd_refresh_device_data(struct skd_device *skdev)
1967{
1968        struct skd_special_context *skspcl = &skdev->internal_skspcl;
1969
1970        skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
1971}
1972
1973static int skd_chk_read_buf(struct skd_device *skdev,
1974                            struct skd_special_context *skspcl)
1975{
1976        unsigned char *buf = skspcl->data_buf;
1977        int i;
1978
1979        /* check for incrementing byte pattern */
1980        for (i = 0; i < WR_BUF_SIZE; i++)
1981                if (buf[i] != (i & 0xFF))
1982                        return 1;
1983
1984        return 0;
1985}
1986
1987static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
1988                                 u8 code, u8 qual, u8 fruc)
1989{
1990        /* If the check condition is of special interest, log a message */
1991        if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
1992            && (code == 0x04) && (qual == 0x06)) {
1993                pr_err("(%s): *** LOST_WRITE_DATA ERROR *** key/asc/"
1994                       "ascq/fruc %02x/%02x/%02x/%02x\n",
1995                       skd_name(skdev), key, code, qual, fruc);
1996        }
1997}
1998
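/*
 * Completion handler for the internal command sequence. On the success
 * path it walks TEST_UNIT_READY -> WRITE_BUFFER -> READ_BUFFER (pattern
 * check) -> READ_CAPACITY -> INQUIRY (serial number page) and finally
 * brings the device online via skd_unquiesce_dev(). A failed step is
 * retried from TEST_UNIT_READY unless the driver is stopping; a
 * write/read buffer mismatch triggers a soft reset, limited by
 * SKD_MAX_CONNECT_RETRIES.
 */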
1999static void skd_complete_internal(struct skd_device *skdev,
2000                                  volatile struct fit_completion_entry_v1
2001                                  *skcomp,
2002                                  volatile struct fit_comp_error_info *skerr,
2003                                  struct skd_special_context *skspcl)
2004{
2005        u8 *buf = skspcl->data_buf;
2006        u8 status;
2007        int i;
2008        struct skd_scsi_request *scsi =
2009                (struct skd_scsi_request *)&skspcl->msg_buf[64];
2010
2011        SKD_ASSERT(skspcl == &skdev->internal_skspcl);
2012
2013        pr_debug("%s:%s:%d complete internal %x\n",
2014                 skdev->name, __func__, __LINE__, scsi->cdb[0]);
2015
2016        skspcl->req.completion = *skcomp;
2017        skspcl->req.state = SKD_REQ_STATE_IDLE;
2018        skspcl->req.id += SKD_ID_INCR;
2019
2020        status = skspcl->req.completion.status;
2021
2022        skd_log_check_status(skdev, status, skerr->key, skerr->code,
2023                             skerr->qual, skerr->fruc);
2024
2025        switch (scsi->cdb[0]) {
2026        case TEST_UNIT_READY:
2027                if (status == SAM_STAT_GOOD)
2028                        skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
2029                else if ((status == SAM_STAT_CHECK_CONDITION) &&
2030                         (skerr->key == MEDIUM_ERROR))
2031                        skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
2032                else {
2033                        if (skdev->state == SKD_DRVR_STATE_STOPPING) {
2034                                pr_debug("%s:%s:%d TUR failed, don't send anymore state 0x%x\n",
2035                                         skdev->name, __func__, __LINE__,
2036                                         skdev->state);
2037                                return;
2038                        }
2039                        pr_debug("%s:%s:%d **** TUR failed, retry skerr\n",
2040                                 skdev->name, __func__, __LINE__);
2041                        skd_send_internal_skspcl(skdev, skspcl, 0x00);
2042                }
2043                break;
2044
2045        case WRITE_BUFFER:
2046                if (status == SAM_STAT_GOOD)
2047                        skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
2048                else {
2049                        if (skdev->state == SKD_DRVR_STATE_STOPPING) {
2050                                pr_debug("%s:%s:%d write buffer failed, don't send anymore state 0x%x\n",
2051                                         skdev->name, __func__, __LINE__,
2052                                         skdev->state);
2053                                return;
2054                        }
2055                        pr_debug("%s:%s:%d **** write buffer failed, retry skerr\n",
2056                                 skdev->name, __func__, __LINE__);
2057                        skd_send_internal_skspcl(skdev, skspcl, 0x00);
2058                }
2059                break;
2060
2061        case READ_BUFFER:
2062                if (status == SAM_STAT_GOOD) {
2063                        if (skd_chk_read_buf(skdev, skspcl) == 0)
2064                                skd_send_internal_skspcl(skdev, skspcl,
2065                                                         READ_CAPACITY);
2066                        else {
2067                                pr_err(
2068                                       "(%s):*** W/R Buffer mismatch %d ***\n",
2069                                       skd_name(skdev), skdev->connect_retries);
2070                                if (skdev->connect_retries <
2071                                    SKD_MAX_CONNECT_RETRIES) {
2072                                        skdev->connect_retries++;
2073                                        skd_soft_reset(skdev);
2074                                } else {
2075                                        pr_err(
2076                                               "(%s): W/R Buffer Connect Error\n",
2077                                               skd_name(skdev));
2078                                        return;
2079                                }
2080                        }
2081
2082                } else {
2083                        if (skdev->state == SKD_DRVR_STATE_STOPPING) {
2084                                pr_debug("%s:%s:%d "
2085                                         "read buffer failed, don't send anymore state 0x%x\n",
2086                                         skdev->name, __func__, __LINE__,
2087                                         skdev->state);
2088                                return;
2089                        }
2090                        pr_debug("%s:%s:%d "
2091                                 "**** read buffer failed, retry skerr\n",
2092                                 skdev->name, __func__, __LINE__);
2093                        skd_send_internal_skspcl(skdev, skspcl, 0x00);
2094                }
2095                break;
2096
2097        case READ_CAPACITY:
2098                skdev->read_cap_is_valid = 0;
2099                if (status == SAM_STAT_GOOD) {
2100                        skdev->read_cap_last_lba =
2101                                (buf[0] << 24) | (buf[1] << 16) |
2102                                (buf[2] << 8) | buf[3];
2103                        skdev->read_cap_blocksize =
2104                                (buf[4] << 24) | (buf[5] << 16) |
2105                                (buf[6] << 8) | buf[7];
2106
2107                        pr_debug("%s:%s:%d last lba %d, bs %d\n",
2108                                 skdev->name, __func__, __LINE__,
2109                                 skdev->read_cap_last_lba,
2110                                 skdev->read_cap_blocksize);
2111
2112                        set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
2113
2114                        skdev->read_cap_is_valid = 1;
2115
2116                        skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
2117                } else if ((status == SAM_STAT_CHECK_CONDITION) &&
2118                           (skerr->key == MEDIUM_ERROR)) {
2119                        skdev->read_cap_last_lba = ~0;
2120                        set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
2121                        pr_debug("%s:%s:%d "
2122                                 "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n",
2123                                 skdev->name, __func__, __LINE__);
2124                        skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
2125                } else {
2126                        pr_debug("%s:%s:%d **** READCAP failed, retry TUR\n",
2127                                 skdev->name, __func__, __LINE__);
2128                        skd_send_internal_skspcl(skdev, skspcl,
2129                                                 TEST_UNIT_READY);
2130                }
2131                break;
2132
2133        case INQUIRY:
2134                skdev->inquiry_is_valid = 0;
2135                if (status == SAM_STAT_GOOD) {
2136                        skdev->inquiry_is_valid = 1;
2137
2138                        for (i = 0; i < 12; i++)
2139                                skdev->inq_serial_num[i] = buf[i + 4];
2140                        skdev->inq_serial_num[12] = 0;
2141                }
2142
2143                if (skd_unquiesce_dev(skdev) < 0)
2144                        pr_debug("%s:%s:%d **** failed to ONLINE device\n",
2145                                 skdev->name, __func__, __LINE__);
2146                 /* connection is complete */
2147                skdev->connect_retries = 0;
2148                break;
2149
2150        case SYNCHRONIZE_CACHE:
2151                if (status == SAM_STAT_GOOD)
2152                        skdev->sync_done = 1;
2153                else
2154                        skdev->sync_done = -1;
2155                wake_up_interruptible(&skdev->waitq);
2156                break;
2157
2158        default:
2159                SKD_ASSERT("we didn't send this");
2160        }
2161}
2162
2163/*
2164 *****************************************************************************
2165 * FIT MESSAGES
2166 *****************************************************************************
2167 */
2168
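/*
 * Post a normal FIT message to the device. The 64-bit doorbell value is
 * the DMA address of the message buffer with the queue id and a message
 * size hint OR'd into the low bits; e.g. a 192-byte message would be
 * posted as (mb_dma_address | FIT_QCMD_QID_NORMAL | FIT_QCMD_MSGSIZE_256)
 * since it is larger than 128 bytes but no larger than 256.
 */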
2169static void skd_send_fitmsg(struct skd_device *skdev,
2170                            struct skd_fitmsg_context *skmsg)
2171{
2172        u64 qcmd;
2173        struct fit_msg_hdr *fmh;
2174
2175        pr_debug("%s:%s:%d dma address 0x%llx, busy=%d\n",
2176                 skdev->name, __func__, __LINE__,
2177                 skmsg->mb_dma_address, skdev->in_flight);
2178        pr_debug("%s:%s:%d msg_buf 0x%p, offset %x\n",
2179                 skdev->name, __func__, __LINE__,
2180                 skmsg->msg_buf, skmsg->offset);
2181
2182        qcmd = skmsg->mb_dma_address;
2183        qcmd |= FIT_QCMD_QID_NORMAL;
2184
2185        fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
2186        skmsg->outstanding = fmh->num_protocol_cmds_coalesced;
2187
2188        if (unlikely(skdev->dbg_level > 1)) {
2189                u8 *bp = (u8 *)skmsg->msg_buf;
2190                int i;
2191                for (i = 0; i < skmsg->length; i += 8) {
2192                        pr_debug("%s:%s:%d msg[%2d] %02x %02x %02x %02x "
2193                                 "%02x %02x %02x %02x\n",
2194                                 skdev->name, __func__, __LINE__,
2195                                 i, bp[i + 0], bp[i + 1], bp[i + 2],
2196                                 bp[i + 3], bp[i + 4], bp[i + 5],
2197                                 bp[i + 6], bp[i + 7]);
2198                        if (i == 0)
2199                                i = 64 - 8;
2200                }
2201        }
2202
2203        if (skmsg->length > 256)
2204                qcmd |= FIT_QCMD_MSGSIZE_512;
2205        else if (skmsg->length > 128)
2206                qcmd |= FIT_QCMD_MSGSIZE_256;
2207        else if (skmsg->length > 64)
2208                qcmd |= FIT_QCMD_MSGSIZE_128;
2209        else
2210                /*
2211                 * This makes no sense because the FIT msg header is
2212                 * 64 bytes. If the msg is only 64 bytes long it has
2213                 * no payload.
2214                 */
2215                qcmd |= FIT_QCMD_MSGSIZE_64;
2216
2217        SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
2218
2219}
2220
2221static void skd_send_special_fitmsg(struct skd_device *skdev,
2222                                    struct skd_special_context *skspcl)
2223{
2224        u64 qcmd;
2225
2226        if (unlikely(skdev->dbg_level > 1)) {
2227                u8 *bp = (u8 *)skspcl->msg_buf;
2228                int i;
2229
2230                for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
2231                        pr_debug("%s:%s:%d  spcl[%2d] %02x %02x %02x %02x  "
2232                                 "%02x %02x %02x %02x\n",
2233                                 skdev->name, __func__, __LINE__, i,
2234                                 bp[i + 0], bp[i + 1], bp[i + 2], bp[i + 3],
2235                                 bp[i + 4], bp[i + 5], bp[i + 6], bp[i + 7]);
2236                        if (i == 0)
2237                                i = 64 - 8;
2238                }
2239
2240                pr_debug("%s:%s:%d skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
2241                         skdev->name, __func__, __LINE__,
2242                         skspcl, skspcl->req.id, skspcl->req.sksg_list,
2243                         skspcl->req.sksg_dma_address);
2244                for (i = 0; i < skspcl->req.n_sg; i++) {
2245                        struct fit_sg_descriptor *sgd =
2246                                &skspcl->req.sksg_list[i];
2247
2248                        pr_debug("%s:%s:%d   sg[%d] count=%u ctrl=0x%x "
2249                                 "addr=0x%llx next=0x%llx\n",
2250                                 skdev->name, __func__, __LINE__,
2251                                 i, sgd->byte_count, sgd->control,
2252                                 sgd->host_side_addr, sgd->next_desc_ptr);
2253                }
2254        }
2255
2256        /*
2257         * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
2258         * and one 64-byte SSDI command.
2259         */
2260        qcmd = skspcl->mb_dma_address;
2261        qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
2262
2263        SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
2264}
2265
2266/*
2267 *****************************************************************************
2268 * COMPLETION QUEUE
2269 *****************************************************************************
2270 */
2271
2272static void skd_complete_other(struct skd_device *skdev,
2273                               volatile struct fit_completion_entry_v1 *skcomp,
2274                               volatile struct fit_comp_error_info *skerr);
2275
2276struct sns_info {
2277        u8 type;
2278        u8 stat;
2279        u8 key;
2280        u8 asc;
2281        u8 ascq;
2282        u8 mask;
2283        enum skd_check_status_action action;
2284};
2285
2286static struct sns_info skd_chkstat_table[] = {
2287        /* Good */
2288        { 0x70, 0x02, RECOVERED_ERROR, 0,    0,    0x1c,
2289          SKD_CHECK_STATUS_REPORT_GOOD },
2290
2291        /* Smart alerts */
2292        { 0x70, 0x02, NO_SENSE,        0x0B, 0x00, 0x1E,        /* warnings */
2293          SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2294        { 0x70, 0x02, NO_SENSE,        0x5D, 0x00, 0x1E,        /* thresholds */
2295          SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2296        { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F,        /* temperature over trigger */
2297          SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2298
2299        /* Retry (with limits) */
2300        { 0x70, 0x02, 0x0B,            0,    0,    0x1C,        /* This one is for DMA ERROR */
2301          SKD_CHECK_STATUS_REQUEUE_REQUEST },
2302        { 0x70, 0x02, 0x06,            0x0B, 0x00, 0x1E,        /* warnings */
2303          SKD_CHECK_STATUS_REQUEUE_REQUEST },
2304        { 0x70, 0x02, 0x06,            0x5D, 0x00, 0x1E,        /* thresholds */
2305          SKD_CHECK_STATUS_REQUEUE_REQUEST },
2306        { 0x70, 0x02, 0x06,            0x80, 0x30, 0x1F,        /* backup power */
2307          SKD_CHECK_STATUS_REQUEUE_REQUEST },
2308
2309        /* Busy (or about to be) */
2310        { 0x70, 0x02, 0x06,            0x3f, 0x01, 0x1F, /* fw changed */
2311          SKD_CHECK_STATUS_BUSY_IMMINENT },
2312};
2313
2314/*
2315 * Look up status and sense data to decide how to handle the error
2316 * from the device.
2317 * mask says which fields must match e.g., mask=0x18 means check
2318 * type and stat, ignore key, asc, ascq.
2319 */
2320
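/*
 * Example: the DMA error entry above has mask 0x1C (0x10|0x08|0x04), so
 * only the sense type, the completion status and the sense key have to
 * match; asc and ascq are ignored. An entry with mask 0x1F must match
 * all five fields.
 */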
2321static enum skd_check_status_action
2322skd_check_status(struct skd_device *skdev,
2323                 u8 cmp_status, volatile struct fit_comp_error_info *skerr)
2324{
2325        int i, n;
2326
2327        pr_err("(%s): key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
2328               skd_name(skdev), skerr->key, skerr->code, skerr->qual,
2329               skerr->fruc);
2330
2331        pr_debug("%s:%s:%d stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
2332                 skdev->name, __func__, __LINE__, skerr->type, cmp_status,
2333                 skerr->key, skerr->code, skerr->qual, skerr->fruc);
2334
2335        /* Does the info match an entry in the table? */
2336        n = ARRAY_SIZE(skd_chkstat_table);
2337        for (i = 0; i < n; i++) {
2338                struct sns_info *sns = &skd_chkstat_table[i];
2339
2340                if (sns->mask & 0x10)
2341                        if (skerr->type != sns->type)
2342                                continue;
2343
2344                if (sns->mask & 0x08)
2345                        if (cmp_status != sns->stat)
2346                                continue;
2347
2348                if (sns->mask & 0x04)
2349                        if (skerr->key != sns->key)
2350                                continue;
2351
2352                if (sns->mask & 0x02)
2353                        if (skerr->code != sns->asc)
2354                                continue;
2355
2356                if (sns->mask & 0x01)
2357                        if (skerr->qual != sns->ascq)
2358                                continue;
2359
2360                if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
2361                        pr_err("(%s): SMART Alert: sense key/asc/ascq "
2362                               "%02x/%02x/%02x\n",
2363                               skd_name(skdev), skerr->key,
2364                               skerr->code, skerr->qual);
2365                }
2366                return sns->action;
2367        }
2368
2369        /* No other match, so nonzero status means error,
2370         * zero status means good
2371         */
2372        if (cmp_status) {
2373                pr_debug("%s:%s:%d status check: error\n",
2374                         skdev->name, __func__, __LINE__);
2375                return SKD_CHECK_STATUS_REPORT_ERROR;
2376        }
2377
2378        pr_debug("%s:%s:%d status check good default\n",
2379                 skdev->name, __func__, __LINE__);
2380        return SKD_CHECK_STATUS_REPORT_GOOD;
2381}
2382
2383static void skd_resolve_req_exception(struct skd_device *skdev,
2384                                      struct skd_request_context *skreq)
2385{
2386        u8 cmp_status = skreq->completion.status;
2387
2388        switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
2389        case SKD_CHECK_STATUS_REPORT_GOOD:
2390        case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
2391                skd_end_request(skdev, skreq, 0);
2392                break;
2393
2394        case SKD_CHECK_STATUS_BUSY_IMMINENT:
2395                skd_log_skreq(skdev, skreq, "retry(busy)");
2396                blk_requeue_request(skdev->queue, skreq->req);
2397                pr_info("(%s) drive BUSY imminent\n", skd_name(skdev));
2398                skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
2399                skdev->timer_countdown = SKD_TIMER_MINUTES(20);
2400                skd_quiesce_dev(skdev);
2401                break;
2402
2403        case SKD_CHECK_STATUS_REQUEUE_REQUEST:
2404                if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) {
2405                        skd_log_skreq(skdev, skreq, "retry");
2406                        blk_requeue_request(skdev->queue, skreq->req);
2407                        break;
2408                }
2409        /* fall through to report error */
2410
2411        case SKD_CHECK_STATUS_REPORT_ERROR:
2412        default:
2413                skd_end_request(skdev, skreq, -EIO);
2414                break;
2415        }
2416}
2417
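/*
 * A request id carries the slot/table selector in its low bits (see
 * SKD_ID_SLOT_MASK and friends); the bits advanced by SKD_ID_INCR act
 * as a generation count. Incrementing the generation on release means a
 * completion that arrives later with the old id no longer matches
 * skreq->id and is recognized as stale.
 */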
2418/* assume spinlock is already held */
2419static void skd_release_skreq(struct skd_device *skdev,
2420                              struct skd_request_context *skreq)
2421{
2422        u32 msg_slot;
2423        struct skd_fitmsg_context *skmsg;
2424
2425        u32 timo_slot;
2426
2427        /*
2428         * Reclaim the FIT msg buffer if this is
2429         * the first of the requests it carried to
2430         * be completed. The FIT msg buffer used to
2431         * send this request cannot be reused until
2432         * we are sure the s1120 card has copied
2433         * it to its memory. The FIT msg might have
2434         * contained several requests. As soon as
2435         * any of them are completed we know that
2436         * the entire FIT msg was transferred.
2437         * Only the first completed request will
2438         * match the FIT msg buffer id. The FIT
2439         * msg buffer id is immediately updated.
2440         * When subsequent requests complete the FIT
2441         * msg buffer id won't match, so we know
2442         * quite cheaply that it is already done.
2443         */
2444        msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
2445        SKD_ASSERT(msg_slot < skdev->num_fitmsg_context);
2446
2447        skmsg = &skdev->skmsg_table[msg_slot];
2448        if (skmsg->id == skreq->fitmsg_id) {
2449                SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY);
2450                SKD_ASSERT(skmsg->outstanding > 0);
2451                skmsg->outstanding--;
2452                if (skmsg->outstanding == 0) {
2453                        skmsg->state = SKD_MSG_STATE_IDLE;
2454                        skmsg->id += SKD_ID_INCR;
2455                        skmsg->next = skdev->skmsg_free_list;
2456                        skdev->skmsg_free_list = skmsg;
2457                }
2458        }
2459
2460        /*
2461         * Decrease the number of active requests.
2462         * Also decrements the count in the timeout slot.
2463         */
2464        SKD_ASSERT(skdev->in_flight > 0);
2465        skdev->in_flight -= 1;
2466
2467        timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
2468        SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0);
2469        skdev->timeout_slot[timo_slot] -= 1;
2470
2471        /*
2472         * Reset backpointer
2473         */
2474        skreq->req = NULL;
2475
2476        /*
2477         * Reclaim the skd_request_context
2478         */
2479        skreq->state = SKD_REQ_STATE_IDLE;
2480        skreq->id += SKD_ID_INCR;
2481        skreq->next = skdev->skreq_free_list;
2482        skdev->skreq_free_list = skreq;
2483}
2484
2485#define DRIVER_INQ_EVPD_PAGE_CODE   0xDA
2486
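/*
 * INQUIRY EVPD page 0x00 lists the supported VPD pages. The driver
 * intercepts the device's response and splices its own page code
 * (DRIVER_INQ_EVPD_PAGE_CODE, 0xDA) into the list at the numerically
 * correct position, then bumps the page length and num_returned_bytes
 * so the caller sees a consistent, one-byte-longer page.
 */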
2487static void skd_do_inq_page_00(struct skd_device *skdev,
2488                               volatile struct fit_completion_entry_v1 *skcomp,
2489                               volatile struct fit_comp_error_info *skerr,
2490                               uint8_t *cdb, uint8_t *buf)
2491{
2492        uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size;
2493
2494        /* Caller requested "supported pages".  The driver needs to insert
2495         * its page.
2496         */
2497        pr_debug("%s:%s:%d skd_do_driver_inquiry: modify supported pages.\n",
2498                 skdev->name, __func__, __LINE__);
2499
2500        /* If the device rejected the request because the CDB was
2501         * improperly formed, then just leave.
2502         */
2503        if (skcomp->status == SAM_STAT_CHECK_CONDITION &&
2504            skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24)
2505                return;
2506
2507        /* Get the amount of space the caller allocated */
2508        max_bytes = (cdb[3] << 8) | cdb[4];
2509
2510        /* Get the number of pages actually returned by the device */
2511        drive_pages = (buf[2] << 8) | buf[3];
2512        drive_bytes = drive_pages + 4;
2513        new_size = drive_pages + 1;
2514
2515        /* Supported pages must be in numerical order, so find where
2516         * the driver page needs to be inserted into the list of
2517         * pages returned by the device.
2518         */
2519        for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) {
2520                if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE)
2521                        return; /* Device using this page code. abort */
2522                else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE)
2523                        break;
2524        }
2525
2526        if (insert_pt < max_bytes) {
2527                uint16_t u;
2528
2529                /* Shift everything up one byte to make room. */
2530                for (u = new_size + 3; u > insert_pt; u--)
2531                        buf[u] = buf[u - 1];
2532                buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE;
2533
2534                /* SCSI byte order: increment num_returned_bytes by 1 */
2535                skcomp->num_returned_bytes =
2536                        be32_to_cpu(skcomp->num_returned_bytes) + 1;
2537                skcomp->num_returned_bytes =
2538                        cpu_to_be32(skcomp->num_returned_bytes);
2539        }
2540
2541        /* update page length field to reflect the driver's page too */
2542        buf[2] = (uint8_t)((new_size >> 8) & 0xFF);
2543        buf[3] = (uint8_t)((new_size >> 0) & 0xFF);
2544}
2545
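/*
 * Decode the PCIe Link Status register: bits [3:0] give the current
 * link speed (1 = 2.5 GT/s, 2 = 5 GT/s, 3 = 8 GT/s) and bits [9:4] the
 * negotiated width. For example, a LNKSTA value of 0x0042 would report
 * STEC_LINK_5GTS with a width of 4 lanes.
 */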
2546static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width)
2547{
2548        int pcie_reg;
2549        u16 pci_bus_speed;
2550        u8 pci_lanes;
2551
2552        pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
2553        if (pcie_reg) {
2554                u16 linksta;
2555                pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta);
2556
2557                pci_bus_speed = linksta & 0xF;
2558                pci_lanes = (linksta & 0x3F0) >> 4;
2559        } else {
2560                *speed = STEC_LINK_UNKNOWN;
2561                *width = 0xFF;
2562                return;
2563        }
2564
2565        switch (pci_bus_speed) {
2566        case 1:
2567                *speed = STEC_LINK_2_5GTS;
2568                break;
2569        case 2:
2570                *speed = STEC_LINK_5GTS;
2571                break;
2572        case 3:
2573                *speed = STEC_LINK_8GTS;
2574                break;
2575        default:
2576                *speed = STEC_LINK_UNKNOWN;
2577                break;
2578        }
2579
2580        if (pci_lanes <= 0x20)
2581                *width = pci_lanes;
2582        else
2583                *width = 0xFF;
2584}
2585
2586static void skd_do_inq_page_da(struct skd_device *skdev,
2587                               volatile struct fit_completion_entry_v1 *skcomp,
2588                               volatile struct fit_comp_error_info *skerr,
2589                               uint8_t *cdb, uint8_t *buf)
2590{
2591        struct pci_dev *pdev = skdev->pdev;
2592        unsigned max_bytes;
2593        struct driver_inquiry_data inq;
2594        u16 val;
2595
2596        pr_debug("%s:%s:%d skd_do_driver_inquiry: return driver page\n",
2597                 skdev->name, __func__, __LINE__);
2598
2599        memset(&inq, 0, sizeof(inq));
2600
2601        inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE;
2602
2603        skd_get_link_info(pdev, &inq.pcie_link_speed, &inq.pcie_link_lanes);
2604        inq.pcie_bus_number = cpu_to_be16(pdev->bus->number);
2605        inq.pcie_device_number = PCI_SLOT(pdev->devfn);
2606        inq.pcie_function_number = PCI_FUNC(pdev->devfn);
2607
2608        pci_read_config_word(pdev, PCI_VENDOR_ID, &val);
2609        inq.pcie_vendor_id = cpu_to_be16(val);
2610
2611        pci_read_config_word(pdev, PCI_DEVICE_ID, &val);
2612        inq.pcie_device_id = cpu_to_be16(val);
2613
2614        pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &val);
2615        inq.pcie_subsystem_vendor_id = cpu_to_be16(val);
2616
2617        pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &val);
2618        inq.pcie_subsystem_device_id = cpu_to_be16(val);
2619
2620        /* Driver version, fixed length, padded with spaces on the right */
2621        inq.driver_version_length = sizeof(inq.driver_version);
2622        memset(&inq.driver_version, ' ', sizeof(inq.driver_version));
2623        memcpy(inq.driver_version, DRV_VER_COMPL,
2624               min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL)));
2625
2626        inq.page_length = cpu_to_be16((sizeof(inq) - 4));
2627
2628        /* Clear the error set by the device */
2629        skcomp->status = SAM_STAT_GOOD;
2630        memset((void *)skerr, 0, sizeof(*skerr));
2631
2632        /* copy response into output buffer */
2633        max_bytes = (cdb[3] << 8) | cdb[4];
2634        memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq)));
2635
2636        skcomp->num_returned_bytes =
2637                cpu_to_be32(min_t(uint16_t, max_bytes, sizeof(inq)));
2638}
2639
2640static void skd_do_driver_inq(struct skd_device *skdev,
2641                              volatile struct fit_completion_entry_v1 *skcomp,
2642                              volatile struct fit_comp_error_info *skerr,
2643                              uint8_t *cdb, uint8_t *buf)
2644{
2645        if (!buf)
2646                return;
2647        else if (cdb[0] != INQUIRY)
2648                return;         /* Not an INQUIRY */
2649        else if ((cdb[1] & 1) == 0)
2650                return;         /* EVPD not set */
2651        else if (cdb[2] == 0)
2652                /* Need to add driver's page to supported pages list */
2653                skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf);
2654        else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE)
2655                /* Caller requested driver's page */
2656                skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf);
2657}
2658
2659static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg)
2660{
2661        if (!sg)
2662                return NULL;
2663        if (!sg_page(sg))
2664                return NULL;
2665        return sg_virt(sg);
2666}
2667
2668static void skd_process_scsi_inq(struct skd_device *skdev,
2669                                 volatile struct fit_completion_entry_v1
2670                                 *skcomp,
2671                                 volatile struct fit_comp_error_info *skerr,
2672                                 struct skd_special_context *skspcl)
2673{
2674        uint8_t *buf;
2675        struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
2676        struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
2677
2678        dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg,
2679                            skspcl->req.sg_data_dir);
2680        buf = skd_sg_1st_page_ptr(skspcl->req.sg);
2681
2682        if (buf)
2683                skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf);
2684}
2685
2686
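/*
 * Drain the completion queue. Each entry carries a cycle value; an
 * entry whose cycle does not match skdev->skcomp_cycle has not been
 * written for this pass and marks the end of new completions. The
 * consumer index wraps at SKD_N_COMPLETION_ENTRY and the expected cycle
 * is bumped (8-bit wrap-around) on each wrap. A non-zero limit caps how
 * many entries are processed per interrupt invocation.
 */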
2687static int skd_isr_completion_posted(struct skd_device *skdev,
2688                                        int limit, int *enqueued)
2689{
2690        volatile struct fit_completion_entry_v1 *skcmp = NULL;
2691        volatile struct fit_comp_error_info *skerr;
2692        u16 req_id;
2693        u32 req_slot;
2694        struct skd_request_context *skreq;
2695        u16 cmp_cntxt = 0;
2696        u8 cmp_status = 0;
2697        u8 cmp_cycle = 0;
2698        u32 cmp_bytes = 0;
2699        int rc = 0;
2700        int processed = 0;
2701
2702        for (;; ) {
2703                SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
2704
2705                skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
2706                cmp_cycle = skcmp->cycle;
2707                cmp_cntxt = skcmp->tag;
2708                cmp_status = skcmp->status;
2709                cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);
2710
2711                skerr = &skdev->skerr_table[skdev->skcomp_ix];
2712
2713                pr_debug("%s:%s:%d "
2714                         "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d "
2715                         "busy=%d rbytes=0x%x proto=%d\n",
2716                         skdev->name, __func__, __LINE__, skdev->skcomp_cycle,
2717                         skdev->skcomp_ix, cmp_cycle, cmp_cntxt, cmp_status,
2718                         skdev->in_flight, cmp_bytes, skdev->proto_ver);
2719
2720                if (cmp_cycle != skdev->skcomp_cycle) {
2721                        pr_debug("%s:%s:%d end of completions\n",
2722                                 skdev->name, __func__, __LINE__);
2723                        break;
2724                }
2725                /*
2726                 * Update the completion queue head index and possibly
2727                 * the completion cycle count. 8-bit wrap-around.
2728                 */
2729                skdev->skcomp_ix++;
2730                if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
2731                        skdev->skcomp_ix = 0;
2732                        skdev->skcomp_cycle++;
2733                }
2734
2735                /*
2736                 * The command context is a unique 32-bit ID. The low order
2737                 * bits help locate the request. The request is usually a
2738                 * r/w request (see skd_start() above) or a special request.
2739                 */
2740                req_id = cmp_cntxt;
2741                req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
2742
2743                /* Is this other than a r/w request? */
2744                if (req_slot >= skdev->num_req_context) {
2745                        /*
2746                         * This is not a completion for a r/w request.
2747                         */
2748                        skd_complete_other(skdev, skcmp, skerr);
2749                        continue;
2750                }
2751
2752                skreq = &skdev->skreq_table[req_slot];
2753
2754                /*
2755                 * Make sure the request ID for the slot matches.
2756                 */
2757                if (skreq->id != req_id) {
2758                        pr_debug("%s:%s:%d mismatch comp_id=0x%x req_id=0x%x\n",
2759                                 skdev->name, __func__, __LINE__,
2760                                 req_id, skreq->id);
2761                        {
2762                                u16 new_id = cmp_cntxt;
2763                                pr_err("(%s): Completion mismatch "
2764                                       "comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
2765                                       skd_name(skdev), req_id,
2766                                       skreq->id, new_id);
2767
2768                                continue;
2769                        }
2770                }
2771
2772                SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);
2773
2774                if (skreq->state == SKD_REQ_STATE_ABORTED) {
2775                        pr_debug("%s:%s:%d reclaim req %p id=%04x\n",
2776                                 skdev->name, __func__, __LINE__,
2777                                 skreq, skreq->id);
2778                        /* a previously timed out command can
2779                         * now be cleaned up */
2780                        skd_release_skreq(skdev, skreq);
2781                        continue;
2782                }
2783
2784                skreq->completion = *skcmp;
2785                if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
2786                        skreq->err_info = *skerr;
2787                        skd_log_check_status(skdev, cmp_status, skerr->key,
2788                                             skerr->code, skerr->qual,
2789                                             skerr->fruc);
2790                }
2791                /* Release DMA resources for the request. */
2792                if (skreq->n_sg > 0)
2793                        skd_postop_sg_list(skdev, skreq);
2794
2795                if (!skreq->req) {
2796                        pr_debug("%s:%s:%d NULL backptr skdreq %p, "
2797                                 "req=0x%x req_id=0x%x\n",
2798                                 skdev->name, __func__, __LINE__,
2799                                 skreq, skreq->id, req_id);
2800                } else {
2801                        /*
2802                         * Capture the outcome and post it back to the
2803                         * native request.
2804                         */
2805                        if (likely(cmp_status == SAM_STAT_GOOD))
2806                                skd_end_request(skdev, skreq, 0);
2807                        else
2808                                skd_resolve_req_exception(skdev, skreq);
2809                }
2810
2811                /*
2812                 * Release the skreq, its FIT msg (if one), timeout slot,
2813                 * and queue depth.
2814                 */
2815                skd_release_skreq(skdev, skreq);
2816
2817                /* skd_isr_comp_limit equal to zero means no limit */
2818                if (limit) {
2819                        if (++processed >= limit) {
2820                                rc = 1;
2821                                break;
2822                        }
2823                }
2824        }
2825
2826        if (skdev->state == SKD_DRVR_STATE_PAUSING &&
2827            skdev->in_flight == 0) {
2828                skdev->state = SKD_DRVR_STATE_PAUSED;
2829                wake_up_interruptible(&skdev->waitq);
2830        }
2831
2832        return rc;
2833}
2834
2835static void skd_complete_other(struct skd_device *skdev,
2836                               volatile struct fit_completion_entry_v1 *skcomp,
2837                               volatile struct fit_comp_error_info *skerr)
2838{
2839        u32 req_id = 0;
2840        u32 req_table;
2841        u32 req_slot;
2842        struct skd_special_context *skspcl;
2843
2844        req_id = skcomp->tag;
2845        req_table = req_id & SKD_ID_TABLE_MASK;
2846        req_slot = req_id & SKD_ID_SLOT_MASK;
2847
2848        pr_debug("%s:%s:%d table=0x%x id=0x%x slot=%d\n",
2849                 skdev->name, __func__, __LINE__,
2850                 req_table, req_id, req_slot);
2851
2852        /*
2853         * Based on the request id, determine how to dispatch this completion.
2854                 * This switch/case forwards the good cases to the proper
2855                 * completion handler. Errors are reported below the switch.
2856         */
2857        switch (req_table) {
2858        case SKD_ID_RW_REQUEST:
2859                /*
2860                 * The caller, skd_completion_posted_isr() above,
2861                 * handles r/w requests. The only way we get here
2862                 * is if the req_slot is out of bounds.
2863                 */
2864                break;
2865
2866        case SKD_ID_SPECIAL_REQUEST:
2867                /*
2868                 * Make sure the req_slot is in bounds and that the id
2869                 * matches.
2870                 */
2871                if (req_slot < skdev->n_special) {
2872                        skspcl = &skdev->skspcl_table[req_slot];
2873                        if (skspcl->req.id == req_id &&
2874                            skspcl->req.state == SKD_REQ_STATE_BUSY) {
2875                                skd_complete_special(skdev,
2876                                                     skcomp, skerr, skspcl);
2877                                return;
2878                        }
2879                }
2880                break;
2881
2882        case SKD_ID_INTERNAL:
2883                if (req_slot == 0) {
2884                        skspcl = &skdev->internal_skspcl;
2885                        if (skspcl->req.id == req_id &&
2886                            skspcl->req.state == SKD_REQ_STATE_BUSY) {
2887                                skd_complete_internal(skdev,
2888                                                      skcomp, skerr, skspcl);
2889                                return;
2890                        }
2891                }
2892                break;
2893
2894        case SKD_ID_FIT_MSG:
2895                /*
2896                 * These ids should never appear in a completion record.
2897                 */
2898                break;
2899
2900        default:
2901                /*
2902                 * These ids should never appear anywhere.
2903                 */
2904                break;
2905        }
2906
2907        /*
2908         * If we get here it is a bad or stale id.
2909         */
2910}
2911
2912static void skd_complete_special(struct skd_device *skdev,
2913                                 volatile struct fit_completion_entry_v1
2914                                 *skcomp,
2915                                 volatile struct fit_comp_error_info *skerr,
2916                                 struct skd_special_context *skspcl)
2917{
2918        pr_debug("%s:%s:%d  completing special request %p\n",
2919                 skdev->name, __func__, __LINE__, skspcl);
2920        if (skspcl->orphaned) {
2921                /* Discard orphaned request */
2922                /* ?: Can this release directly or does it need
2923                 * to use a worker? */
2924                pr_debug("%s:%s:%d release orphaned %p\n",
2925                         skdev->name, __func__, __LINE__, skspcl);
2926                skd_release_special(skdev, skspcl);
2927                return;
2928        }
2929
2930        skd_process_scsi_inq(skdev, skcomp, skerr, skspcl);
2931
2932        skspcl->req.state = SKD_REQ_STATE_COMPLETED;
2933        skspcl->req.completion = *skcomp;
2934        skspcl->req.err_info = *skerr;
2935
2936        skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key,
2937                             skerr->code, skerr->qual, skerr->fruc);
2938
2939        wake_up_interruptible(&skdev->waitq);
2940}
2941
2942/* assume spinlock is already held */
2943static void skd_release_special(struct skd_device *skdev,
2944                                struct skd_special_context *skspcl)
2945{
2946        int i, was_depleted;
2947
2948        for (i = 0; i < skspcl->req.n_sg; i++) {
2949                struct page *page = sg_page(&skspcl->req.sg[i]);
2950                __free_page(page);
2951        }
2952
2953        was_depleted = (skdev->skspcl_free_list == NULL);
2954
2955        skspcl->req.state = SKD_REQ_STATE_IDLE;
2956        skspcl->req.id += SKD_ID_INCR;
2957        skspcl->req.next =
2958                (struct skd_request_context *)skdev->skspcl_free_list;
2959        skdev->skspcl_free_list = (struct skd_special_context *)skspcl;
2960
2961        if (was_depleted) {
2962                pr_debug("%s:%s:%d skspcl was depleted\n",
2963                         skdev->name, __func__, __LINE__);
2964                /* Free list was depleted. There might be waiters. */
2965                wake_up_interruptible(&skdev->waitq);
2966        }
2967}
2968
2969static void skd_reset_skcomp(struct skd_device *skdev)
2970{
2971        u32 nbytes;
2972        struct fit_completion_entry_v1 *skcomp;
2973
2974        nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
2975        nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
2976
2977        memset(skdev->skcomp_table, 0, nbytes);
2978
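            /* Restart consumption at entry 0 with the initial cycle value. */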
2979        skdev->skcomp_ix = 0;
2980        skdev->skcomp_cycle = 1;
2981}
2982
2983/*
2984 *****************************************************************************
2985 * INTERRUPTS
2986 *****************************************************************************
2987 */
2988static void skd_completion_worker(struct work_struct *work)
2989{
2990        struct skd_device *skdev =
2991                container_of(work, struct skd_device, completion_worker);
2992        unsigned long flags;
2993        int flush_enqueued = 0;
2994
2995        spin_lock_irqsave(&skdev->lock, flags);
2996
2997        /*
2998         * pass in limit=0, which means no limit..
2999         * process everything in compq
3000         */
3001        skd_isr_completion_posted(skdev, 0, &flush_enqueued);
3002        skd_request_fn(skdev->queue);
3003
3004        spin_unlock_irqrestore(&skdev->lock, flags);
3005}
3006
3007static void skd_isr_msg_from_dev(struct skd_device *skdev);
3008
3009static irqreturn_t
3010skd_isr(int irq, void *ptr)
3011{
3012        struct skd_device *skdev;
3013        u32 intstat;
3014        u32 ack;
3015        int rc = 0;
3016        int deferred = 0;
3017        int flush_enqueued = 0;
3018
3019        skdev = (struct skd_device *)ptr;
3020        spin_lock(&skdev->lock);
3021
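            /*
             * Service loop: read and acknowledge the interrupt status until
             * no sources remain pending.  Completion processing inside the
             * loop is bounded by skd_isr_comp_limit; anything beyond that
             * limit is deferred to the completion worker scheduled below.
             */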
3022        for (;;) {
3023                intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
3024
3025                ack = FIT_INT_DEF_MASK;
3026                ack &= intstat;
3027
3028                pr_debug("%s:%s:%d intstat=0x%x ack=0x%x\n",
3029                         skdev->name, __func__, __LINE__, intstat, ack);
3030
3031                /* As long as there is an interrupt pending on the device,
3032                 * keep looping.  When none remain, get out; if we never
3033                 * did any processing, hand off to the completion worker.
3034                 */
3035                if (ack == 0) {
3036                        /* No interrupts on device, but run the completion
3037                         * processor anyway?
3038                         */
3039                        if (rc == 0)
3040                                if (likely(skdev->state ==
3041                                           SKD_DRVR_STATE_ONLINE))
3042                                        deferred = 1;
3043                        break;
3044                }
3045
3046                rc = IRQ_HANDLED;
3047
3048                SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
3049
3050                if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
3051                           (skdev->state != SKD_DRVR_STATE_STOPPING))) {
3052                        if (intstat & FIT_ISH_COMPLETION_POSTED) {
3053                                /*
3054                                 * If we have already deferred completion
3055                                 * processing, don't bother running it again
3056                                 */
3057                                if (deferred == 0)
3058                                        deferred =
3059                                                skd_isr_completion_posted(skdev,
3060                                                skd_isr_comp_limit, &flush_enqueued);
3061                        }
3062
3063                        if (intstat & FIT_ISH_FW_STATE_CHANGE) {
3064                                skd_isr_fwstate(skdev);
3065                                if (skdev->state == SKD_DRVR_STATE_FAULT ||
3066                                    skdev->state ==
3067                                    SKD_DRVR_STATE_DISAPPEARED) {
3068                                        spin_unlock(&skdev->lock);
3069                                        return rc;
3070                                }
3071                        }
3072
3073                        if (intstat & FIT_ISH_MSG_FROM_DEV)
3074                                skd_isr_msg_from_dev(skdev);
3075                }
3076        }
3077
3078        if (unlikely(flush_enqueued))
3079                skd_request_fn(skdev->queue);
3080
3081        if (deferred)
3082                schedule_work(&skdev->completion_worker);
3083        else if (!flush_enqueued)
3084                skd_request_fn(skdev->queue);
3085
3086        spin_unlock(&skdev->lock);
3087
3088        return rc;
3089}
3090
3091static void skd_drive_fault(struct skd_device *skdev)
3092{
3093        skdev->state = SKD_DRVR_STATE_FAULT;
3094        pr_err("(%s): Drive FAULT\n", skd_name(skdev));
3095}
3096
3097static void skd_drive_disappeared(struct skd_device *skdev)
3098{
3099        skdev->state = SKD_DRVR_STATE_DISAPPEARED;
3100        pr_err("(%s): Drive DISAPPEARED\n", skd_name(skdev));
3101}
3102
3103static void skd_isr_fwstate(struct skd_device *skdev)
3104{
3105        u32 sense;
3106        u32 state;
3107        u32 mtd;
3108        int prev_driver_state = skdev->state;
3109
3110        sense = SKD_READL(skdev, FIT_STATUS);
3111        state = sense & FIT_SR_DRIVE_STATE_MASK;
3112
3113        pr_err("(%s): s1120 state %s(%d)=>%s(%d)\n",
3114               skd_name(skdev),
3115               skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
3116               skd_drive_state_to_str(state), state);
3117
3118        skdev->drive_state = state;
3119
3120        switch (skdev->drive_state) {
3121        case FIT_SR_DRIVE_INIT:
3122                if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
3123                        skd_disable_interrupts(skdev);
3124                        break;
3125                }
3126                if (skdev->state == SKD_DRVR_STATE_RESTARTING)
3127                        skd_recover_requests(skdev, 0);
3128                if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
3129                        skdev->timer_countdown = SKD_STARTING_TIMO;
3130                        skdev->state = SKD_DRVR_STATE_STARTING;
3131                        skd_soft_reset(skdev);
3132                        break;
3133                }
3134                mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
3135                SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3136                skdev->last_mtd = mtd;
3137                break;
3138
3139        case FIT_SR_DRIVE_ONLINE:
3140                skdev->cur_max_queue_depth = skd_max_queue_depth;
3141                if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
3142                        skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;
3143
3144                skdev->queue_low_water_mark =
3145                        skdev->cur_max_queue_depth * 2 / 3 + 1;
3146                if (skdev->queue_low_water_mark < 1)
3147                        skdev->queue_low_water_mark = 1;
3148                pr_info(
3149                       "(%s): Queue depth limit=%d dev=%d lowat=%d\n",
3150                       skd_name(skdev),
3151                       skdev->cur_max_queue_depth,
3152                       skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
3153
3154                skd_refresh_device_data(skdev);
3155                break;
3156
3157        case FIT_SR_DRIVE_BUSY:
3158                skdev->state = SKD_DRVR_STATE_BUSY;
3159                skdev->timer_countdown = SKD_BUSY_TIMO;
3160                skd_quiesce_dev(skdev);
3161                break;
3162        case FIT_SR_DRIVE_BUSY_SANITIZE:
3163                /* Set timer for 3 seconds; we'll abort any unfinished
3164                 * commands after it expires.
3165                 */
3166                skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
3167                skdev->timer_countdown = SKD_TIMER_SECONDS(3);
3168                blk_start_queue(skdev->queue);
3169                break;
3170        case FIT_SR_DRIVE_BUSY_ERASE:
3171                skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
3172                skdev->timer_countdown = SKD_BUSY_TIMO;
3173                break;
3174        case FIT_SR_DRIVE_OFFLINE:
3175                skdev->state = SKD_DRVR_STATE_IDLE;
3176                break;
3177        case FIT_SR_DRIVE_SOFT_RESET:
3178                switch (skdev->state) {
3179                case SKD_DRVR_STATE_STARTING:
3180                case SKD_DRVR_STATE_RESTARTING:
3181                        /* Expected by a caller of skd_soft_reset() */
3182                        break;
3183                default:
3184                        skdev->state = SKD_DRVR_STATE_RESTARTING;
3185                        break;
3186                }
3187                break;
3188        case FIT_SR_DRIVE_FW_BOOTING:
3189                pr_debug("%s:%s:%d ISR FIT_SR_DRIVE_FW_BOOTING %s\n",
3190                         skdev->name, __func__, __LINE__, skdev->name);
3191                skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
3192                skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
3193                break;
3194
3195        case FIT_SR_DRIVE_DEGRADED:
3196        case FIT_SR_PCIE_LINK_DOWN:
3197        case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
3198                break;
3199
3200        case FIT_SR_DRIVE_FAULT:
3201                skd_drive_fault(skdev);
3202                skd_recover_requests(skdev, 0);
3203                blk_start_queue(skdev->queue);
3204                break;
3205
3206        /* PCIe bus returned all Fs? */
3207        case 0xFF:
3208                pr_info("(%s): state=0x%x sense=0x%x\n",
3209                       skd_name(skdev), state, sense);
3210                skd_drive_disappeared(skdev);
3211                skd_recover_requests(skdev, 0);
3212                blk_start_queue(skdev->queue);
3213                break;
3214        default:
3215                /*
3216                 * Unknown FW State. Wait for a state we recognize.
3217                 */
3218                break;
3219        }
3220        pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
3221               skd_name(skdev),
3222               skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
3223               skd_skdev_state_to_str(skdev->state), skdev->state);
3224}
3225
3226static void skd_recover_requests(struct skd_device *skdev, int requeue)
3227{
3228        int i;
3229
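            /*
             * Reclaim every context still marked BUSY: r/w requests are
             * either requeued to the block layer (when requeue is set and
             * retries remain) or failed with -EIO, FIT messages return to
             * IDLE, and special requests are released or marked ABORTED.
             * The free lists are rebuilt in table order and the in-flight
             * count is cleared.
             */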
3230        for (i = 0; i < skdev->num_req_context; i++) {
3231                struct skd_request_context *skreq = &skdev->skreq_table[i];
3232
3233                if (skreq->state == SKD_REQ_STATE_BUSY) {
3234                        skd_log_skreq(skdev, skreq, "recover");
3235
3236                        SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
3237                        SKD_ASSERT(skreq->req != NULL);
3238
3239                        /* Release DMA resources for the request. */
3240                        if (skreq->n_sg > 0)
3241                                skd_postop_sg_list(skdev, skreq);
3242
3243                        if (requeue &&
3244                            (unsigned long) ++skreq->req->special <
3245                            SKD_MAX_RETRIES)
3246                                blk_requeue_request(skdev->queue, skreq->req);
3247                        else
3248                                skd_end_request(skdev, skreq, -EIO);
3249
3250                        skreq->req = NULL;
3251
3252                        skreq->state = SKD_REQ_STATE_IDLE;
3253                        skreq->id += SKD_ID_INCR;
3254                }
3255                if (i > 0)
3256                        skreq[-1].next = skreq;
3257                skreq->next = NULL;
3258        }
3259        skdev->skreq_free_list = skdev->skreq_table;
3260
3261        for (i = 0; i < skdev->num_fitmsg_context; i++) {
3262                struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i];
3263
3264                if (skmsg->state == SKD_MSG_STATE_BUSY) {
3265                        skd_log_skmsg(skdev, skmsg, "salvaged");
3266                        SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0);
3267                        skmsg->state = SKD_MSG_STATE_IDLE;
3268                        skmsg->id += SKD_ID_INCR;
3269                }
3270                if (i > 0)
3271                        skmsg[-1].next = skmsg;
3272                skmsg->next = NULL;
3273        }
3274        skdev->skmsg_free_list = skdev->skmsg_table;
3275
3276        for (i = 0; i < skdev->n_special; i++) {
3277                struct skd_special_context *skspcl = &skdev->skspcl_table[i];
3278
3279                /* If orphaned, reclaim it because it has already been reported
3280                 * to the process as an error (it was just waiting for
3281                 * a completion that didn't come, and now it never will).
3282                 * If busy, change to a state that will cause it to error
3283                 * out in the wait routine and let it do the normal
3284                 * reporting and reclaiming.
3285                 */
3286                if (skspcl->req.state == SKD_REQ_STATE_BUSY) {
3287                        if (skspcl->orphaned) {
3288                                pr_debug("%s:%s:%d orphaned %p\n",
3289                                         skdev->name, __func__, __LINE__,
3290                                         skspcl);
3291                                skd_release_special(skdev, skspcl);
3292                        } else {
3293                                pr_debug("%s:%s:%d not orphaned %p\n",
3294                                         skdev->name, __func__, __LINE__,
3295                                         skspcl);
3296                                skspcl->req.state = SKD_REQ_STATE_ABORTED;
3297                        }
3298                }
3299        }
3300        skdev->skspcl_free_list = skdev->skspcl_table;
3301
3302        for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++)
3303                skdev->timeout_slot[i] = 0;
3304
3305        skdev->in_flight = 0;
3306}
3307
3308static void skd_isr_msg_from_dev(struct skd_device *skdev)
3309{
3310        u32 mfd;
3311        u32 mtd;
3312        u32 data;
3313
3314        mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
3315
3316        pr_debug("%s:%s:%d mfd=0x%x last_mtd=0x%x\n",
3317                 skdev->name, __func__, __LINE__, mfd, skdev->last_mtd);
3318
3319        /* ignore any mtd that is an ack for something we didn't send */
3320        if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
3321                return;
3322
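            /*
             * Device bring-up is a handshake driven by these acks:
             * FITFW_INIT -> GET_CMDQ_DEPTH -> SET_COMPQ_DEPTH ->
             * SET_COMPQ_ADDR -> CMD_LOG_HOST_ID ->
             * CMD_LOG_TIME_STAMP_LO/HI -> ARM_QUEUE.
             * Each case below records last_mtd and issues the next step.
             */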
3323        switch (FIT_MXD_TYPE(mfd)) {
3324        case FIT_MTD_FITFW_INIT:
3325                skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
3326
3327                if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
3328                        pr_err("(%s): protocol mismatch\n",
3329                               skdev->name);
3330                        pr_err("(%s):   got=%d support=%d\n",
3331                               skdev->name, skdev->proto_ver,
3332                               FIT_PROTOCOL_VERSION_1);
3333                        pr_err("(%s):   please upgrade driver\n",
3334                               skdev->name);
3335                        skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
3336                        skd_soft_reset(skdev);
3337                        break;
3338                }
3339                mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
3340                SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3341                skdev->last_mtd = mtd;
3342                break;
3343
3344        case FIT_MTD_GET_CMDQ_DEPTH:
3345                skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
3346                mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
3347                                   SKD_N_COMPLETION_ENTRY);
3348                SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3349                skdev->last_mtd = mtd;
3350                break;
3351
3352        case FIT_MTD_SET_COMPQ_DEPTH:
3353                SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
3354                mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
3355                SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3356                skdev->last_mtd = mtd;
3357                break;
3358
3359        case FIT_MTD_SET_COMPQ_ADDR:
3360                skd_reset_skcomp(skdev);
3361                mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
3362                SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3363                skdev->last_mtd = mtd;
3364                break;
3365
3366        case FIT_MTD_CMD_LOG_HOST_ID:
3367                skdev->connect_time_stamp = get_seconds();
3368                data = skdev->connect_time_stamp & 0xFFFF;
3369                mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
3370                SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3371                skdev->last_mtd = mtd;
3372                break;
3373
3374        case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
3375                skdev->drive_jiffies = FIT_MXD_DATA(mfd);
3376                data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
3377                mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
3378                SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3379                skdev->last_mtd = mtd;
3380                break;
3381
3382        case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
3383                skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
3384                mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
3385                SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3386                skdev->last_mtd = mtd;
3387
3388                pr_err("(%s): Time sync driver=0x%x device=0x%x\n",
3389                       skd_name(skdev),
3390                       skdev->connect_time_stamp, skdev->drive_jiffies);
3391                break;
3392
3393        case FIT_MTD_ARM_QUEUE:
3394                skdev->last_mtd = 0;
3395                /*
3396                 * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
3397                 */
3398                break;
3399
3400        default:
3401                break;
3402        }
3403}
3404
3405static void skd_disable_interrupts(struct skd_device *skdev)
3406{
3407        u32 sense;
3408
3409        sense = SKD_READL(skdev, FIT_CONTROL);
3410        sense &= ~FIT_CR_ENABLE_INTERRUPTS;
3411        SKD_WRITEL(skdev, sense, FIT_CONTROL);
3412        pr_debug("%s:%s:%d sense 0x%x\n",
3413                 skdev->name, __func__, __LINE__, sense);
3414
3415        /* Note that all 1s are written. A 1-bit means
3416         * disable, a 0 means enable.
3417         */
3418        SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
3419}
3420
3421static void skd_enable_interrupts(struct skd_device *skdev)
3422{
3423        u32 val;
3424
3425        /* unmask interrupts first */
3426        val = FIT_ISH_FW_STATE_CHANGE |
3427              FIT_ISH_COMPLETION_POSTED | FIT_ISH_MSG_FROM_DEV;
3428
3429        /* Note that the complement of the mask is written. A 1-bit means
3430         * disable, a 0 means enable. */
3431        SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
3432        pr_debug("%s:%s:%d interrupt mask=0x%x\n",
3433                 skdev->name, __func__, __LINE__, ~val);
3434
3435        val = SKD_READL(skdev, FIT_CONTROL);
3436        val |= FIT_CR_ENABLE_INTERRUPTS;
3437        pr_debug("%s:%s:%d control=0x%x\n",
3438                 skdev->name, __func__, __LINE__, val);
3439        SKD_WRITEL(skdev, val, FIT_CONTROL);
3440}
3441
3442/*
3443 *****************************************************************************
3444 * START, STOP, RESTART, QUIESCE, UNQUIESCE
3445 *****************************************************************************
3446 */
3447
3448static void skd_soft_reset(struct skd_device *skdev)
3449{
3450        u32 val;
3451
3452        val = SKD_READL(skdev, FIT_CONTROL);
3453        val |= (FIT_CR_SOFT_RESET);
3454        pr_debug("%s:%s:%d control=0x%x\n",
3455                 skdev->name, __func__, __LINE__, val);
3456        SKD_WRITEL(skdev, val, FIT_CONTROL);
3457}
3458
3459static void skd_start_device(struct skd_device *skdev)
3460{
3461        unsigned long flags;
3462        u32 sense;
3463        u32 state;
3464
3465        spin_lock_irqsave(&skdev->lock, flags);
3466
3467        /* ack all ghost interrupts */
3468        SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3469
3470        sense = SKD_READL(skdev, FIT_STATUS);
3471
3472        pr_debug("%s:%s:%d initial status=0x%x\n",
3473                 skdev->name, __func__, __LINE__, sense);
3474
3475        state = sense & FIT_SR_DRIVE_STATE_MASK;
3476        skdev->drive_state = state;
3477        skdev->last_mtd = 0;
3478
3479        skdev->state = SKD_DRVR_STATE_STARTING;
3480        skdev->timer_countdown = SKD_STARTING_TIMO;
3481
3482        skd_enable_interrupts(skdev);
3483
3484        switch (skdev->drive_state) {
3485        case FIT_SR_DRIVE_OFFLINE:
3486                pr_err("(%s): Drive offline...\n", skd_name(skdev));
3487                break;
3488
3489        case FIT_SR_DRIVE_FW_BOOTING:
3490                pr_debug("%s:%s:%d FIT_SR_DRIVE_FW_BOOTING %s\n",
3491                         skdev->name, __func__, __LINE__, skdev->name);
3492                skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
3493                skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
3494                break;
3495
3496        case FIT_SR_DRIVE_BUSY_SANITIZE:
3497                pr_info("(%s): Start: BUSY_SANITIZE\n",
3498                       skd_name(skdev));
3499                skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
3500                skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3501                break;
3502
3503        case FIT_SR_DRIVE_BUSY_ERASE:
3504                pr_info("(%s): Start: BUSY_ERASE\n", skd_name(skdev));
3505                skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
3506                skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3507                break;
3508
3509        case FIT_SR_DRIVE_INIT:
3510        case FIT_SR_DRIVE_ONLINE:
3511                skd_soft_reset(skdev);
3512                break;
3513
3514        case FIT_SR_DRIVE_BUSY:
3515                pr_err("(%s): Drive Busy...\n", skd_name(skdev));
3516                skdev->state = SKD_DRVR_STATE_BUSY;
3517                skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3518                break;
3519
3520        case FIT_SR_DRIVE_SOFT_RESET:
3521                pr_err("(%s) drive soft reset in prog\n",
3522                       skd_name(skdev));
3523                break;
3524
3525        case FIT_SR_DRIVE_FAULT:
3526                /* Fault state is bad...soft reset won't do it...
3527                 * Hard reset, maybe, but does it work on device?
3528                 * For now, just fault so the system doesn't hang.
3529                 */
3530                skd_drive_fault(skdev);
3531                /*start the queue so we can respond with error to requests */
3532                pr_debug("%s:%s:%d starting %s queue\n",
3533                         skdev->name, __func__, __LINE__, skdev->name);
3534                blk_start_queue(skdev->queue);
3535                skdev->gendisk_on = -1;
3536                wake_up_interruptible(&skdev->waitq);
3537                break;
3538
3539        case 0xFF:
3540                /* Most likely the device isn't there or isn't responding
3541                 * to the BAR1 addresses. */
3542                skd_drive_disappeared(skdev);
3543                /*start the queue so we can respond with error to requests */
3544                pr_debug("%s:%s:%d starting %s queue to error-out reqs\n",
3545                         skdev->name, __func__, __LINE__, skdev->name);
3546                blk_start_queue(skdev->queue);
3547                skdev->gendisk_on = -1;
3548                wake_up_interruptible(&skdev->waitq);
3549                break;
3550
3551        default:
3552                pr_err("(%s) Start: unknown state %x\n",
3553                       skd_name(skdev), skdev->drive_state);
3554                break;
3555        }
3556
3557        state = SKD_READL(skdev, FIT_CONTROL);
3558        pr_debug("%s:%s:%d FIT Control Status=0x%x\n",
3559                 skdev->name, __func__, __LINE__, state);
3560
3561        state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
3562        pr_debug("%s:%s:%d Intr Status=0x%x\n",
3563                 skdev->name, __func__, __LINE__, state);
3564
3565        state = SKD_READL(skdev, FIT_INT_MASK_HOST);
3566        pr_debug("%s:%s:%d Intr Mask=0x%x\n",
3567                 skdev->name, __func__, __LINE__, state);
3568
3569        state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
3570        pr_debug("%s:%s:%d Msg from Dev=0x%x\n",
3571                 skdev->name, __func__, __LINE__, state);
3572
3573        state = SKD_READL(skdev, FIT_HW_VERSION);
3574        pr_debug("%s:%s:%d HW version=0x%x\n",
3575                 skdev->name, __func__, __LINE__, state);
3576
3577        spin_unlock_irqrestore(&skdev->lock, flags);
3578}
3579
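    /*
     * Shut the device down cleanly: if it is online, send SYNCHRONIZE_CACHE
     * through the internal special request and wait up to ten seconds for
     * completion, then disable interrupts, soft-reset the drive, and poll
     * for it to return to the INIT state.
     */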
3580static void skd_stop_device(struct skd_device *skdev)
3581{
3582        unsigned long flags;
3583        struct skd_special_context *skspcl = &skdev->internal_skspcl;
3584        u32 dev_state;
3585        int i;
3586
3587        spin_lock_irqsave(&skdev->lock, flags);
3588
3589        if (skdev->state != SKD_DRVR_STATE_ONLINE) {
3590                pr_err("(%s): skd_stop_device not online no sync\n",
3591                       skd_name(skdev));
3592                goto stop_out;
3593        }
3594
3595        if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
3596                pr_err("(%s): skd_stop_device no special\n",
3597                       skd_name(skdev));
3598                goto stop_out;
3599        }
3600
3601        skdev->state = SKD_DRVR_STATE_SYNCING;
3602        skdev->sync_done = 0;
3603
3604        skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
3605
3606        spin_unlock_irqrestore(&skdev->lock, flags);
3607
3608        wait_event_interruptible_timeout(skdev->waitq,
3609                                         (skdev->sync_done), (10 * HZ));
3610
3611        spin_lock_irqsave(&skdev->lock, flags);
3612
3613        switch (skdev->sync_done) {
3614        case 0:
3615                pr_err("(%s): skd_stop_device no sync\n",
3616                       skd_name(skdev));
3617                break;
3618        case 1:
3619                pr_err("(%s): skd_stop_device sync done\n",
3620                       skd_name(skdev));
3621                break;
3622        default:
3623                pr_err("(%s): skd_stop_device sync error\n",
3624                       skd_name(skdev));
3625        }
3626
3627stop_out:
3628        skdev->state = SKD_DRVR_STATE_STOPPING;
3629        spin_unlock_irqrestore(&skdev->lock, flags);
3630
3631        skd_kill_timer(skdev);
3632
3633        spin_lock_irqsave(&skdev->lock, flags);
3634        skd_disable_interrupts(skdev);
3635
3636        /* ensure all ints on device are cleared */
3637        /* soft reset the device to unload with a clean slate */
3638        SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3639        SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
3640
3641        spin_unlock_irqrestore(&skdev->lock, flags);
3642
3643        /* poll every 100ms, 1 second timeout */
3644        for (i = 0; i < 10; i++) {
3645                dev_state =
3646                        SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
3647                if (dev_state == FIT_SR_DRIVE_INIT)
3648                        break;
3649                set_current_state(TASK_INTERRUPTIBLE);
3650                schedule_timeout(msecs_to_jiffies(100));
3651        }
3652
3653        if (dev_state != FIT_SR_DRIVE_INIT)
3654                pr_err("(%s): skd_stop_device state error 0x%02x\n",
3655                       skd_name(skdev), dev_state);
3656}
3657
3658/* assume spinlock is held */
3659static void skd_restart_device(struct skd_device *skdev)
3660{
3661        u32 state;
3662
3663        /* ack all ghost interrupts */
3664        SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3665
3666        state = SKD_READL(skdev, FIT_STATUS);
3667
3668        pr_debug("%s:%s:%d drive status=0x%x\n",
3669                 skdev->name, __func__, __LINE__, state);
3670
3671        state &= FIT_SR_DRIVE_STATE_MASK;
3672        skdev->drive_state = state;
3673        skdev->last_mtd = 0;
3674
3675        skdev->state = SKD_DRVR_STATE_RESTARTING;
3676        skdev->timer_countdown = SKD_RESTARTING_TIMO;
3677
3678        skd_soft_reset(skdev);
3679}
3680
3681/* assume spinlock is held */
3682static int skd_quiesce_dev(struct skd_device *skdev)
3683{
3684        int rc = 0;
3685
3686        switch (skdev->state) {
3687        case SKD_DRVR_STATE_BUSY:
3688        case SKD_DRVR_STATE_BUSY_IMMINENT:
3689                pr_debug("%s:%s:%d stopping %s queue\n",
3690                         skdev->name, __func__, __LINE__, skdev->name);
3691                blk_stop_queue(skdev->queue);
3692                break;
3693        case SKD_DRVR_STATE_ONLINE:
3694        case SKD_DRVR_STATE_STOPPING:
3695        case SKD_DRVR_STATE_SYNCING:
3696        case SKD_DRVR_STATE_PAUSING:
3697        case SKD_DRVR_STATE_PAUSED:
3698        case SKD_DRVR_STATE_STARTING:
3699        case SKD_DRVR_STATE_RESTARTING:
3700        case SKD_DRVR_STATE_RESUMING:
3701        default:
3702                rc = -EINVAL;
3703                pr_debug("%s:%s:%d state [%d] not implemented\n",
3704                         skdev->name, __func__, __LINE__, skdev->state);
3705        }
3706        return rc;
3707}
3708
3709/* assume spinlock is held */
3710static int skd_unquiesce_dev(struct skd_device *skdev)
3711{
3712        int prev_driver_state = skdev->state;
3713
3714        skd_log_skdev(skdev, "unquiesce");
3715        if (skdev->state == SKD_DRVR_STATE_ONLINE) {
3716                pr_debug("%s:%s:%d **** device already ONLINE\n",
3717                         skdev->name, __func__, __LINE__);
3718                return 0;
3719        }
3720        if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
3721                /*
3722                 * If there has been a state change to other than
3723                 * ONLINE, we will rely on a controller state change
3724                 * to come back online and restart the queue.
3725                 * The BUSY state means that the driver is ready to
3726                 * continue normal processing but is waiting for the
3727                 * controller to become available.
3728                 */
3729                skdev->state = SKD_DRVR_STATE_BUSY;
3730                pr_debug("%s:%s:%d drive BUSY state\n",
3731                         skdev->name, __func__, __LINE__);
3732                return 0;
3733        }
3734
3735        /*
3736         * Drive has just come online; the driver is either in startup,
3737         * paused performing a task, or busy waiting for hardware.
3738         */
3739        switch (skdev->state) {
3740        case SKD_DRVR_STATE_PAUSED:
3741        case SKD_DRVR_STATE_BUSY:
3742        case SKD_DRVR_STATE_BUSY_IMMINENT:
3743        case SKD_DRVR_STATE_BUSY_ERASE:
3744        case SKD_DRVR_STATE_STARTING:
3745        case SKD_DRVR_STATE_RESTARTING:
3746        case SKD_DRVR_STATE_FAULT:
3747        case SKD_DRVR_STATE_IDLE:
3748        case SKD_DRVR_STATE_LOAD:
3749                skdev->state = SKD_DRVR_STATE_ONLINE;
3750                pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
3751                       skd_name(skdev),
3752                       skd_skdev_state_to_str(prev_driver_state),
3753                       prev_driver_state, skd_skdev_state_to_str(skdev->state),
3754                       skdev->state);
3755                pr_debug("%s:%s:%d **** device ONLINE...starting block queue\n",
3756                         skdev->name, __func__, __LINE__);
3757                pr_debug("%s:%s:%d starting %s queue\n",
3758                         skdev->name, __func__, __LINE__, skdev->name);
3759                pr_info("(%s): STEC s1120 ONLINE\n", skd_name(skdev));
3760                blk_start_queue(skdev->queue);
3761                skdev->gendisk_on = 1;
3762                wake_up_interruptible(&skdev->waitq);
3763                break;
3764
3765        case SKD_DRVR_STATE_DISAPPEARED:
3766        default:
3767                pr_debug("%s:%s:%d **** driver state %d, not implemented\n",
3768                         skdev->name, __func__, __LINE__,
3769                         skdev->state);
3770                return -EBUSY;
3771        }
3772        return 0;
3773}
3774
3775/*
3776 *****************************************************************************
3777 * PCIe MSI/MSI-X INTERRUPT HANDLERS
3778 *****************************************************************************
3779 */
3780
3781static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
3782{
3783        struct skd_device *skdev = skd_host_data;
3784        unsigned long flags;
3785
3786        spin_lock_irqsave(&skdev->lock, flags);
3787        pr_debug("%s:%s:%d MSIX = 0x%x\n",
3788                 skdev->name, __func__, __LINE__,
3789                 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3790        pr_err("(%s): MSIX reserved irq %d = 0x%x\n", skd_name(skdev),
3791               irq, SKD_READL(skdev, FIT_INT_STATUS_HOST));
3792        SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
3793        spin_unlock_irqrestore(&skdev->lock, flags);
3794        return IRQ_HANDLED;
3795}
3796
3797static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
3798{
3799        struct skd_device *skdev = skd_host_data;
3800        unsigned long flags;
3801
3802        spin_lock_irqsave(&skdev->lock, flags);
3803        pr_debug("%s:%s:%d MSIX = 0x%x\n",
3804                 skdev->name, __func__, __LINE__,
3805                 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3806        SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
3807        skd_isr_fwstate(skdev);
3808        spin_unlock_irqrestore(&skdev->lock, flags);
3809        return IRQ_HANDLED;
3810}
3811
3812static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
3813{
3814        struct skd_device *skdev = skd_host_data;
3815        unsigned long flags;
3816        int flush_enqueued = 0;
3817        int deferred;
3818
3819        spin_lock_irqsave(&skdev->lock, flags);
3820        pr_debug("%s:%s:%d MSIX = 0x%x\n",
3821                 skdev->name, __func__, __LINE__,
3822                 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3823        SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
3824        deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
3825                                                &flush_enqueued);
3826        if (flush_enqueued)
3827                skd_request_fn(skdev->queue);
3828
3829        if (deferred)
3830                schedule_work(&skdev->completion_worker);
3831        else if (!flush_enqueued)
3832                skd_request_fn(skdev->queue);
3833
3834        spin_unlock_irqrestore(&skdev->lock, flags);
3835
3836        return IRQ_HANDLED;
3837}
3838
3839static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
3840{
3841        struct skd_device *skdev = skd_host_data;
3842        unsigned long flags;
3843
3844        spin_lock_irqsave(&skdev->lock, flags);
3845        pr_debug("%s:%s:%d MSIX = 0x%x\n",
3846                 skdev->name, __func__, __LINE__,
3847                 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3848        SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
3849        skd_isr_msg_from_dev(skdev);
3850        spin_unlock_irqrestore(&skdev->lock, flags);
3851        return IRQ_HANDLED;
3852}
3853
3854static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
3855{
3856        struct skd_device *skdev = skd_host_data;
3857        unsigned long flags;
3858
3859        spin_lock_irqsave(&skdev->lock, flags);
3860        pr_debug("%s:%s:%d MSIX = 0x%x\n",
3861                 skdev->name, __func__, __LINE__,
3862                 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3863        SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
3864        spin_unlock_irqrestore(&skdev->lock, flags);
3865        return IRQ_HANDLED;
3866}
3867
3868/*
3869 *****************************************************************************
3870 * PCIe MSI/MSI-X SETUP
3871 *****************************************************************************
3872 */
3873
3874struct skd_msix_entry {
3875        int have_irq;
3876        u32 vector;
3877        u32 entry;
3878        struct skd_device *rsp;
3879        char isr_name[30];
3880};
3881
3882struct skd_init_msix_entry {
3883        const char *name;
3884        irq_handler_t handler;
3885};
3886
3887#define SKD_MAX_MSIX_COUNT              13
3888#define SKD_MIN_MSIX_COUNT              7
3889#define SKD_BASE_MSIX_IRQ               4
3890
3891static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
3892        { "(DMA 0)",        skd_reserved_isr },
3893        { "(DMA 1)",        skd_reserved_isr },
3894        { "(DMA 2)",        skd_reserved_isr },
3895        { "(DMA 3)",        skd_reserved_isr },
3896        { "(State Change)", skd_statec_isr   },
3897        { "(COMPL_Q)",      skd_comp_q       },
3898        { "(MSG)",          skd_msg_isr      },
3899        { "(Reserved)",     skd_reserved_isr },
3900        { "(Reserved)",     skd_reserved_isr },
3901        { "(Queue Full 0)", skd_qfull_isr    },
3902        { "(Queue Full 1)", skd_qfull_isr    },
3903        { "(Queue Full 2)", skd_qfull_isr    },
3904        { "(Queue Full 3)", skd_qfull_isr    },
3905};
3906
3907static void skd_release_msix(struct skd_device *skdev)
3908{
3909        struct skd_msix_entry *qentry;
3910        int i;
3911
3912        if (skdev->msix_entries) {
3913                for (i = 0; i < skdev->msix_count; i++) {
3914                        qentry = &skdev->msix_entries[i];
3916
3917                        if (qentry->have_irq)
3918                                devm_free_irq(&skdev->pdev->dev,
3919                                              qentry->vector, qentry->rsp);
3920                }
3921
3922                kfree(skdev->msix_entries);
3923        }
3924
3925        if (skdev->msix_count)
3926                pci_disable_msix(skdev->pdev);
3927
3928        skdev->msix_count = 0;
3929        skdev->msix_entries = NULL;
3930}
3931
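    /*
     * Request all SKD_MAX_MSIX_COUNT vectors with pci_enable_msix_exact()
     * and register one handler per vector from the msix_entries[] dispatch
     * table; any failure tears the setup down via skd_release_msix().
     */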
3932static int skd_acquire_msix(struct skd_device *skdev)
3933{
3934        int i, rc;
3935        struct pci_dev *pdev = skdev->pdev;
3936        struct msix_entry *entries;
3937        struct skd_msix_entry *qentry;
3938
3939        entries = kzalloc(sizeof(struct msix_entry) * SKD_MAX_MSIX_COUNT,
3940                          GFP_KERNEL);
3941        if (!entries)
3942                return -ENOMEM;
3943
3944        for (i = 0; i < SKD_MAX_MSIX_COUNT; i++)
3945                entries[i].entry = i;
3946
3947        rc = pci_enable_msix_exact(pdev, entries, SKD_MAX_MSIX_COUNT);
3948        if (rc) {
3949                pr_err("(%s): failed to enable MSI-X %d\n",
3950                       skd_name(skdev), rc);
3951                goto msix_out;
3952        }
3953
3954        skdev->msix_count = SKD_MAX_MSIX_COUNT;
3955        skdev->msix_entries = kzalloc(sizeof(struct skd_msix_entry) *
3956                                      skdev->msix_count, GFP_KERNEL);
3957        if (!skdev->msix_entries) {
3958                rc = -ENOMEM;
3959                pr_err("(%s): msix table allocation error\n",
3960                       skd_name(skdev));
3961                goto msix_out;
3962        }
3963
3964        for (i = 0; i < skdev->msix_count; i++) {
3965                qentry = &skdev->msix_entries[i];
3966                qentry->vector = entries[i].vector;
3967                qentry->entry = entries[i].entry;
3968                qentry->rsp = NULL;
3969                qentry->have_irq = 0;
3970                pr_debug("%s:%s:%d %s: <%s> msix (%d) vec %d, entry %x\n",
3971                         skdev->name, __func__, __LINE__,
3972                         pci_name(pdev), skdev->name,
3973                         i, qentry->vector, qentry->entry);
3974        }
3975
3976        /* Enable MSI-X vectors for the base queue */
3977        for (i = 0; i < skdev->msix_count; i++) {
3978                qentry = &skdev->msix_entries[i];
3979                snprintf(qentry->isr_name, sizeof(qentry->isr_name),
3980                         "%s%d-msix %s", DRV_NAME, skdev->devno,
3981                         msix_entries[i].name);
3982                rc = devm_request_irq(&skdev->pdev->dev, qentry->vector,
3983                                      msix_entries[i].handler, 0,
3984                                      qentry->isr_name, skdev);
3985                if (rc) {
3986                        pr_err("(%s): Unable to register(%d) MSI-X "
3987                               "handler %d: %s\n",
3988                               skd_name(skdev), rc, i, qentry->isr_name);
3989                        goto msix_out;
3990                } else {
3991                        qentry->have_irq = 1;
3992                        qentry->rsp = skdev;
3993                }
3994        }
3995        pr_debug("%s:%s:%d %s: <%s> msix %d irq(s) enabled\n",
3996                 skdev->name, __func__, __LINE__,
3997                 pci_name(pdev), skdev->name, skdev->msix_count);
3998        kfree(entries);
            return 0;
3999
4000msix_out:
4001        kfree(entries);
4003        skd_release_msix(skdev);
4004        return rc;
4005}
4006
4007static int skd_acquire_irq(struct skd_device *skdev)
4008{
4009        int rc;
4010        struct pci_dev *pdev;
4011
4012        pdev = skdev->pdev;
4013        skdev->msix_count = 0;
4014
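            /*
             * Try the configured interrupt type first; on failure fall back
             * MSI-X -> MSI -> legacy INTx by rewriting irq_type and
             * retrying.
             */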
4015RETRY_IRQ_TYPE:
4016        switch (skdev->irq_type) {
4017        case SKD_IRQ_MSIX:
4018                rc = skd_acquire_msix(skdev);
4019                if (!rc)
4020                        pr_info("(%s): MSI-X %d irqs enabled\n",
4021                               skd_name(skdev), skdev->msix_count);
4022                else {
4023                        pr_err(
4024                               "(%s): failed to enable MSI-X, re-trying with MSI %d\n",
4025                               skd_name(skdev), rc);
4026                        skdev->irq_type = SKD_IRQ_MSI;
4027                        goto RETRY_IRQ_TYPE;
4028                }
4029                break;
4030        case SKD_IRQ_MSI:
4031                snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d-msi",
4032                         DRV_NAME, skdev->devno);
4033                rc = pci_enable_msi_range(pdev, 1, 1);
4034                if (rc > 0) {
4035                        rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr, 0,
4036                                              skdev->isr_name, skdev);
4037                        if (rc) {
4038                                pci_disable_msi(pdev);
4039                                pr_err(
4040                                       "(%s): failed to allocate the MSI interrupt %d\n",
4041                                       skd_name(skdev), rc);
4042                                goto RETRY_IRQ_LEGACY;
4043                        }
4044                        pr_info("(%s): MSI irq %d enabled\n",
4045                               skd_name(skdev), pdev->irq);
4046                } else {
4047RETRY_IRQ_LEGACY:
4048                        pr_err(
4049                               "(%s): failed to enable MSI, re-trying with LEGACY %d\n",
4050                               skd_name(skdev), rc);
4051                        skdev->irq_type = SKD_IRQ_LEGACY;
4052                        goto RETRY_IRQ_TYPE;
4053                }
4054                break;
4055        case SKD_IRQ_LEGACY:
4056                snprintf(skdev->isr_name, sizeof(skdev->isr_name),
4057                         "%s%d-legacy", DRV_NAME, skdev->devno);
4058                rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
4059                                      IRQF_SHARED, skdev->isr_name, skdev);
4060                if (!rc)
4061                        pr_info("(%s): LEGACY irq %d enabled\n",
4062                               skd_name(skdev), pdev->irq);
4063                else
4064                        pr_err("(%s): request LEGACY irq error %d\n",
4065                               skd_name(skdev), rc);
4066                break;
4067        default:
4068                pr_info("(%s): irq_type %d invalid, re-set to %d\n",
4069                       skd_name(skdev), skdev->irq_type, SKD_IRQ_DEFAULT);
4070                skdev->irq_type = SKD_IRQ_LEGACY;
4071                goto RETRY_IRQ_TYPE;
4072        }
4073        return rc;
4074}
4075
4076static void skd_release_irq(struct skd_device *skdev)
4077{
4078        switch (skdev->irq_type) {
4079        case SKD_IRQ_MSIX:
4080                skd_release_msix(skdev);
4081                break;
4082        case SKD_IRQ_MSI:
4083                devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
4084                pci_disable_msi(skdev->pdev);
4085                break;
4086        case SKD_IRQ_LEGACY:
4087                devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
4088                break;
4089        default:
4090                pr_err("(%s): wrong irq type %d!\n",
4091                       skd_name(skdev), skdev->irq_type);
4092                break;
4093        }
4094}
4095
4096/*
4097 *****************************************************************************
4098 * CONSTRUCT
4099 *****************************************************************************
4100 */
4101
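    /*
     * The completion queue and its error records share one DMA-coherent
     * allocation: SKD_N_COMPLETION_ENTRY completion entries followed
     * immediately by the same number of fit_comp_error_info records.
     */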
4102static int skd_cons_skcomp(struct skd_device *skdev)
4103{
4104        int rc = 0;
4105        struct fit_completion_entry_v1 *skcomp;
4106        u32 nbytes;
4107
4108        nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
4109        nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
4110
4111        pr_debug("%s:%s:%d comp pci_alloc, total bytes %d entries %d\n",
4112                 skdev->name, __func__, __LINE__,
4113                 nbytes, SKD_N_COMPLETION_ENTRY);
4114
4115        skcomp = pci_alloc_consistent(skdev->pdev, nbytes,
4116                                      &skdev->cq_dma_address);
4117
4118        if (skcomp == NULL) {
4119                rc = -ENOMEM;
4120                goto err_out;
4121        }
4122
4123        memset(skcomp, 0, nbytes);
4124
4125        skdev->skcomp_table = skcomp;
4126        skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
4127                                                           sizeof(*skcomp) *
4128                                                           SKD_N_COMPLETION_ENTRY);
4129
4130err_out:
4131        return rc;
4132}
4133
4134static int skd_cons_skmsg(struct skd_device *skdev)
4135{
4136        int rc = 0;
4137        u32 i;
4138
4139        pr_debug("%s:%s:%d skmsg_table kzalloc, struct %lu, count %u total %lu\n",
4140                 skdev->name, __func__, __LINE__,
4141                 sizeof(struct skd_fitmsg_context),
4142                 skdev->num_fitmsg_context,
4143                 sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
4144
4145        skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context)
4146                                     *skdev->num_fitmsg_context, GFP_KERNEL);
4147        if (skdev->skmsg_table == NULL) {
4148                rc = -ENOMEM;
4149                goto err_out;
4150        }
4151
4152        for (i = 0; i < skdev->num_fitmsg_context; i++) {
4153                struct skd_fitmsg_context *skmsg;
4154
4155                skmsg = &skdev->skmsg_table[i];
4156
4157                skmsg->id = i + SKD_ID_FIT_MSG;
4158
4159                skmsg->state = SKD_MSG_STATE_IDLE;
4160                skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
4161                                                      SKD_N_FITMSG_BYTES + 64,
4162                                                      &skmsg->mb_dma_address);
4163
4164                if (skmsg->msg_buf == NULL) {
4165                        rc = -ENOMEM;
4166                        goto err_out;
4167                }
4168
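                    /*
                     * The buffer was allocated with 64 bytes of slack above.
                     * Keep the low-order offset of the raw address, then
                     * round both the virtual and DMA addresses up to the
                     * alignment implied by FIT_QCMD_BASE_ADDRESS_MASK.
                     */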
4169                skmsg->offset = (u32)((u64)skmsg->msg_buf &
4170                                      (~FIT_QCMD_BASE_ADDRESS_MASK));
4171                skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK;
4172                skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf &
4173                                       FIT_QCMD_BASE_ADDRESS_MASK);
4174                skmsg->mb_dma_address += ~FIT_QCMD_BASE_ADDRESS_MASK;
4175                skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK;
4176                memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
4177
4178                skmsg->next = &skmsg[1];
4179        }
4180
4181        /* Free list is in order starting with the 0th entry. */
4182        skdev->skmsg_table[i - 1].next = NULL;
4183        skdev->skmsg_free_list = skdev->skmsg_table;
4184
4185err_out:
4186        return rc;
4187}
4188
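    /*
     * Allocate a DMA-coherent array of n_sg FIT SG descriptors and pre-link
     * them: each descriptor's next_desc_ptr holds the bus address of the
     * following entry, and the final descriptor is null-terminated.
     */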
4189static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
4190                                                  u32 n_sg,
4191                                                  dma_addr_t *ret_dma_addr)
4192{
4193        struct fit_sg_descriptor *sg_list;
4194        u32 nbytes;
4195
4196        nbytes = sizeof(*sg_list) * n_sg;
4197
4198        sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr);
4199
4200        if (sg_list != NULL) {
4201                uint64_t dma_address = *ret_dma_addr;
4202                u32 i;
4203
4204                memset(sg_list, 0, nbytes);
4205
4206                for (i = 0; i < n_sg - 1; i++) {
4207                        uint64_t ndp_off;
4208                        ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
4209
4210                        sg_list[i].next_desc_ptr = dma_address + ndp_off;
4211                }
4212                sg_list[i].next_desc_ptr = 0LL;
4213        }
4214
4215        return sg_list;
4216}
4217
4218static int skd_cons_skreq(struct skd_device *skdev)
4219{
4220        int rc = 0;
4221        u32 i;
4222
4223        pr_debug("%s:%s:%d skreq_table kzalloc, struct %lu, count %u total %lu\n",
4224                 skdev->name, __func__, __LINE__,
4225                 sizeof(struct skd_request_context),
4226                 skdev->num_req_context,
4227                 sizeof(struct skd_request_context) * skdev->num_req_context);
4228
4229        skdev->skreq_table = kzalloc(sizeof(struct skd_request_context)
4230                                     * skdev->num_req_context, GFP_KERNEL);
4231        if (skdev->skreq_table == NULL) {
4232                rc = -ENOMEM;
4233                goto err_out;
4234        }
4235
4236        pr_debug("%s:%s:%d alloc sg_table sg_per_req %u scatlist %lu total %lu\n",
4237                 skdev->name, __func__, __LINE__,
4238                 skdev->sgs_per_request, sizeof(struct scatterlist),
4239                 skdev->sgs_per_request * sizeof(struct scatterlist));
4240
4241        for (i = 0; i < skdev->num_req_context; i++) {
4242                struct skd_request_context *skreq;
4243
4244                skreq = &skdev->skreq_table[i];
4245
4246                skreq->id = i + SKD_ID_RW_REQUEST;
4247                skreq->state = SKD_REQ_STATE_IDLE;
4248
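                    /*
                     * Each request keeps two views of its data: a kernel
                     * scatterlist (skreq->sg) used when DMA-mapping the
                     * request pages, and a device-format FIT SG list
                     * (skreq->sksg_list) that is handed to the s1120.
                     */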
4249                skreq->sg = kzalloc(sizeof(struct scatterlist) *
4250                                    skdev->sgs_per_request, GFP_KERNEL);
4251                if (skreq->sg == NULL) {
4252                        rc = -ENOMEM;
4253                        goto err_out;
4254                }
4255                sg_init_table(skreq->sg, skdev->sgs_per_request);
4256
4257                skreq->sksg_list = skd_cons_sg_list(skdev,
4258                                                    skdev->sgs_per_request,
4259                                                    &skreq->sksg_dma_address);
4260
4261                if (skreq->sksg_list == NULL) {
4262                        rc = -ENOMEM;
4263                        goto err_out;
4264                }
4265
4266                skreq->next = &skreq[1];
4267        }
4268
4269        /* Free list is in order starting with the 0th entry. */
4270        skdev->skreq_table[i - 1].next = NULL;
4271        skdev->skreq_free_list = skdev->skreq_table;
4272
4273err_out:
4274        return rc;
4275}
4276
4277static int skd_cons_skspcl(struct skd_device *skdev)
4278{
4279        int rc = 0;
4280        u32 i, nbytes;
4281
4282        pr_debug("%s:%s:%d skspcl_table kzalloc, struct %lu, count %u total %lu\n",
4283                 skdev->name, __func__, __LINE__,
4284                 sizeof(struct skd_special_context),
4285                 skdev->n_special,
4286                 sizeof(struct skd_special_context) * skdev->n_special);
4287
4288        skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context)
4289                                      * skdev->n_special, GFP_KERNEL);
4290        if (skdev->skspcl_table == NULL) {
4291                rc = -ENOMEM;
4292                goto err_out;
4293        }
4294
4295        for (i = 0; i < skdev->n_special; i++) {
4296                struct skd_special_context *skspcl;
4297
4298                skspcl = &skdev->skspcl_table[i];
4299
4300                skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST;
4301                skspcl->req.state = SKD_REQ_STATE_IDLE;
4302
4303                skspcl->req.next = &skspcl[1].req;
4304
4305                nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4306
4307                skspcl->msg_buf = pci_alloc_consistent(skdev->pdev, nbytes,
4308                                                       &skspcl->mb_dma_address);
4309                if (skspcl->msg_buf == NULL) {
4310                        rc = -ENOMEM;
4311                        goto err_out;
4312                }
4313
4314                memset(skspcl->msg_buf, 0, nbytes);
4315
4316                skspcl->req.sg = kzalloc(sizeof(struct scatterlist) *
4317                                         SKD_N_SG_PER_SPECIAL, GFP_KERNEL);
4318                if (skspcl->req.sg == NULL) {
4319                        rc = -ENOMEM;
4320                        goto err_out;
4321                }
4322
4323                skspcl->req.sksg_list = skd_cons_sg_list(skdev,
4324                                                         SKD_N_SG_PER_SPECIAL,
4325                                                         &skspcl->req.
4326                                                         sksg_dma_address);
4327                if (skspcl->req.sksg_list == NULL) {
4328                        rc = -ENOMEM;
4329                        goto err_out;
4330                }
4331        }
4332
4333        /* Free list is in order starting with the 0th entry. */
4334        skdev->skspcl_table[i - 1].req.next = NULL;
4335        skdev->skspcl_free_list = skdev->skspcl_table;
4336
4337        return rc;
4338
4339err_out:
4340        return rc;
4341}
4342
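    /*
     * Construct the single "internal" special context.  It is not placed
     * on the skspcl free list; the driver uses it for its own commands,
     * such as the internal requests issued while bringing the device
     * online.
     */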
4343static int skd_cons_sksb(struct skd_device *skdev)
4344{
4345        int rc = 0;
4346        struct skd_special_context *skspcl;
4347        u32 nbytes;
4348
4349        skspcl = &skdev->internal_skspcl;
4350
4351        skspcl->req.id = 0 + SKD_ID_INTERNAL;
4352        skspcl->req.state = SKD_REQ_STATE_IDLE;
4353
4354        nbytes = SKD_N_INTERNAL_BYTES;
4355
4356        skspcl->data_buf = pci_alloc_consistent(skdev->pdev, nbytes,
4357                                                &skspcl->db_dma_address);
4358        if (skspcl->data_buf == NULL) {
4359                rc = -ENOMEM;
4360                goto err_out;
4361        }
4362
4363        memset(skspcl->data_buf, 0, nbytes);
4364
4365        nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4366        skspcl->msg_buf = pci_alloc_consistent(skdev->pdev, nbytes,
4367                                               &skspcl->mb_dma_address);
4368        if (skspcl->msg_buf == NULL) {
4369                rc = -ENOMEM;
4370                goto err_out;
4371        }
4372
4373        memset(skspcl->msg_buf, 0, nbytes);
4374
4375        skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
4376                                                 &skspcl->req.sksg_dma_address);
4377        if (skspcl->req.sksg_list == NULL) {
4378                rc = -ENOMEM;
4379                goto err_out;
4380        }
4381
4382        if (!skd_format_internal_skspcl(skdev)) {
4383                rc = -EINVAL;
4384                goto err_out;
4385        }
4386
4387err_out:
4388        return rc;
4389}
4390
4391static int skd_cons_disk(struct skd_device *skdev)
4392{
4393        int rc = 0;
4394        struct gendisk *disk;
4395        struct request_queue *q;
4396        unsigned long flags;
4397
4398        disk = alloc_disk(SKD_MINORS_PER_DEVICE);
4399        if (!disk) {
4400                rc = -ENOMEM;
4401                goto err_out;
4402        }
4403
4404        skdev->disk = disk;
4405        sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);
4406
4407        disk->major = skdev->major;
4408        disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
4409        disk->fops = &skd_blockdev_ops;
4410        disk->private_data = skdev;
4411
4412        q = blk_init_queue(skd_request_fn, &skdev->lock);
4413        if (!q) {
4414                rc = -ENOMEM;
4415                goto err_out;
4416        }
4417
4418        skdev->queue = q;
4419        disk->queue = q;
4420        q->queuedata = skdev;
4421
4422        blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
4423        blk_queue_max_segments(q, skdev->sgs_per_request);
4424        blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
4425
4426        /* set sysfs optimal_io_size to 8K */
4427        blk_queue_io_opt(q, 8192);
4428
4429        /* DISCARD Flag initialization. */
4430        q->limits.discard_granularity = 8192;
4431        q->limits.discard_alignment = 0;
4432        q->limits.max_discard_sectors = UINT_MAX >> 9;
4433        q->limits.discard_zeroes_data = 1;
4434        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
4435        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
4436
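            /*
             * Leave the queue stopped for now; it is restarted once the
             * device reports itself online during bring-up.
             */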
4437        spin_lock_irqsave(&skdev->lock, flags);
4438        pr_debug("%s:%s:%d stopping %s queue\n",
4439                 skdev->name, __func__, __LINE__, skdev->name);
4440        blk_stop_queue(skdev->queue);
4441        spin_unlock_irqrestore(&skdev->lock, flags);
4442
4443err_out:
4444        return rc;
4445}
4446
4447#define SKD_N_DEV_TABLE         16u
4448static u32 skd_next_devno;
4449
4450static struct skd_device *skd_construct(struct pci_dev *pdev)
4451{
4452        struct skd_device *skdev;
4453        int blk_major = skd_major;
4454        int rc;
4455
4456        skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
4457
4458        if (!skdev) {
4459                pr_err(PFX "(%s): memory alloc failure\n",
4460                       pci_name(pdev));
4461                return NULL;
4462        }
4463
4464        skdev->state = SKD_DRVR_STATE_LOAD;
4465        skdev->pdev = pdev;
4466        skdev->devno = skd_next_devno++;
4467        skdev->major = blk_major;
4468        skdev->irq_type = skd_isr_type;
4469        sprintf(skdev->name, DRV_NAME "%d", skdev->devno);
4470        skdev->dev_max_queue_depth = 0;
4471
4472        skdev->num_req_context = skd_max_queue_depth;
4473        skdev->num_fitmsg_context = skd_max_queue_depth;
4474        skdev->n_special = skd_max_pass_thru;
4475        skdev->cur_max_queue_depth = 1;
4476        skdev->queue_low_water_mark = 1;
4477        skdev->proto_ver = 99;
4478        skdev->sgs_per_request = skd_sgs_per_request;
4479        skdev->dbg_level = skd_dbg_level;
4480
4481        atomic_set(&skdev->device_count, 0);
4482
4483        spin_lock_init(&skdev->lock);
4484
4485        INIT_WORK(&skdev->completion_worker, skd_completion_worker);
4486
4487        pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
4488        rc = skd_cons_skcomp(skdev);
4489        if (rc < 0)
4490                goto err_out;
4491
4492        pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
4493        rc = skd_cons_skmsg(skdev);
4494        if (rc < 0)
4495                goto err_out;
4496
4497        pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
4498        rc = skd_cons_skreq(skdev);
4499        if (rc < 0)
4500                goto err_out;
4501
4502        pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
4503        rc = skd_cons_skspcl(skdev);
4504        if (rc < 0)
4505                goto err_out;
4506
4507        pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
4508        rc = skd_cons_sksb(skdev);
4509        if (rc < 0)
4510                goto err_out;
4511
4512        pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
4513        rc = skd_cons_disk(skdev);
4514        if (rc < 0)
4515                goto err_out;
4516
4517        pr_debug("%s:%s:%d VICTORY\n", skdev->name, __func__, __LINE__);
4518        return skdev;
4519
4520err_out:
4521        pr_debug("%s:%s:%d construct failed\n",
4522                 skdev->name, __func__, __LINE__);
4523        skd_destruct(skdev);
4524        return NULL;
4525}
4526
4527/*
4528 *****************************************************************************
4529 * DESTRUCT (FREE)
4530 *****************************************************************************
4531 */
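
    /*
     * Teardown runs in the reverse order of skd_construct(): disk, sksb,
     * skspcl, skreq, skmsg, skcomp, then the skd_device itself.  Each
     * helper tolerates a partially constructed device, so skd_destruct()
     * is also safe to call from the construct error path.
     */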
4532
4533static void skd_free_skcomp(struct skd_device *skdev)
4534{
4535        if (skdev->skcomp_table != NULL) {
4536                u32 nbytes;
4537
4538                nbytes = sizeof(skdev->skcomp_table[0]) *
4539                         SKD_N_COMPLETION_ENTRY;
4540                pci_free_consistent(skdev->pdev, nbytes,
4541                                    skdev->skcomp_table, skdev->cq_dma_address);
4542        }
4543
4544        skdev->skcomp_table = NULL;
4545        skdev->cq_dma_address = 0;
4546}
4547
4548static void skd_free_skmsg(struct skd_device *skdev)
4549{
4550        u32 i;
4551
4552        if (skdev->skmsg_table == NULL)
4553                return;
4554
4555        for (i = 0; i < skdev->num_fitmsg_context; i++) {
4556                struct skd_fitmsg_context *skmsg;
4557
4558                skmsg = &skdev->skmsg_table[i];
4559
4560                if (skmsg->msg_buf != NULL) {
4561                        skmsg->msg_buf += skmsg->offset;
4562                        skmsg->mb_dma_address += skmsg->offset;
4563                        pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
4564                                            skmsg->msg_buf,
4565                                            skmsg->mb_dma_address);
4566                }
4567                skmsg->msg_buf = NULL;
4568                skmsg->mb_dma_address = 0;
4569        }
4570
4571        kfree(skdev->skmsg_table);
4572        skdev->skmsg_table = NULL;
4573}
4574
4575static void skd_free_sg_list(struct skd_device *skdev,
4576                             struct fit_sg_descriptor *sg_list,
4577                             u32 n_sg, dma_addr_t dma_addr)
4578{
4579        if (sg_list != NULL) {
4580                u32 nbytes;
4581
4582                nbytes = sizeof(*sg_list) * n_sg;
4583
4584                pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
4585        }
4586}
4587
4588static void skd_free_skreq(struct skd_device *skdev)
4589{
4590        u32 i;
4591
4592        if (skdev->skreq_table == NULL)
4593                return;
4594
4595        for (i = 0; i < skdev->num_req_context; i++) {
4596                struct skd_request_context *skreq;
4597
4598                skreq = &skdev->skreq_table[i];
4599
4600                skd_free_sg_list(skdev, skreq->sksg_list,
4601                                 skdev->sgs_per_request,
4602                                 skreq->sksg_dma_address);
4603
4604                skreq->sksg_list = NULL;
4605                skreq->sksg_dma_address = 0;
4606
4607                kfree(skreq->sg);
4608        }
4609
4610        kfree(skdev->skreq_table);
4611        skdev->skreq_table = NULL;
4612}
4613
4614static void skd_free_skspcl(struct skd_device *skdev)
4615{
4616        u32 i;
4617        u32 nbytes;
4618
4619        if (skdev->skspcl_table == NULL)
4620                return;
4621
4622        for (i = 0; i < skdev->n_special; i++) {
4623                struct skd_special_context *skspcl;
4624
4625                skspcl = &skdev->skspcl_table[i];
4626
4627                if (skspcl->msg_buf != NULL) {
4628                        nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4629                        pci_free_consistent(skdev->pdev, nbytes,
4630                                            skspcl->msg_buf,
4631                                            skspcl->mb_dma_address);
4632                }
4633
4634                skspcl->msg_buf = NULL;
4635                skspcl->mb_dma_address = 0;
4636
4637                skd_free_sg_list(skdev, skspcl->req.sksg_list,
4638                                 SKD_N_SG_PER_SPECIAL,
4639                                 skspcl->req.sksg_dma_address);
4640
4641                skspcl->req.sksg_list = NULL;
4642                skspcl->req.sksg_dma_address = 0;
4643
4644                kfree(skspcl->req.sg);
4645        }
4646
4647        kfree(skdev->skspcl_table);
4648        skdev->skspcl_table = NULL;
4649}
4650
4651static void skd_free_sksb(struct skd_device *skdev)
4652{
4653        struct skd_special_context *skspcl;
4654        u32 nbytes;
4655
4656        skspcl = &skdev->internal_skspcl;
4657
4658        if (skspcl->data_buf != NULL) {
4659                nbytes = SKD_N_INTERNAL_BYTES;
4660
4661                pci_free_consistent(skdev->pdev, nbytes,
4662                                    skspcl->data_buf, skspcl->db_dma_address);
4663        }
4664
4665        skspcl->data_buf = NULL;
4666        skspcl->db_dma_address = 0;
4667
4668        if (skspcl->msg_buf != NULL) {
4669                nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4670                pci_free_consistent(skdev->pdev, nbytes,
4671                                    skspcl->msg_buf, skspcl->mb_dma_address);
4672        }
4673
4674        skspcl->msg_buf = NULL;
4675        skspcl->mb_dma_address = 0;
4676
4677        skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
4678                         skspcl->req.sksg_dma_address);
4679
4680        skspcl->req.sksg_list = NULL;
4681        skspcl->req.sksg_dma_address = 0;
4682}
4683
4684static void skd_free_disk(struct skd_device *skdev)
4685{
4686        struct gendisk *disk = skdev->disk;
4687
4688        if (disk != NULL) {
4689                struct request_queue *q = disk->queue;
4690
4691                if (disk->flags & GENHD_FL_UP)
4692                        del_gendisk(disk);
4693                if (q)
4694                        blk_cleanup_queue(q);
4695                put_disk(disk);
4696        }
4697        skdev->disk = NULL;
4698}
4699
4700static void skd_destruct(struct skd_device *skdev)
4701{
4702        if (skdev == NULL)
4703                return;
4704
4705
4706        pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
4707        skd_free_disk(skdev);
4708
4709        pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
4710        skd_free_sksb(skdev);
4711
4712        pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
4713        skd_free_skspcl(skdev);
4714
4715        pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
4716        skd_free_skreq(skdev);
4717
4718        pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
4719        skd_free_skmsg(skdev);
4720
4721        pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
4722        skd_free_skcomp(skdev);
4723
4724        pr_debug("%s:%s:%d skdev\n", skdev->name, __func__, __LINE__);
4725        kfree(skdev);
4726}
4727
4728/*
4729 *****************************************************************************
4730 * BLOCK DEVICE (BDEV) GLUE
4731 *****************************************************************************
4732 */
4733
4734static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
4735{
4736        struct skd_device *skdev;
4737        u64 capacity;
4738
4739        skdev = bdev->bd_disk->private_data;
4740
4741        pr_debug("%s:%s:%d %s: CMD[%s] getgeo device\n",
4742                 skdev->name, __func__, __LINE__,
4743                 bdev->bd_disk->disk_name, current->comm);
4744
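            /*
             * Report a synthetic CHS geometry (64 heads, 255 sectors per
             * track, cylinders derived from the capacity); only legacy
             * tools care about these values.
             */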
4745        if (skdev->read_cap_is_valid) {
4746                capacity = get_capacity(skdev->disk);
4747                geo->heads = 64;
4748                geo->sectors = 255;
4749                geo->cylinders = (capacity) / (255 * 64);
4750
4751                return 0;
4752        }
4753        return -EIO;
4754}
4755
4756static int skd_bdev_attach(struct skd_device *skdev)
4757{
4758        pr_debug("%s:%s:%d add_disk\n", skdev->name, __func__, __LINE__);
4759        add_disk(skdev->disk);
4760        return 0;
4761}
4762
4763static const struct block_device_operations skd_blockdev_ops = {
4764        .owner          = THIS_MODULE,
4765        .ioctl          = skd_bdev_ioctl,
4766        .getgeo         = skd_bdev_getgeo,
4767};
4768
4769
4770/*
4771 *****************************************************************************
4772 * PCIe DRIVER GLUE
4773 *****************************************************************************
4774 */
4775
4776static DEFINE_PCI_DEVICE_TABLE(skd_pci_tbl) = {
4777        { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
4778          PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4779        { 0 }                     /* terminate list */
4780};
4781
4782MODULE_DEVICE_TABLE(pci, skd_pci_tbl);
4783
4784static char *skd_pci_info(struct skd_device *skdev, char *str)
4785{
4786        int pcie_reg;
4787
4788        strcpy(str, "PCIe (");
4789        pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);
4790
4791        if (pcie_reg) {
4792
4793                char lwstr[6];
4794                uint16_t pcie_lstat, lspeed, lwidth;
4795
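                    /*
                     * 0x12 is the Link Status register offset within the
                     * PCIe capability: bits 3:0 encode the link speed and
                     * bits 9:4 the negotiated link width.
                     */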
4796                pcie_reg += 0x12;
4797                pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
4798                lspeed = pcie_lstat & (0xF);
4799                lwidth = (pcie_lstat & 0x3F0) >> 4;
4800
4801                if (lspeed == 1)
4802                        strcat(str, "2.5GT/s ");
4803                else if (lspeed == 2)
4804                        strcat(str, "5.0GT/s ");
4805                else
4806                        strcat(str, "<unknown> ");
4807                snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
4808                strcat(str, lwstr);
4809        }
4810        return str;
4811}
4812
4813static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4814{
4815        int i;
4816        int rc = 0;
4817        char pci_str[32];
4818        struct skd_device *skdev;
4819
4820        pr_info("STEC s1120 Driver(%s) version %s-b%s\n",
4821               DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
4822        pr_info("(skd?:??:[%s]): vendor=%04X device=%04x\n",
4823               pci_name(pdev), pdev->vendor, pdev->device);
4824
4825        rc = pci_enable_device(pdev);
4826        if (rc)
4827                return rc;
4828        rc = pci_request_regions(pdev, DRV_NAME);
4829        if (rc)
4830                goto err_out;
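            /*
             * Prefer 64-bit streaming and consistent DMA masks; fall back
             * to 32-bit addressing if the platform cannot support 64-bit.
             */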
4831        rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
4832        if (!rc) {
4833                if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
4834
4835                        pr_err("(%s): consistent DMA mask error %d\n",
4836                               pci_name(pdev), rc);
4837                }
4838        } else {
4839                rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4840                if (rc) {
4841
4842                        pr_err("(%s): DMA mask error %d\n",
4843                               pci_name(pdev), rc);
4844                        goto err_out_regions;
4845                }
4846        }
4847
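            /*
             * A major of 0 asks register_blkdev() for a dynamically
             * allocated major number, obtained once and shared by every
             * skd device.
             */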
4848        if (!skd_major) {
4849                rc = register_blkdev(0, DRV_NAME);
4850                if (rc < 0)
4851                        goto err_out_regions;
4852                BUG_ON(!rc);
4853                skd_major = rc;
4854        }
4855
4856        skdev = skd_construct(pdev);
4857        if (skdev == NULL) {
4858                rc = -ENOMEM;
4859                goto err_out_regions;
4860        }
4861
4862        skd_pci_info(skdev, pci_str);
4863        pr_info("(%s): %s 64bit\n", skd_name(skdev), pci_str);
4864
4865        pci_set_master(pdev);
4866        rc = pci_enable_pcie_error_reporting(pdev);
4867        if (rc) {
4868                pr_err(
4869                       "(%s): bad enable of PCIe error reporting rc=%d\n",
4870                       skd_name(skdev), rc);
4871                skdev->pcie_error_reporting_is_enabled = 0;
4872        } else
4873                skdev->pcie_error_reporting_is_enabled = 1;
4874
4875
4876        pci_set_drvdata(pdev, skdev);
4877
4878        skdev->disk->driverfs_dev = &pdev->dev;
4879
4880        for (i = 0; i < SKD_MAX_BARS; i++) {
4881                skdev->mem_phys[i] = pci_resource_start(pdev, i);
4882                skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
4883                skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
4884                                            skdev->mem_size[i]);
4885                if (!skdev->mem_map[i]) {
4886                        pr_err("(%s): Unable to map adapter memory!\n",
4887                               skd_name(skdev));
4888                        rc = -ENODEV;
4889                        goto err_out_iounmap;
4890                }
4891                pr_debug("%s:%s:%d mem_map=%p, phys=%016llx, size=%d\n",
4892                         skdev->name, __func__, __LINE__,
4893                         skdev->mem_map[i],
4894                         (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
4895        }
4896
4897        rc = skd_acquire_irq(skdev);
4898        if (rc) {
4899                pr_err("(%s): interrupt resource error %d\n",
4900                       skd_name(skdev), rc);
4901                goto err_out_iounmap;
4902        }
4903
4904        rc = skd_start_timer(skdev);
4905        if (rc)
4906                goto err_out_timer;
4907
4908        init_waitqueue_head(&skdev->waitq);
4909
4910        skd_start_device(skdev);
4911
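            /*
             * Wait (up to SKD_START_WAIT_SECONDS) for the start sequence
             * to flag the device online via gendisk_on; only then is the
             * gendisk added below.
             */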
4912        rc = wait_event_interruptible_timeout(skdev->waitq,
4913                                              (skdev->gendisk_on),
4914                                              (SKD_START_WAIT_SECONDS * HZ));
4915        if (skdev->gendisk_on > 0) {
4916                /* device came on-line after reset */
4917                skd_bdev_attach(skdev);
4918                rc = 0;
4919        } else {
4920                /* we timed out, something is wrong with the device,
4921                   don't add the disk structure */
4922                pr_err(
4923                       "(%s): error: waiting for s1120 timed out %d!\n",
4924                       skd_name(skdev), rc);
4925                /* if there was no error, the wait timed out; fail with ENXIO */
4926                if (!rc)
4927                        rc = -ENXIO;
4928                goto err_out_timer;
4929        }
4930
4931
4932#ifdef SKD_VMK_POLL_HANDLER
4933        if (skdev->irq_type == SKD_IRQ_MSIX) {
4934                /* MSIX completion handler is being used for coredump */
4935                vmklnx_scsi_register_poll_handler(skdev->scsi_host,
4936                                                  skdev->msix_entries[5].vector,
4937                                                  skd_comp_q, skdev);
4938        } else {
4939                vmklnx_scsi_register_poll_handler(skdev->scsi_host,
4940                                                  skdev->pdev->irq, skd_isr,
4941                                                  skdev);
4942        }
4943#endif  /* SKD_VMK_POLL_HANDLER */
4944
4945        return rc;
4946
4947err_out_timer:
4948        skd_stop_device(skdev);
4949        skd_release_irq(skdev);
4950
4951err_out_iounmap:
4952        for (i = 0; i < SKD_MAX_BARS; i++)
4953                if (skdev->mem_map[i])
4954                        iounmap(skdev->mem_map[i]);
4955
4956        if (skdev->pcie_error_reporting_is_enabled)
4957                pci_disable_pcie_error_reporting(pdev);
4958
4959        skd_destruct(skdev);
4960
4961err_out_regions:
4962        pci_release_regions(pdev);
4963
4964err_out:
4965        pci_disable_device(pdev);
4966        pci_set_drvdata(pdev, NULL);
4967        return rc;
4968}
4969
4970static void skd_pci_remove(struct pci_dev *pdev)
4971{
4972        int i;
4973        struct skd_device *skdev;
4974
4975        skdev = pci_get_drvdata(pdev);
4976        if (!skdev) {
4977                pr_err("%s: no device data for PCI\n", pci_name(pdev));
4978                return;
4979        }
4980        skd_stop_device(skdev);
4981        skd_release_irq(skdev);
4982
4983        for (i = 0; i < SKD_MAX_BARS; i++)
4984                if (skdev->mem_map[i])
4985                        iounmap((u32 *)skdev->mem_map[i]);
4986
4987        if (skdev->pcie_error_reporting_is_enabled)
4988                pci_disable_pcie_error_reporting(pdev);
4989
4990        skd_destruct(skdev);
4991
4992        pci_release_regions(pdev);
4993        pci_disable_device(pdev);
4994        pci_set_drvdata(pdev, NULL);
4995
4996        return;
4997}
4998
4999static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
5000{
5001        int i;
5002        struct skd_device *skdev;
5003
5004        skdev = pci_get_drvdata(pdev);
5005        if (!skdev) {
5006                pr_err("%s: no device data for PCI\n", pci_name(pdev));
5007                return -EIO;
5008        }
5009
5010        skd_stop_device(skdev);
5011
5012        skd_release_irq(skdev);
5013
5014        for (i = 0; i < SKD_MAX_BARS; i++)
5015                if (skdev->mem_map[i])
5016                        iounmap((u32 *)skdev->mem_map[i]);
5017
5018        if (skdev->pcie_error_reporting_is_enabled)
5019                pci_disable_pcie_error_reporting(pdev);
5020
5021        pci_release_regions(pdev);
5022        pci_save_state(pdev);
5023        pci_disable_device(pdev);
5024        pci_set_power_state(pdev, pci_choose_state(pdev, state));
5025        return 0;
5026}
5027
5028static int skd_pci_resume(struct pci_dev *pdev)
5029{
5030        int i;
5031        int rc = 0;
5032        struct skd_device *skdev;
5033
5034        skdev = pci_get_drvdata(pdev);
5035        if (!skdev) {
5036                pr_err("%s: no device data for PCI\n", pci_name(pdev));
5037                return -1;
5038        }
5039
5040        pci_set_power_state(pdev, PCI_D0);
5041        pci_enable_wake(pdev, PCI_D0, 0);
5042        pci_restore_state(pdev);
5043
5044        rc = pci_enable_device(pdev);
5045        if (rc)
5046                return rc;
5047        rc = pci_request_regions(pdev, DRV_NAME);
5048        if (rc)
5049                goto err_out;
5050        rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
5051        if (!rc) {
5052                if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
5053
5054                        pr_err("(%s): consistent DMA mask error %d\n",
5055                               pci_name(pdev), rc);
5056                }
5057        } else {
5058                rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5059                if (rc) {
5060
5061                        pr_err("(%s): DMA mask error %d\n",
5062                               pci_name(pdev), rc);
5063                        goto err_out_regions;
5064                }
5065        }
5066
5067        pci_set_master(pdev);
5068        rc = pci_enable_pcie_error_reporting(pdev);
5069        if (rc) {
5070                pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
5071                       skdev->name, rc);
5072                skdev->pcie_error_reporting_is_enabled = 0;
5073        } else
5074                skdev->pcie_error_reporting_is_enabled = 1;
5075
5076        for (i = 0; i < SKD_MAX_BARS; i++) {
5077
5078                skdev->mem_phys[i] = pci_resource_start(pdev, i);
5079                skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
5080                skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
5081                                            skdev->mem_size[i]);
5082                if (!skdev->mem_map[i]) {
5083                        pr_err("(%s): Unable to map adapter memory!\n",
5084                               skd_name(skdev));
5085                        rc = -ENODEV;
5086                        goto err_out_iounmap;
5087                }
5088                pr_debug("%s:%s:%d mem_map=%p, phys=%016llx, size=%d\n",
5089                         skdev->name, __func__, __LINE__,
5090                         skdev->mem_map[i],
5091                         (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
5092        }
5093        rc = skd_acquire_irq(skdev);
5094        if (rc) {
5095
5096                pr_err("(%s): interrupt resource error %d\n",
5097                       pci_name(pdev), rc);
5098                goto err_out_iounmap;
5099        }
5100
5101        rc = skd_start_timer(skdev);
5102        if (rc)
5103                goto err_out_timer;
5104
5105        init_waitqueue_head(&skdev->waitq);
5106
5107        skd_start_device(skdev);
5108
5109        return rc;
5110
5111err_out_timer:
5112        skd_stop_device(skdev);
5113        skd_release_irq(skdev);
5114
5115err_out_iounmap:
5116        for (i = 0; i < SKD_MAX_BARS; i++)
5117                if (skdev->mem_map[i])
5118                        iounmap(skdev->mem_map[i]);
5119
5120        if (skdev->pcie_error_reporting_is_enabled)
5121                pci_disable_pcie_error_reporting(pdev);
5122
5123err_out_regions:
5124        pci_release_regions(pdev);
5125
5126err_out:
5127        pci_disable_device(pdev);
5128        return rc;
5129}
5130
5131static void skd_pci_shutdown(struct pci_dev *pdev)
5132{
5133        struct skd_device *skdev;
5134
5135        pr_err("skd_pci_shutdown called\n");
5136
5137        skdev = pci_get_drvdata(pdev);
5138        if (!skdev) {
5139                pr_err("%s: no device data for PCI\n", pci_name(pdev));
5140                return;
5141        }
5142
5143        pr_err("%s: calling stop\n", skd_name(skdev));
5144        skd_stop_device(skdev);
5145}
5146
5147static struct pci_driver skd_driver = {
5148        .name           = DRV_NAME,
5149        .id_table       = skd_pci_tbl,
5150        .probe          = skd_pci_probe,
5151        .remove         = skd_pci_remove,
5152        .suspend        = skd_pci_suspend,
5153        .resume         = skd_pci_resume,
5154        .shutdown       = skd_pci_shutdown,
5155};
5156
5157/*
5158 *****************************************************************************
5159 * LOGGING SUPPORT
5160 *****************************************************************************
5161 */
5162
5163static const char *skd_name(struct skd_device *skdev)
5164{
5165        memset(skdev->id_str, 0, sizeof(skdev->id_str));
5166
5167        if (skdev->inquiry_is_valid)
5168                snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:%s:[%s]",
5169                         skdev->name, skdev->inq_serial_num,
5170                         pci_name(skdev->pdev));
5171        else
5172                snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:??:[%s]",
5173                         skdev->name, pci_name(skdev->pdev));
5174
5175        return skdev->id_str;
5176}
5177
5178const char *skd_drive_state_to_str(int state)
5179{
5180        switch (state) {
5181        case FIT_SR_DRIVE_OFFLINE:
5182                return "OFFLINE";
5183        case FIT_SR_DRIVE_INIT:
5184                return "INIT";
5185        case FIT_SR_DRIVE_ONLINE:
5186                return "ONLINE";
5187        case FIT_SR_DRIVE_BUSY:
5188                return "BUSY";
5189        case FIT_SR_DRIVE_FAULT:
5190                return "FAULT";
5191        case FIT_SR_DRIVE_DEGRADED:
5192                return "DEGRADED";
5193        case FIT_SR_PCIE_LINK_DOWN:
5194                return "LINK_DOWN";
5195        case FIT_SR_DRIVE_SOFT_RESET:
5196                return "SOFT_RESET";
5197        case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
5198                return "NEED_FW";
5199        case FIT_SR_DRIVE_INIT_FAULT:
5200                return "INIT_FAULT";
5201        case FIT_SR_DRIVE_BUSY_SANITIZE:
5202                return "BUSY_SANITIZE";
5203        case FIT_SR_DRIVE_BUSY_ERASE:
5204                return "BUSY_ERASE";
5205        case FIT_SR_DRIVE_FW_BOOTING:
5206                return "FW_BOOTING";
5207        default:
5208                return "???";
5209        }
5210}
5211
5212const char *skd_skdev_state_to_str(enum skd_drvr_state state)
5213{
5214        switch (state) {
5215        case SKD_DRVR_STATE_LOAD:
5216                return "LOAD";
5217        case SKD_DRVR_STATE_IDLE:
5218                return "IDLE";
5219        case SKD_DRVR_STATE_BUSY:
5220                return "BUSY";
5221        case SKD_DRVR_STATE_STARTING:
5222                return "STARTING";
5223        case SKD_DRVR_STATE_ONLINE:
5224                return "ONLINE";
5225        case SKD_DRVR_STATE_PAUSING:
5226                return "PAUSING";
5227        case SKD_DRVR_STATE_PAUSED:
5228                return "PAUSED";
5229        case SKD_DRVR_STATE_DRAINING_TIMEOUT:
5230                return "DRAINING_TIMEOUT";
5231        case SKD_DRVR_STATE_RESTARTING:
5232                return "RESTARTING";
5233        case SKD_DRVR_STATE_RESUMING:
5234                return "RESUMING";
5235        case SKD_DRVR_STATE_STOPPING:
5236                return "STOPPING";
5237        case SKD_DRVR_STATE_SYNCING:
5238                return "SYNCING";
5239        case SKD_DRVR_STATE_FAULT:
5240                return "FAULT";
5241        case SKD_DRVR_STATE_DISAPPEARED:
5242                return "DISAPPEARED";
5243        case SKD_DRVR_STATE_BUSY_ERASE:
5244                return "BUSY_ERASE";
5245        case SKD_DRVR_STATE_BUSY_SANITIZE:
5246                return "BUSY_SANITIZE";
5247        case SKD_DRVR_STATE_BUSY_IMMINENT:
5248                return "BUSY_IMMINENT";
5249        case SKD_DRVR_STATE_WAIT_BOOT:
5250                return "WAIT_BOOT";
5251
5252        default:
5253                return "???";
5254        }
5255}
5256
5257static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
5258{
5259        switch (state) {
5260        case SKD_MSG_STATE_IDLE:
5261                return "IDLE";
5262        case SKD_MSG_STATE_BUSY:
5263                return "BUSY";
5264        default:
5265                return "???";
5266        }
5267}
5268
5269static const char *skd_skreq_state_to_str(enum skd_req_state state)
5270{
5271        switch (state) {
5272        case SKD_REQ_STATE_IDLE:
5273                return "IDLE";
5274        case SKD_REQ_STATE_SETUP:
5275                return "SETUP";
5276        case SKD_REQ_STATE_BUSY:
5277                return "BUSY";
5278        case SKD_REQ_STATE_COMPLETED:
5279                return "COMPLETED";
5280        case SKD_REQ_STATE_TIMEOUT:
5281                return "TIMEOUT";
5282        case SKD_REQ_STATE_ABORTED:
5283                return "ABORTED";
5284        default:
5285                return "???";
5286        }
5287}
5288
5289static void skd_log_skdev(struct skd_device *skdev, const char *event)
5290{
5291        pr_debug("%s:%s:%d (%s) skdev=%p event='%s'\n",
5292                 skdev->name, __func__, __LINE__, skdev->name, skdev, event);
5293        pr_debug("%s:%s:%d   drive_state=%s(%d) driver_state=%s(%d)\n",
5294                 skdev->name, __func__, __LINE__,
5295                 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
5296                 skd_skdev_state_to_str(skdev->state), skdev->state);
5297        pr_debug("%s:%s:%d   busy=%d limit=%d dev=%d lowat=%d\n",
5298                 skdev->name, __func__, __LINE__,
5299                 skdev->in_flight, skdev->cur_max_queue_depth,
5300                 skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
5301        pr_debug("%s:%s:%d   timestamp=0x%x cycle=%d cycle_ix=%d\n",
5302                 skdev->name, __func__, __LINE__,
5303                 skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
5304}
5305
5306static void skd_log_skmsg(struct skd_device *skdev,
5307                          struct skd_fitmsg_context *skmsg, const char *event)
5308{
5309        pr_debug("%s:%s:%d (%s) skmsg=%p event='%s'\n",
5310                 skdev->name, __func__, __LINE__, skdev->name, skmsg, event);
5311        pr_debug("%s:%s:%d   state=%s(%d) id=0x%04x length=%d\n",
5312                 skdev->name, __func__, __LINE__,
5313                 skd_skmsg_state_to_str(skmsg->state), skmsg->state,
5314                 skmsg->id, skmsg->length);
5315}
5316
5317static void skd_log_skreq(struct skd_device *skdev,
5318                          struct skd_request_context *skreq, const char *event)
5319{
5320        pr_debug("%s:%s:%d (%s) skreq=%p event='%s'\n",
5321                 skdev->name, __func__, __LINE__, skdev->name, skreq, event);
5322        pr_debug("%s:%s:%d   state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
5323                 skdev->name, __func__, __LINE__,
5324                 skd_skreq_state_to_str(skreq->state), skreq->state,
5325                 skreq->id, skreq->fitmsg_id);
5326        pr_debug("%s:%s:%d   timo=0x%x sg_dir=%d n_sg=%d\n",
5327                 skdev->name, __func__, __LINE__,
5328                 skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);
5329
5330        if (skreq->req != NULL) {
5331                struct request *req = skreq->req;
5332                u32 lba = (u32)blk_rq_pos(req);
5333                u32 count = blk_rq_sectors(req);
5334
5335                pr_debug("%s:%s:%d "
5336                         "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
5337                         skdev->name, __func__, __LINE__,
5338                         req, lba, lba, count, count,
5339                         (int)rq_data_dir(req));
5340        } else
5341                pr_debug("%s:%s:%d req=NULL\n",
5342                         skdev->name, __func__, __LINE__);
5343}
5344
5345/*
5346 *****************************************************************************
5347 * MODULE GLUE
5348 *****************************************************************************
5349 */
5350
5351static int __init skd_init(void)
5352{
5353        pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);
5354
5355        switch (skd_isr_type) {
5356        case SKD_IRQ_LEGACY:
5357        case SKD_IRQ_MSI:
5358        case SKD_IRQ_MSIX:
5359                break;
5360        default:
5361                pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
5362                       skd_isr_type, SKD_IRQ_DEFAULT);
5363                skd_isr_type = SKD_IRQ_DEFAULT;
5364        }
5365
5366        if (skd_max_queue_depth < 1 ||
5367            skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
5368                pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
5369                       skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
5370                skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
5371        }
5372
5373        if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) {
5374                pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
5375                       skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
5376                skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
5377        }
5378
5379        if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
5380                pr_err(PFX "skd_sgs_per_request %d invalid, re-set to %d\n",
5381                       skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
5382                skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
5383        }
5384
5385        if (skd_dbg_level < 0 || skd_dbg_level > 2) {
5386                pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
5387                       skd_dbg_level, 0);
5388                skd_dbg_level = 0;
5389        }
5390
5391        if (skd_isr_comp_limit < 0) {
5392                pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n",
5393                       skd_isr_comp_limit, 0);
5394                skd_isr_comp_limit = 0;
5395        }
5396
5397        if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) {
5398                pr_err(PFX "skd_max_pass_thru %d invalid, re-set to %d\n",
5399                       skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT);
5400                skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
5401        }
5402
5403        return pci_register_driver(&skd_driver);
5404}
5405
5406static void __exit skd_exit(void)
5407{
5408        pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID);
5409
5410        pci_unregister_driver(&skd_driver);
5411
5412        if (skd_major)
5413                unregister_blkdev(skd_major, DRV_NAME);
5414}
5415
5416module_init(skd_init);
5417module_exit(skd_exit);
5418