/* linux/drivers/scsi/myrb.c */
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
   4 *
   5 * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
   6 *
   7 * Based on the original DAC960 driver,
   8 * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
   9 * Portions Copyright 2002 by Mylex (An IBM Business Unit)
  10 *
  11 */
  12
  13#include <linux/module.h>
  14#include <linux/types.h>
  15#include <linux/delay.h>
  16#include <linux/interrupt.h>
  17#include <linux/pci.h>
  18#include <linux/raid_class.h>
  19#include <asm/unaligned.h>
  20#include <scsi/scsi.h>
  21#include <scsi/scsi_host.h>
  22#include <scsi/scsi_device.h>
  23#include <scsi/scsi_cmnd.h>
  24#include <scsi/scsi_tcq.h>
  25#include "myrb.h"
  26
  27static struct raid_template *myrb_raid_template;
  28
  29static void myrb_monitor(struct work_struct *work);
  30static inline void myrb_translate_devstate(void *DeviceState);
  31
  32static inline int myrb_logical_channel(struct Scsi_Host *shost)
  33{
  34        return shost->max_channel - 1;
  35}
  36
/* Lookup table mapping myrb device states to printable names. */
static struct myrb_devstate_name_entry {
	enum myrb_devstate state;	/* firmware device state */
	const char *name;		/* human-readable label */
} myrb_devstate_name_list[] = {
	{ MYRB_DEVICE_DEAD, "Dead" },
	{ MYRB_DEVICE_WO, "WriteOnly" },
	{ MYRB_DEVICE_ONLINE, "Online" },
	{ MYRB_DEVICE_CRITICAL, "Critical" },
	{ MYRB_DEVICE_STANDBY, "Standby" },
	{ MYRB_DEVICE_OFFLINE, "Offline" },
};
  48
  49static const char *myrb_devstate_name(enum myrb_devstate state)
  50{
  51        struct myrb_devstate_name_entry *entry = myrb_devstate_name_list;
  52        int i;
  53
  54        for (i = 0; i < ARRAY_SIZE(myrb_devstate_name_list); i++) {
  55                if (entry[i].state == state)
  56                        return entry[i].name;
  57        }
  58        return "Unknown";
  59}
  60
/* Lookup table mapping myrb RAID levels to printable names. */
static struct myrb_raidlevel_name_entry {
	enum myrb_raidlevel level;	/* firmware RAID level code */
	const char *name;		/* human-readable label */
} myrb_raidlevel_name_list[] = {
	{ MYRB_RAID_LEVEL0, "RAID0" },
	{ MYRB_RAID_LEVEL1, "RAID1" },
	{ MYRB_RAID_LEVEL3, "RAID3" },
	{ MYRB_RAID_LEVEL5, "RAID5" },
	{ MYRB_RAID_LEVEL6, "RAID6" },
	{ MYRB_RAID_JBOD, "JBOD" },
};
  72
  73static const char *myrb_raidlevel_name(enum myrb_raidlevel level)
  74{
  75        struct myrb_raidlevel_name_entry *entry = myrb_raidlevel_name_list;
  76        int i;
  77
  78        for (i = 0; i < ARRAY_SIZE(myrb_raidlevel_name_list); i++) {
  79                if (entry[i].level == level)
  80                        return entry[i].name;
  81        }
  82        return NULL;
  83}
  84
  85/*
  86 * myrb_create_mempools - allocates auxiliary data structures
  87 *
  88 * Return: true on success, false otherwise.
  89 */
  90static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
  91{
  92        size_t elem_size, elem_align;
  93
  94        elem_align = sizeof(struct myrb_sge);
  95        elem_size = cb->host->sg_tablesize * elem_align;
  96        cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev,
  97                                      elem_size, elem_align, 0);
  98        if (cb->sg_pool == NULL) {
  99                shost_printk(KERN_ERR, cb->host,
 100                             "Failed to allocate SG pool\n");
 101                return false;
 102        }
 103
 104        cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev,
 105                                       sizeof(struct myrb_dcdb),
 106                                       sizeof(unsigned int), 0);
 107        if (!cb->dcdb_pool) {
 108                dma_pool_destroy(cb->sg_pool);
 109                cb->sg_pool = NULL;
 110                shost_printk(KERN_ERR, cb->host,
 111                             "Failed to allocate DCDB pool\n");
 112                return false;
 113        }
 114
 115        snprintf(cb->work_q_name, sizeof(cb->work_q_name),
 116                 "myrb_wq_%d", cb->host->host_no);
 117        cb->work_q = create_singlethread_workqueue(cb->work_q_name);
 118        if (!cb->work_q) {
 119                dma_pool_destroy(cb->dcdb_pool);
 120                cb->dcdb_pool = NULL;
 121                dma_pool_destroy(cb->sg_pool);
 122                cb->sg_pool = NULL;
 123                shost_printk(KERN_ERR, cb->host,
 124                             "Failed to create workqueue\n");
 125                return false;
 126        }
 127
 128        /*
 129         * Initialize the Monitoring Timer.
 130         */
 131        INIT_DELAYED_WORK(&cb->monitor_work, myrb_monitor);
 132        queue_delayed_work(cb->work_q, &cb->monitor_work, 1);
 133
 134        return true;
 135}
 136
 137/*
 138 * myrb_destroy_mempools - tears down the memory pools for the controller
 139 */
 140static void myrb_destroy_mempools(struct myrb_hba *cb)
 141{
 142        cancel_delayed_work_sync(&cb->monitor_work);
 143        destroy_workqueue(cb->work_q);
 144
 145        dma_pool_destroy(cb->sg_pool);
 146        dma_pool_destroy(cb->dcdb_pool);
 147}
 148
 149/*
 150 * myrb_reset_cmd - reset command block
 151 */
 152static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk)
 153{
 154        union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
 155
 156        memset(mbox, 0, sizeof(union myrb_cmd_mbox));
 157        cmd_blk->status = 0;
 158}
 159
/*
 * myrb_qcmd - queues command block for execution
 *
 * Copies the command mailbox into the next free slot of the in-memory
 * mailbox ring and advances the ring pointers.  Called with
 * cb->queue_lock held (see myrb_exec_cmd).
 */
static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	union myrb_cmd_mbox *next_mbox = cb->next_cmd_mbox;

	cb->write_cmd_mbox(next_mbox, mbox);
	/*
	 * NOTE(review): when either of the two previously used slots has
	 * been consumed (words[0] == 0), get_cmd_mbox() is invoked on the
	 * register base — presumably to notify the controller of the new
	 * command; confirm against the DAC960 memory-mailbox protocol.
	 */
	if (cb->prev_cmd_mbox1->words[0] == 0 ||
	    cb->prev_cmd_mbox2->words[0] == 0)
		cb->get_cmd_mbox(base);
	/* Remember the two most recently written slots. */
	cb->prev_cmd_mbox2 = cb->prev_cmd_mbox1;
	cb->prev_cmd_mbox1 = next_mbox;
	/* Advance and wrap the ring pointer past the last slot. */
	if (++next_mbox > cb->last_cmd_mbox)
		next_mbox = cb->first_cmd_mbox;
	cb->next_cmd_mbox = next_mbox;
}
 179
 180/*
 181 * myrb_exec_cmd - executes command block and waits for completion.
 182 *
 183 * Return: command status
 184 */
 185static unsigned short myrb_exec_cmd(struct myrb_hba *cb,
 186                struct myrb_cmdblk *cmd_blk)
 187{
 188        DECLARE_COMPLETION_ONSTACK(cmpl);
 189        unsigned long flags;
 190
 191        cmd_blk->completion = &cmpl;
 192
 193        spin_lock_irqsave(&cb->queue_lock, flags);
 194        cb->qcmd(cb, cmd_blk);
 195        spin_unlock_irqrestore(&cb->queue_lock, flags);
 196
 197        wait_for_completion(&cmpl);
 198        return cmd_blk->status;
 199}
 200
 201/*
 202 * myrb_exec_type3 - executes a type 3 command and waits for completion.
 203 *
 204 * Return: command status
 205 */
 206static unsigned short myrb_exec_type3(struct myrb_hba *cb,
 207                enum myrb_cmd_opcode op, dma_addr_t addr)
 208{
 209        struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
 210        union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
 211        unsigned short status;
 212
 213        mutex_lock(&cb->dcmd_mutex);
 214        myrb_reset_cmd(cmd_blk);
 215        mbox->type3.id = MYRB_DCMD_TAG;
 216        mbox->type3.opcode = op;
 217        mbox->type3.addr = addr;
 218        status = myrb_exec_cmd(cb, cmd_blk);
 219        mutex_unlock(&cb->dcmd_mutex);
 220        return status;
 221}
 222
/*
 * myrb_exec_type3D - executes a type 3D command and waits for completion.
 * @cb: HBA to send the command to
 * @op: type 3D opcode
 * @sdev: SCSI device supplying channel and target
 * @pdev_info: buffer receiving the physical device state
 *
 * Maps @pdev_info for DMA, runs the command under the dcmd mutex and,
 * for the legacy GET_DEVICE_STATE opcode, translates the returned data
 * into the current myrb_pdev_state layout.
 *
 * Return: command status; MYRB_STATUS_SUBSYS_FAILED when the DMA
 * mapping fails.
 */
static unsigned short myrb_exec_type3D(struct myrb_hba *cb,
		enum myrb_cmd_opcode op, struct scsi_device *sdev,
		struct myrb_pdev_state *pdev_info)
{
	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;
	dma_addr_t pdev_info_addr;

	pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info,
					sizeof(struct myrb_pdev_state),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr))
		return MYRB_STATUS_SUBSYS_FAILED;

	/* dcmd_blk is shared; serialize its users. */
	mutex_lock(&cb->dcmd_mutex);
	myrb_reset_cmd(cmd_blk);
	mbox->type3D.id = MYRB_DCMD_TAG;
	mbox->type3D.opcode = op;
	mbox->type3D.channel = sdev->channel;
	mbox->type3D.target = sdev->id;
	mbox->type3D.addr = pdev_info_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	mutex_unlock(&cb->dcmd_mutex);
	/* Unmap before the CPU reads the DMA-written buffer. */
	dma_unmap_single(&cb->pdev->dev, pdev_info_addr,
			 sizeof(struct myrb_pdev_state), DMA_FROM_DEVICE);
	/* Old-format opcode: convert the state in place to the new layout. */
	if (status == MYRB_STATUS_SUCCESS &&
	    mbox->type3D.opcode == MYRB_CMD_GET_DEVICE_STATE_OLD)
		myrb_translate_devstate(pdev_info);

	return status;
}
 260
/*
 * Messages for the vendor-specific "drive killed" sense data; indexed
 * by the additional sense code qualifier (sshdr.ascq) in
 * myrb_get_event().
 */
static char *myrb_event_msg[] = {
	"killed because write recovery failed",
	"killed because of SCSI bus reset failure",
	"killed because of double check condition",
	"killed because it was removed",
	"killed because of gross error on SCSI chip",
	"killed because of bad tag returned from drive",
	"killed because of timeout on SCSI command",
	"killed because of reset SCSI command issued from system",
	"killed because busy or parity error count exceeded limit",
	"killed because of 'kill drive' command from system",
	"killed because of selection timeout",
	"killed due to SCSI phase sequence error",
	"killed due to unknown status",
};
 276
 277/**
 278 * myrb_get_event - get event log from HBA
 279 * @cb: pointer to the hba structure
 280 * @event: number of the event
 281 *
 282 * Execute a type 3E command and logs the event message
 283 */
 284static void myrb_get_event(struct myrb_hba *cb, unsigned int event)
 285{
 286        struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
 287        union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
 288        struct myrb_log_entry *ev_buf;
 289        dma_addr_t ev_addr;
 290        unsigned short status;
 291
 292        ev_buf = dma_alloc_coherent(&cb->pdev->dev,
 293                                    sizeof(struct myrb_log_entry),
 294                                    &ev_addr, GFP_KERNEL);
 295        if (!ev_buf)
 296                return;
 297
 298        myrb_reset_cmd(cmd_blk);
 299        mbox->type3E.id = MYRB_MCMD_TAG;
 300        mbox->type3E.opcode = MYRB_CMD_EVENT_LOG_OPERATION;
 301        mbox->type3E.optype = DAC960_V1_GetEventLogEntry;
 302        mbox->type3E.opqual = 1;
 303        mbox->type3E.ev_seq = event;
 304        mbox->type3E.addr = ev_addr;
 305        status = myrb_exec_cmd(cb, cmd_blk);
 306        if (status != MYRB_STATUS_SUCCESS)
 307                shost_printk(KERN_INFO, cb->host,
 308                             "Failed to get event log %d, status %04x\n",
 309                             event, status);
 310
 311        else if (ev_buf->seq_num == event) {
 312                struct scsi_sense_hdr sshdr;
 313
 314                memset(&sshdr, 0, sizeof(sshdr));
 315                scsi_normalize_sense(ev_buf->sense, 32, &sshdr);
 316
 317                if (sshdr.sense_key == VENDOR_SPECIFIC &&
 318                    sshdr.asc == 0x80 &&
 319                    sshdr.ascq < ARRAY_SIZE(myrb_event_msg))
 320                        shost_printk(KERN_CRIT, cb->host,
 321                                     "Physical drive %d:%d: %s\n",
 322                                     ev_buf->channel, ev_buf->target,
 323                                     myrb_event_msg[sshdr.ascq]);
 324                else
 325                        shost_printk(KERN_CRIT, cb->host,
 326                                     "Physical drive %d:%d: Sense: %X/%02X/%02X\n",
 327                                     ev_buf->channel, ev_buf->target,
 328                                     sshdr.sense_key, sshdr.asc, sshdr.ascq);
 329        }
 330
 331        dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry),
 332                          ev_buf, ev_addr);
 333}
 334
 335/*
 336 * myrb_get_errtable - retrieves the error table from the controller
 337 *
 338 * Executes a type 3 command and logs the error table from the controller.
 339 */
 340static void myrb_get_errtable(struct myrb_hba *cb)
 341{
 342        struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
 343        union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
 344        unsigned short status;
 345        struct myrb_error_entry old_table[MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS];
 346
 347        memcpy(&old_table, cb->err_table, sizeof(old_table));
 348
 349        myrb_reset_cmd(cmd_blk);
 350        mbox->type3.id = MYRB_MCMD_TAG;
 351        mbox->type3.opcode = MYRB_CMD_GET_ERROR_TABLE;
 352        mbox->type3.addr = cb->err_table_addr;
 353        status = myrb_exec_cmd(cb, cmd_blk);
 354        if (status == MYRB_STATUS_SUCCESS) {
 355                struct myrb_error_entry *table = cb->err_table;
 356                struct myrb_error_entry *new, *old;
 357                size_t err_table_offset;
 358                struct scsi_device *sdev;
 359
 360                shost_for_each_device(sdev, cb->host) {
 361                        if (sdev->channel >= myrb_logical_channel(cb->host))
 362                                continue;
 363                        err_table_offset = sdev->channel * MYRB_MAX_TARGETS
 364                                + sdev->id;
 365                        new = table + err_table_offset;
 366                        old = &old_table[err_table_offset];
 367                        if (new->parity_err == old->parity_err &&
 368                            new->soft_err == old->soft_err &&
 369                            new->hard_err == old->hard_err &&
 370                            new->misc_err == old->misc_err)
 371                                continue;
 372                        sdev_printk(KERN_CRIT, sdev,
 373                                    "Errors: Parity = %d, Soft = %d, Hard = %d, Misc = %d\n",
 374                                    new->parity_err, new->soft_err,
 375                                    new->hard_err, new->misc_err);
 376                }
 377        }
 378}
 379
 380/*
 381 * myrb_get_ldev_info - retrieves the logical device table from the controller
 382 *
 383 * Executes a type 3 command and updates the logical device table.
 384 *
 385 * Return: command status
 386 */
 387static unsigned short myrb_get_ldev_info(struct myrb_hba *cb)
 388{
 389        unsigned short status;
 390        int ldev_num, ldev_cnt = cb->enquiry->ldev_count;
 391        struct Scsi_Host *shost = cb->host;
 392
 393        status = myrb_exec_type3(cb, MYRB_CMD_GET_LDEV_INFO,
 394                                 cb->ldev_info_addr);
 395        if (status != MYRB_STATUS_SUCCESS)
 396                return status;
 397
 398        for (ldev_num = 0; ldev_num < ldev_cnt; ldev_num++) {
 399                struct myrb_ldev_info *old = NULL;
 400                struct myrb_ldev_info *new = cb->ldev_info_buf + ldev_num;
 401                struct scsi_device *sdev;
 402
 403                sdev = scsi_device_lookup(shost, myrb_logical_channel(shost),
 404                                          ldev_num, 0);
 405                if (!sdev) {
 406                        if (new->state == MYRB_DEVICE_OFFLINE)
 407                                continue;
 408                        shost_printk(KERN_INFO, shost,
 409                                     "Adding Logical Drive %d in state %s\n",
 410                                     ldev_num, myrb_devstate_name(new->state));
 411                        scsi_add_device(shost, myrb_logical_channel(shost),
 412                                        ldev_num, 0);
 413                        continue;
 414                }
 415                old = sdev->hostdata;
 416                if (new->state != old->state)
 417                        shost_printk(KERN_INFO, shost,
 418                                     "Logical Drive %d is now %s\n",
 419                                     ldev_num, myrb_devstate_name(new->state));
 420                if (new->wb_enabled != old->wb_enabled)
 421                        sdev_printk(KERN_INFO, sdev,
 422                                    "Logical Drive is now WRITE %s\n",
 423                                    (new->wb_enabled ? "BACK" : "THRU"));
 424                memcpy(old, new, sizeof(*new));
 425                scsi_device_put(sdev);
 426        }
 427        return status;
 428}
 429
 430/*
 431 * myrb_get_rbld_progress - get rebuild progress information
 432 *
 433 * Executes a type 3 command and returns the rebuild progress
 434 * information.
 435 *
 436 * Return: command status
 437 */
 438static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb,
 439                struct myrb_rbld_progress *rbld)
 440{
 441        struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
 442        union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
 443        struct myrb_rbld_progress *rbld_buf;
 444        dma_addr_t rbld_addr;
 445        unsigned short status;
 446
 447        rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
 448                                      sizeof(struct myrb_rbld_progress),
 449                                      &rbld_addr, GFP_KERNEL);
 450        if (!rbld_buf)
 451                return MYRB_STATUS_RBLD_NOT_CHECKED;
 452
 453        myrb_reset_cmd(cmd_blk);
 454        mbox->type3.id = MYRB_MCMD_TAG;
 455        mbox->type3.opcode = MYRB_CMD_GET_REBUILD_PROGRESS;
 456        mbox->type3.addr = rbld_addr;
 457        status = myrb_exec_cmd(cb, cmd_blk);
 458        if (rbld)
 459                memcpy(rbld, rbld_buf, sizeof(struct myrb_rbld_progress));
 460        dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
 461                          rbld_buf, rbld_addr);
 462        return status;
 463}
 464
/*
 * myrb_update_rbld_progress - updates the rebuild status
 *
 * Polls the rebuild progress, logs a per-device message describing the
 * rebuild outcome, and remembers the status in cb->last_rbld_status
 * for the next poll.
 */
static void myrb_update_rbld_progress(struct myrb_hba *cb)
{
	struct myrb_rbld_progress rbld_buf;
	unsigned short status;

	status = myrb_get_rbld_progress(cb, &rbld_buf);
	/*
	 * Rebuild just stopped (previous poll saw it running, now none
	 * is in progress): report it as a successful completion.
	 */
	if (status == MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS &&
	    cb->last_rbld_status == MYRB_STATUS_SUCCESS)
		status = MYRB_STATUS_RBLD_SUCCESS;
	if (status != MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS) {
		unsigned int blocks_done =
			rbld_buf.ldev_size - rbld_buf.blocks_left;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(cb->host,
					  myrb_logical_channel(cb->host),
					  rbld_buf.ldev_num, 0);
		/*
		 * NOTE(review): returning here also skips the
		 * cb->last_rbld_status update below — confirm this is
		 * intended when the device is not (yet) known.
		 */
		if (!sdev)
			return;

		switch (status) {
		case MYRB_STATUS_SUCCESS:
			/* >> 7 scales both terms so 100 * blocks cannot overflow. */
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild in Progress, %d%% completed\n",
				    (100 * (blocks_done >> 7))
				    / (rbld_buf.ldev_size >> 7));
			break;
		case MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Logical Drive Failure\n");
			break;
		case MYRB_STATUS_RBLD_FAILED_BADBLOCKS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Bad Blocks on Other Drives\n");
			break;
		case MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Failure of Drive Being Rebuilt\n");
			break;
		case MYRB_STATUS_RBLD_SUCCESS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Completed Successfully\n");
			break;
		case MYRB_STATUS_RBLD_SUCCESS_TERMINATED:
			sdev_printk(KERN_INFO, sdev,
				     "Rebuild Successfully Terminated\n");
			break;
		default:
			break;
		}
		scsi_device_put(sdev);
	}
	/* Remembered so the next poll can detect completion. */
	cb->last_rbld_status = status;
}
 524
 525/*
 526 * myrb_get_cc_progress - retrieve the rebuild status
 527 *
 528 * Execute a type 3 Command and fetch the rebuild / consistency check
 529 * status.
 530 */
 531static void myrb_get_cc_progress(struct myrb_hba *cb)
 532{
 533        struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
 534        union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
 535        struct myrb_rbld_progress *rbld_buf;
 536        dma_addr_t rbld_addr;
 537        unsigned short status;
 538
 539        rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
 540                                      sizeof(struct myrb_rbld_progress),
 541                                      &rbld_addr, GFP_KERNEL);
 542        if (!rbld_buf) {
 543                cb->need_cc_status = true;
 544                return;
 545        }
 546        myrb_reset_cmd(cmd_blk);
 547        mbox->type3.id = MYRB_MCMD_TAG;
 548        mbox->type3.opcode = MYRB_CMD_REBUILD_STAT;
 549        mbox->type3.addr = rbld_addr;
 550        status = myrb_exec_cmd(cb, cmd_blk);
 551        if (status == MYRB_STATUS_SUCCESS) {
 552                unsigned int ldev_num = rbld_buf->ldev_num;
 553                unsigned int ldev_size = rbld_buf->ldev_size;
 554                unsigned int blocks_done =
 555                        ldev_size - rbld_buf->blocks_left;
 556                struct scsi_device *sdev;
 557
 558                sdev = scsi_device_lookup(cb->host,
 559                                          myrb_logical_channel(cb->host),
 560                                          ldev_num, 0);
 561                if (sdev) {
 562                        sdev_printk(KERN_INFO, sdev,
 563                                    "Consistency Check in Progress: %d%% completed\n",
 564                                    (100 * (blocks_done >> 7))
 565                                    / (ldev_size >> 7));
 566                        scsi_device_put(sdev);
 567                }
 568        }
 569        dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
 570                          rbld_buf, rbld_addr);
 571}
 572
/*
 * myrb_bgi_control - updates background initialisation status
 *
 * Executes a type 3B command and logs transitions of the background
 * initialisation (BGI) state, keeping a copy in cb->bgi_status so the
 * next poll only reports changes.
 */
static void myrb_bgi_control(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_bgi_status *bgi, *last_bgi;
	dma_addr_t bgi_addr;
	struct scsi_device *sdev = NULL;
	unsigned short status;

	bgi = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
				 &bgi_addr, GFP_KERNEL);
	if (!bgi) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate bgi memory\n");
		return;
	}
	myrb_reset_cmd(cmd_blk);
	mbox->type3B.id = MYRB_DCMD_TAG;
	mbox->type3B.opcode = MYRB_CMD_BGI_CONTROL;
	/* optype 0x20: presumably the status-query sub-operation — confirm
	 * against the DAC960 firmware documentation. */
	mbox->type3B.optype = 0x20;
	mbox->type3B.addr = bgi_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	last_bgi = &cb->bgi_status;
	/*
	 * NOTE(review): bgi->ldev_num is read before the command status is
	 * checked; on failure the lookup uses whatever the buffer holds —
	 * sdev may then be NULL, which the code below tolerates.
	 */
	sdev = scsi_device_lookup(cb->host,
				  myrb_logical_channel(cb->host),
				  bgi->ldev_num, 0);
	switch (status) {
	case MYRB_STATUS_SUCCESS:
		/* BGI is running: log its current state transition. */
		switch (bgi->status) {
		case MYRB_BGI_INVALID:
			break;
		case MYRB_BGI_STARTED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Started\n");
			break;
		case MYRB_BGI_INPROGRESS:
			if (!sdev)
				break;
			/* Only log when progress actually advanced. */
			if (bgi->blocks_done == last_bgi->blocks_done &&
			    bgi->ldev_num == last_bgi->ldev_num)
				break;
			sdev_printk(KERN_INFO, sdev,
				 "Background Initialization in Progress: %d%% completed\n",
				 (100 * (bgi->blocks_done >> 7))
				 / (bgi->ldev_size >> 7));
			break;
		case MYRB_BGI_SUSPENDED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Suspended\n");
			break;
		case MYRB_BGI_CANCELLED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Cancelled\n");
			break;
		}
		/* Remember this snapshot for the next poll's comparison. */
		memcpy(&cb->bgi_status, bgi, sizeof(struct myrb_bgi_status));
		break;
	case MYRB_STATUS_BGI_SUCCESS:
		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Completed Successfully\n");
		cb->bgi_status.status = MYRB_BGI_INVALID;
		break;
	case MYRB_STATUS_BGI_ABORTED:
		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Aborted\n");
		fallthrough;
	case MYRB_STATUS_NO_BGI_INPROGRESS:
		cb->bgi_status.status = MYRB_BGI_INVALID;
		break;
	}
	if (sdev)
		scsi_device_put(sdev);
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
			  bgi, bgi_addr);
}
 661
/*
 * myrb_hba_enquiry - updates the controller status
 *
 * Executes a DAC_V1_Enquiry command, compares the fresh data with the
 * previous snapshot, logs changes and sets the cb->need_* flags that
 * drive the monitor's follow-up work.
 *
 * Return: command status
 */
static unsigned short myrb_hba_enquiry(struct myrb_hba *cb)
{
	struct myrb_enquiry old, *new;
	unsigned short status;

	/* Snapshot the previous enquiry data for comparison. */
	memcpy(&old, cb->enquiry, sizeof(struct myrb_enquiry));

	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr);
	if (status != MYRB_STATUS_SUCCESS)
		return status;

	new = cb->enquiry;
	/* Log logical drives that appeared since the last poll. */
	if (new->ldev_count > old.ldev_count) {
		int ldev_num = old.ldev_count - 1;

		while (++ldev_num < new->ldev_count)
			shost_printk(KERN_CRIT, cb->host,
				     "Logical Drive %d Now Exists\n",
				     ldev_num);
	}
	/* ... and those that disappeared. */
	if (new->ldev_count < old.ldev_count) {
		int ldev_num = new->ldev_count - 1;

		while (++ldev_num < old.ldev_count)
			shost_printk(KERN_CRIT, cb->host,
				     "Logical Drive %d No Longer Exists\n",
				     ldev_num);
	}
	if (new->status.deferred != old.status.deferred)
		shost_printk(KERN_CRIT, cb->host,
			     "Deferred Write Error Flag is now %s\n",
			     (new->status.deferred ? "TRUE" : "FALSE"));
	/* New event log entries: record sequence, fetch them later. */
	if (new->ev_seq != old.ev_seq) {
		cb->new_ev_seq = new->ev_seq;
		cb->need_err_info = true;
		shost_printk(KERN_INFO, cb->host,
			     "Event log %d/%d (%d/%d) available\n",
			     cb->old_ev_seq, cb->new_ev_seq,
			     old.ev_seq, new->ev_seq);
	}
	/* Critical/offline/total drive counts changed: rescan ldevs. */
	if ((new->ldev_critical > 0 &&
	     new->ldev_critical != old.ldev_critical) ||
	    (new->ldev_offline > 0 &&
	     new->ldev_offline != old.ldev_offline) ||
	    (new->ldev_count != old.ldev_count)) {
		shost_printk(KERN_INFO, cb->host,
			     "Logical drive count changed (%d/%d/%d)\n",
			     new->ldev_critical,
			     new->ldev_offline,
			     new->ldev_count);
		cb->need_ldev_info = true;
	}
	/*
	 * Dead physical devices (or the secondary interval elapsing)
	 * trigger a BGI status query on the next monitor cycle.
	 */
	if (new->pdev_dead > 0 ||
	    new->pdev_dead != old.pdev_dead ||
	    time_after_eq(jiffies, cb->secondary_monitor_time
			  + MYRB_SECONDARY_MONITOR_INTERVAL)) {
		cb->need_bgi_status = cb->bgi_status_supported;
		cb->secondary_monitor_time = jiffies;
	}
	/* A rebuild is (or was) running: poll its progress. */
	if (new->rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
	    new->rbld == MYRB_BG_RBLD_IN_PROGRESS ||
	    old.rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
	    old.rbld == MYRB_BG_RBLD_IN_PROGRESS) {
		cb->need_rbld = true;
		cb->rbld_first = (new->ldev_critical < old.ldev_critical);
	}
	/* A consistency check was running: report how it ended. */
	if (old.rbld == MYRB_BG_CHECK_IN_PROGRESS)
		switch (new->rbld) {
		case MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Completed Successfully\n");
			break;
		case MYRB_STDBY_RBLD_IN_PROGRESS:
		case MYRB_BG_RBLD_IN_PROGRESS:
			break;
		case MYRB_BG_CHECK_IN_PROGRESS:
			cb->need_cc_status = true;
			break;
		case MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Completed with Error\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Physical Device Failed\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Logical Drive Failed\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Other Causes\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Successfully Terminated\n");
			break;
		}
	else if (new->rbld == MYRB_BG_CHECK_IN_PROGRESS)
		cb->need_cc_status = true;

	return MYRB_STATUS_SUCCESS;
}
 773
 774/*
 775 * myrb_set_pdev_state - sets the device state for a physical device
 776 *
 777 * Return: command status
 778 */
 779static unsigned short myrb_set_pdev_state(struct myrb_hba *cb,
 780                struct scsi_device *sdev, enum myrb_devstate state)
 781{
 782        struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
 783        union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
 784        unsigned short status;
 785
 786        mutex_lock(&cb->dcmd_mutex);
 787        mbox->type3D.opcode = MYRB_CMD_START_DEVICE;
 788        mbox->type3D.id = MYRB_DCMD_TAG;
 789        mbox->type3D.channel = sdev->channel;
 790        mbox->type3D.target = sdev->id;
 791        mbox->type3D.state = state & 0x1F;
 792        status = myrb_exec_cmd(cb, cmd_blk);
 793        mutex_unlock(&cb->dcmd_mutex);
 794
 795        return status;
 796}
 797
 798/*
 799 * myrb_enable_mmio - enables the Memory Mailbox Interface
 800 *
 801 * PD and P controller types have no memory mailbox, but still need the
 802 * other dma mapped memory.
 803 *
 804 * Return: true on success, false otherwise.
 805 */
 806static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn)
 807{
 808        void __iomem *base = cb->io_base;
 809        struct pci_dev *pdev = cb->pdev;
 810        size_t err_table_size;
 811        size_t ldev_info_size;
 812        union myrb_cmd_mbox *cmd_mbox_mem;
 813        struct myrb_stat_mbox *stat_mbox_mem;
 814        union myrb_cmd_mbox mbox;
 815        unsigned short status;
 816
 817        memset(&mbox, 0, sizeof(union myrb_cmd_mbox));
 818
 819        if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
 820                dev_err(&pdev->dev, "DMA mask out of range\n");
 821                return false;
 822        }
 823
 824        cb->enquiry = dma_alloc_coherent(&pdev->dev,
 825                                         sizeof(struct myrb_enquiry),
 826                                         &cb->enquiry_addr, GFP_KERNEL);
 827        if (!cb->enquiry)
 828                return false;
 829
 830        err_table_size = sizeof(struct myrb_error_entry) *
 831                MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
 832        cb->err_table = dma_alloc_coherent(&pdev->dev, err_table_size,
 833                                           &cb->err_table_addr, GFP_KERNEL);
 834        if (!cb->err_table)
 835                return false;
 836
 837        ldev_info_size = sizeof(struct myrb_ldev_info) * MYRB_MAX_LDEVS;
 838        cb->ldev_info_buf = dma_alloc_coherent(&pdev->dev, ldev_info_size,
 839                                               &cb->ldev_info_addr, GFP_KERNEL);
 840        if (!cb->ldev_info_buf)
 841                return false;
 842
 843        /*
 844         * Skip mailbox initialisation for PD and P Controllers
 845         */
 846        if (!mmio_init_fn)
 847                return true;
 848
 849        /* These are the base addresses for the command memory mailbox array */
 850        cb->cmd_mbox_size =  MYRB_CMD_MBOX_COUNT * sizeof(union myrb_cmd_mbox);
 851        cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev,
 852                                                cb->cmd_mbox_size,
 853                                                &cb->cmd_mbox_addr,
 854                                                GFP_KERNEL);
 855        if (!cb->first_cmd_mbox)
 856                return false;
 857
 858        cmd_mbox_mem = cb->first_cmd_mbox;
 859        cmd_mbox_mem += MYRB_CMD_MBOX_COUNT - 1;
 860        cb->last_cmd_mbox = cmd_mbox_mem;
 861        cb->next_cmd_mbox = cb->first_cmd_mbox;
 862        cb->prev_cmd_mbox1 = cb->last_cmd_mbox;
 863        cb->prev_cmd_mbox2 = cb->last_cmd_mbox - 1;
 864
 865        /* These are the base addresses for the status memory mailbox array */
 866        cb->stat_mbox_size = MYRB_STAT_MBOX_COUNT *
 867            sizeof(struct myrb_stat_mbox);
 868        cb->first_stat_mbox = dma_alloc_coherent(&pdev->dev,
 869                                                 cb->stat_mbox_size,
 870                                                 &cb->stat_mbox_addr,
 871                                                 GFP_KERNEL);
 872        if (!cb->first_stat_mbox)
 873                return false;
 874
 875        stat_mbox_mem = cb->first_stat_mbox;
 876        stat_mbox_mem += MYRB_STAT_MBOX_COUNT - 1;
 877        cb->last_stat_mbox = stat_mbox_mem;
 878        cb->next_stat_mbox = cb->first_stat_mbox;
 879
 880        /* Enable the Memory Mailbox Interface. */
 881        cb->dual_mode_interface = true;
 882        mbox.typeX.opcode = 0x2B;
 883        mbox.typeX.id = 0;
 884        mbox.typeX.opcode2 = 0x14;
 885        mbox.typeX.cmd_mbox_addr = cb->cmd_mbox_addr;
 886        mbox.typeX.stat_mbox_addr = cb->stat_mbox_addr;
 887
 888        status = mmio_init_fn(pdev, base, &mbox);
 889        if (status != MYRB_STATUS_SUCCESS) {
 890                cb->dual_mode_interface = false;
 891                mbox.typeX.opcode2 = 0x10;
 892                status = mmio_init_fn(pdev, base, &mbox);
 893                if (status != MYRB_STATUS_SUCCESS) {
 894                        dev_err(&pdev->dev,
 895                                "Failed to enable mailbox, statux %02X\n",
 896                                status);
 897                        return false;
 898                }
 899        }
 900        return true;
 901}
 902
 903/*
 904 * myrb_get_hba_config - reads the configuration information
 905 *
 906 * Reads the configuration information from the controller and
 907 * initializes the controller structure.
 908 *
 909 * Return: 0 on success, errno otherwise
 910 */
 911static int myrb_get_hba_config(struct myrb_hba *cb)
 912{
 913        struct myrb_enquiry2 *enquiry2;
 914        dma_addr_t enquiry2_addr;
 915        struct myrb_config2 *config2;
 916        dma_addr_t config2_addr;
 917        struct Scsi_Host *shost = cb->host;
 918        struct pci_dev *pdev = cb->pdev;
 919        int pchan_max = 0, pchan_cur = 0;
 920        unsigned short status;
 921        int ret = -ENODEV, memsize = 0;
 922
 923        enquiry2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
 924                                      &enquiry2_addr, GFP_KERNEL);
 925        if (!enquiry2) {
 926                shost_printk(KERN_ERR, cb->host,
 927                             "Failed to allocate V1 enquiry2 memory\n");
 928                return -ENOMEM;
 929        }
 930        config2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_config2),
 931                                     &config2_addr, GFP_KERNEL);
 932        if (!config2) {
 933                shost_printk(KERN_ERR, cb->host,
 934                             "Failed to allocate V1 config2 memory\n");
 935                dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
 936                                  enquiry2, enquiry2_addr);
 937                return -ENOMEM;
 938        }
 939        mutex_lock(&cb->dma_mutex);
 940        status = myrb_hba_enquiry(cb);
 941        mutex_unlock(&cb->dma_mutex);
 942        if (status != MYRB_STATUS_SUCCESS) {
 943                shost_printk(KERN_WARNING, cb->host,
 944                             "Failed it issue V1 Enquiry\n");
 945                goto out_free;
 946        }
 947
 948        status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY2, enquiry2_addr);
 949        if (status != MYRB_STATUS_SUCCESS) {
 950                shost_printk(KERN_WARNING, cb->host,
 951                             "Failed to issue V1 Enquiry2\n");
 952                goto out_free;
 953        }
 954
 955        status = myrb_exec_type3(cb, MYRB_CMD_READ_CONFIG2, config2_addr);
 956        if (status != MYRB_STATUS_SUCCESS) {
 957                shost_printk(KERN_WARNING, cb->host,
 958                             "Failed to issue ReadConfig2\n");
 959                goto out_free;
 960        }
 961
 962        status = myrb_get_ldev_info(cb);
 963        if (status != MYRB_STATUS_SUCCESS) {
 964                shost_printk(KERN_WARNING, cb->host,
 965                             "Failed to get logical drive information\n");
 966                goto out_free;
 967        }
 968
 969        /*
 970         * Initialize the Controller Model Name and Full Model Name fields.
 971         */
 972        switch (enquiry2->hw.sub_model) {
 973        case DAC960_V1_P_PD_PU:
 974                if (enquiry2->scsi_cap.bus_speed == MYRB_SCSI_SPEED_ULTRA)
 975                        strcpy(cb->model_name, "DAC960PU");
 976                else
 977                        strcpy(cb->model_name, "DAC960PD");
 978                break;
 979        case DAC960_V1_PL:
 980                strcpy(cb->model_name, "DAC960PL");
 981                break;
 982        case DAC960_V1_PG:
 983                strcpy(cb->model_name, "DAC960PG");
 984                break;
 985        case DAC960_V1_PJ:
 986                strcpy(cb->model_name, "DAC960PJ");
 987                break;
 988        case DAC960_V1_PR:
 989                strcpy(cb->model_name, "DAC960PR");
 990                break;
 991        case DAC960_V1_PT:
 992                strcpy(cb->model_name, "DAC960PT");
 993                break;
 994        case DAC960_V1_PTL0:
 995                strcpy(cb->model_name, "DAC960PTL0");
 996                break;
 997        case DAC960_V1_PRL:
 998                strcpy(cb->model_name, "DAC960PRL");
 999                break;
1000        case DAC960_V1_PTL1:
1001                strcpy(cb->model_name, "DAC960PTL1");
1002                break;
1003        case DAC960_V1_1164P:
1004                strcpy(cb->model_name, "eXtremeRAID 1100");
1005                break;
1006        default:
1007                shost_printk(KERN_WARNING, cb->host,
1008                             "Unknown Model %X\n",
1009                             enquiry2->hw.sub_model);
1010                goto out;
1011        }
1012        /*
1013         * Initialize the Controller Firmware Version field and verify that it
1014         * is a supported firmware version.
1015         * The supported firmware versions are:
1016         *
1017         * DAC1164P                 5.06 and above
1018         * DAC960PTL/PRL/PJ/PG      4.06 and above
1019         * DAC960PU/PD/PL           3.51 and above
1020         * DAC960PU/PD/PL/P         2.73 and above
1021         */
1022#if defined(CONFIG_ALPHA)
1023        /*
1024         * DEC Alpha machines were often equipped with DAC960 cards that were
1025         * OEMed from Mylex, and had their own custom firmware. Version 2.70,
1026         * the last custom FW revision to be released by DEC for these older
1027         * controllers, appears to work quite well with this driver.
1028         *
1029         * Cards tested successfully were several versions each of the PD and
1030         * PU, called by DEC the KZPSC and KZPAC, respectively, and having
1031         * the Manufacturer Numbers (from Mylex), usually on a sticker on the
1032         * back of the board, of:
1033         *
1034         * KZPSC:  D040347 (1-channel) or D040348 (2-channel)
1035         *         or D040349 (3-channel)
1036         * KZPAC:  D040395 (1-channel) or D040396 (2-channel)
1037         *         or D040397 (3-channel)
1038         */
1039# define FIRMWARE_27X   "2.70"
1040#else
1041# define FIRMWARE_27X   "2.73"
1042#endif
1043
1044        if (enquiry2->fw.major_version == 0) {
1045                enquiry2->fw.major_version = cb->enquiry->fw_major_version;
1046                enquiry2->fw.minor_version = cb->enquiry->fw_minor_version;
1047                enquiry2->fw.firmware_type = '0';
1048                enquiry2->fw.turn_id = 0;
1049        }
1050        snprintf(cb->fw_version, sizeof(cb->fw_version),
1051                "%u.%02u-%c-%02u",
1052                enquiry2->fw.major_version,
1053                enquiry2->fw.minor_version,
1054                enquiry2->fw.firmware_type,
1055                enquiry2->fw.turn_id);
1056        if (!((enquiry2->fw.major_version == 5 &&
1057               enquiry2->fw.minor_version >= 6) ||
1058              (enquiry2->fw.major_version == 4 &&
1059               enquiry2->fw.minor_version >= 6) ||
1060              (enquiry2->fw.major_version == 3 &&
1061               enquiry2->fw.minor_version >= 51) ||
1062              (enquiry2->fw.major_version == 2 &&
1063               strcmp(cb->fw_version, FIRMWARE_27X) >= 0))) {
1064                shost_printk(KERN_WARNING, cb->host,
1065                        "Firmware Version '%s' unsupported\n",
1066                        cb->fw_version);
1067                goto out;
1068        }
1069        /*
1070         * Initialize the Channels, Targets, Memory Size, and SAF-TE
1071         * Enclosure Management Enabled fields.
1072         */
1073        switch (enquiry2->hw.model) {
1074        case MYRB_5_CHANNEL_BOARD:
1075                pchan_max = 5;
1076                break;
1077        case MYRB_3_CHANNEL_BOARD:
1078        case MYRB_3_CHANNEL_ASIC_DAC:
1079                pchan_max = 3;
1080                break;
1081        case MYRB_2_CHANNEL_BOARD:
1082                pchan_max = 2;
1083                break;
1084        default:
1085                pchan_max = enquiry2->cfg_chan;
1086                break;
1087        }
1088        pchan_cur = enquiry2->cur_chan;
1089        if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_32BIT)
1090                cb->bus_width = 32;
1091        else if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_16BIT)
1092                cb->bus_width = 16;
1093        else
1094                cb->bus_width = 8;
1095        cb->ldev_block_size = enquiry2->ldev_block_size;
1096        shost->max_channel = pchan_cur;
1097        shost->max_id = enquiry2->max_targets;
1098        memsize = enquiry2->mem_size >> 20;
1099        cb->safte_enabled = (enquiry2->fault_mgmt == MYRB_FAULT_SAFTE);
1100        /*
1101         * Initialize the Controller Queue Depth, Driver Queue Depth,
1102         * Logical Drive Count, Maximum Blocks per Command, Controller
1103         * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
1104         * The Driver Queue Depth must be at most one less than the
1105         * Controller Queue Depth to allow for an automatic drive
1106         * rebuild operation.
1107         */
1108        shost->can_queue = cb->enquiry->max_tcq;
1109        if (shost->can_queue < 3)
1110                shost->can_queue = enquiry2->max_cmds;
1111        if (shost->can_queue < 3)
1112                /* Play safe and disable TCQ */
1113                shost->can_queue = 1;
1114
1115        if (shost->can_queue > MYRB_CMD_MBOX_COUNT - 2)
1116                shost->can_queue = MYRB_CMD_MBOX_COUNT - 2;
1117        shost->max_sectors = enquiry2->max_sectors;
1118        shost->sg_tablesize = enquiry2->max_sge;
1119        if (shost->sg_tablesize > MYRB_SCATTER_GATHER_LIMIT)
1120                shost->sg_tablesize = MYRB_SCATTER_GATHER_LIMIT;
1121        /*
1122         * Initialize the Stripe Size, Segment Size, and Geometry Translation.
1123         */
1124        cb->stripe_size = config2->blocks_per_stripe * config2->block_factor
1125                >> (10 - MYRB_BLKSIZE_BITS);
1126        cb->segment_size = config2->blocks_per_cacheline * config2->block_factor
1127                >> (10 - MYRB_BLKSIZE_BITS);
1128        /* Assume 255/63 translation */
1129        cb->ldev_geom_heads = 255;
1130        cb->ldev_geom_sectors = 63;
1131        if (config2->drive_geometry) {
1132                cb->ldev_geom_heads = 128;
1133                cb->ldev_geom_sectors = 32;
1134        }
1135
1136        /*
1137         * Initialize the Background Initialization Status.
1138         */
1139        if ((cb->fw_version[0] == '4' &&
1140             strcmp(cb->fw_version, "4.08") >= 0) ||
1141            (cb->fw_version[0] == '5' &&
1142             strcmp(cb->fw_version, "5.08") >= 0)) {
1143                cb->bgi_status_supported = true;
1144                myrb_bgi_control(cb);
1145        }
1146        cb->last_rbld_status = MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS;
1147        ret = 0;
1148
1149out:
1150        shost_printk(KERN_INFO, cb->host,
1151                "Configuring %s PCI RAID Controller\n", cb->model_name);
1152        shost_printk(KERN_INFO, cb->host,
1153                "  Firmware Version: %s, Memory Size: %dMB\n",
1154                cb->fw_version, memsize);
1155        if (cb->io_addr == 0)
1156                shost_printk(KERN_INFO, cb->host,
1157                        "  I/O Address: n/a, PCI Address: 0x%lX, IRQ Channel: %d\n",
1158                        (unsigned long)cb->pci_addr, cb->irq);
1159        else
1160                shost_printk(KERN_INFO, cb->host,
1161                        "  I/O Address: 0x%lX, PCI Address: 0x%lX, IRQ Channel: %d\n",
1162                        (unsigned long)cb->io_addr, (unsigned long)cb->pci_addr,
1163                        cb->irq);
1164        shost_printk(KERN_INFO, cb->host,
1165                "  Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
1166                cb->host->can_queue, cb->host->max_sectors);
1167        shost_printk(KERN_INFO, cb->host,
1168                     "  Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
1169                     cb->host->can_queue, cb->host->sg_tablesize,
1170                     MYRB_SCATTER_GATHER_LIMIT);
1171        shost_printk(KERN_INFO, cb->host,
1172                     "  Stripe Size: %dKB, Segment Size: %dKB, BIOS Geometry: %d/%d%s\n",
1173                     cb->stripe_size, cb->segment_size,
1174                     cb->ldev_geom_heads, cb->ldev_geom_sectors,
1175                     cb->safte_enabled ?
1176                     "  SAF-TE Enclosure Management Enabled" : "");
1177        shost_printk(KERN_INFO, cb->host,
1178                     "  Physical: %d/%d channels %d/%d/%d devices\n",
1179                     pchan_cur, pchan_max, 0, cb->enquiry->pdev_dead,
1180                     cb->host->max_id);
1181
1182        shost_printk(KERN_INFO, cb->host,
1183                     "  Logical: 1/1 channels, %d/%d disks\n",
1184                     cb->enquiry->ldev_count, MYRB_MAX_LDEVS);
1185
1186out_free:
1187        dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
1188                          enquiry2, enquiry2_addr);
1189        dma_free_coherent(&pdev->dev, sizeof(struct myrb_config2),
1190                          config2, config2_addr);
1191
1192        return ret;
1193}
1194
1195/*
1196 * myrb_unmap - unmaps controller structures
1197 */
1198static void myrb_unmap(struct myrb_hba *cb)
1199{
1200        if (cb->ldev_info_buf) {
1201                size_t ldev_info_size = sizeof(struct myrb_ldev_info) *
1202                        MYRB_MAX_LDEVS;
1203                dma_free_coherent(&cb->pdev->dev, ldev_info_size,
1204                                  cb->ldev_info_buf, cb->ldev_info_addr);
1205                cb->ldev_info_buf = NULL;
1206        }
1207        if (cb->err_table) {
1208                size_t err_table_size = sizeof(struct myrb_error_entry) *
1209                        MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
1210                dma_free_coherent(&cb->pdev->dev, err_table_size,
1211                                  cb->err_table, cb->err_table_addr);
1212                cb->err_table = NULL;
1213        }
1214        if (cb->enquiry) {
1215                dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_enquiry),
1216                                  cb->enquiry, cb->enquiry_addr);
1217                cb->enquiry = NULL;
1218        }
1219        if (cb->first_stat_mbox) {
1220                dma_free_coherent(&cb->pdev->dev, cb->stat_mbox_size,
1221                                  cb->first_stat_mbox, cb->stat_mbox_addr);
1222                cb->first_stat_mbox = NULL;
1223        }
1224        if (cb->first_cmd_mbox) {
1225                dma_free_coherent(&cb->pdev->dev, cb->cmd_mbox_size,
1226                                  cb->first_cmd_mbox, cb->cmd_mbox_addr);
1227                cb->first_cmd_mbox = NULL;
1228        }
1229}
1230
1231/*
1232 * myrb_cleanup - cleanup controller structures
1233 */
1234static void myrb_cleanup(struct myrb_hba *cb)
1235{
1236        struct pci_dev *pdev = cb->pdev;
1237
1238        /* Free the memory mailbox, status, and related structures */
1239        myrb_unmap(cb);
1240
1241        if (cb->mmio_base) {
1242                cb->disable_intr(cb->io_base);
1243                iounmap(cb->mmio_base);
1244        }
1245        if (cb->irq)
1246                free_irq(cb->irq, cb);
1247        if (cb->io_addr)
1248                release_region(cb->io_addr, 0x80);
1249        pci_set_drvdata(pdev, NULL);
1250        pci_disable_device(pdev);
1251        scsi_host_put(cb->host);
1252}
1253
1254static int myrb_host_reset(struct scsi_cmnd *scmd)
1255{
1256        struct Scsi_Host *shost = scmd->device->host;
1257        struct myrb_hba *cb = shost_priv(shost);
1258
1259        cb->reset(cb->io_base);
1260        return SUCCESS;
1261}
1262
1263static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
1264                struct scsi_cmnd *scmd)
1265{
1266        struct request *rq = scsi_cmd_to_rq(scmd);
1267        struct myrb_hba *cb = shost_priv(shost);
1268        struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
1269        union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
1270        struct myrb_dcdb *dcdb;
1271        dma_addr_t dcdb_addr;
1272        struct scsi_device *sdev = scmd->device;
1273        struct scatterlist *sgl;
1274        unsigned long flags;
1275        int nsge;
1276
1277        myrb_reset_cmd(cmd_blk);
1278        dcdb = dma_pool_alloc(cb->dcdb_pool, GFP_ATOMIC, &dcdb_addr);
1279        if (!dcdb)
1280                return SCSI_MLQUEUE_HOST_BUSY;
1281        nsge = scsi_dma_map(scmd);
1282        if (nsge > 1) {
1283                dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr);
1284                scmd->result = (DID_ERROR << 16);
1285                scmd->scsi_done(scmd);
1286                return 0;
1287        }
1288
1289        mbox->type3.opcode = MYRB_CMD_DCDB;
1290        mbox->type3.id = rq->tag + 3;
1291        mbox->type3.addr = dcdb_addr;
1292        dcdb->channel = sdev->channel;
1293        dcdb->target = sdev->id;
1294        switch (scmd->sc_data_direction) {
1295        case DMA_NONE:
1296                dcdb->data_xfer = MYRB_DCDB_XFER_NONE;
1297                break;
1298        case DMA_TO_DEVICE:
1299                dcdb->data_xfer = MYRB_DCDB_XFER_SYSTEM_TO_DEVICE;
1300                break;
1301        case DMA_FROM_DEVICE:
1302                dcdb->data_xfer = MYRB_DCDB_XFER_DEVICE_TO_SYSTEM;
1303                break;
1304        default:
1305                dcdb->data_xfer = MYRB_DCDB_XFER_ILLEGAL;
1306                break;
1307        }
1308        dcdb->early_status = false;
1309        if (rq->timeout <= 10)
1310                dcdb->timeout = MYRB_DCDB_TMO_10_SECS;
1311        else if (rq->timeout <= 60)
1312                dcdb->timeout = MYRB_DCDB_TMO_60_SECS;
1313        else if (rq->timeout <= 600)
1314                dcdb->timeout = MYRB_DCDB_TMO_10_MINS;
1315        else
1316                dcdb->timeout = MYRB_DCDB_TMO_24_HRS;
1317        dcdb->no_autosense = false;
1318        dcdb->allow_disconnect = true;
1319        sgl = scsi_sglist(scmd);
1320        dcdb->dma_addr = sg_dma_address(sgl);
1321        if (sg_dma_len(sgl) > USHRT_MAX) {
1322                dcdb->xfer_len_lo = sg_dma_len(sgl) & 0xffff;
1323                dcdb->xfer_len_hi4 = sg_dma_len(sgl) >> 16;
1324        } else {
1325                dcdb->xfer_len_lo = sg_dma_len(sgl);
1326                dcdb->xfer_len_hi4 = 0;
1327        }
1328        dcdb->cdb_len = scmd->cmd_len;
1329        dcdb->sense_len = sizeof(dcdb->sense);
1330        memcpy(&dcdb->cdb, scmd->cmnd, scmd->cmd_len);
1331
1332        spin_lock_irqsave(&cb->queue_lock, flags);
1333        cb->qcmd(cb, cmd_blk);
1334        spin_unlock_irqrestore(&cb->queue_lock, flags);
1335        return 0;
1336}
1337
1338static void myrb_inquiry(struct myrb_hba *cb,
1339                struct scsi_cmnd *scmd)
1340{
1341        unsigned char inq[36] = {
1342                0x00, 0x00, 0x03, 0x02, 0x20, 0x00, 0x01, 0x00,
1343                0x4d, 0x59, 0x4c, 0x45, 0x58, 0x20, 0x20, 0x20,
1344                0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1345                0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1346                0x20, 0x20, 0x20, 0x20,
1347        };
1348
1349        if (cb->bus_width > 16)
1350                inq[7] |= 1 << 6;
1351        if (cb->bus_width > 8)
1352                inq[7] |= 1 << 5;
1353        memcpy(&inq[16], cb->model_name, 16);
1354        memcpy(&inq[32], cb->fw_version, 1);
1355        memcpy(&inq[33], &cb->fw_version[2], 2);
1356        memcpy(&inq[35], &cb->fw_version[7], 1);
1357
1358        scsi_sg_copy_from_buffer(scmd, (void *)inq, 36);
1359}
1360
1361static void
1362myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd,
1363                struct myrb_ldev_info *ldev_info)
1364{
1365        unsigned char modes[32], *mode_pg;
1366        bool dbd;
1367        size_t mode_len;
1368
1369        dbd = (scmd->cmnd[1] & 0x08) == 0x08;
1370        if (dbd) {
1371                mode_len = 24;
1372                mode_pg = &modes[4];
1373        } else {
1374                mode_len = 32;
1375                mode_pg = &modes[12];
1376        }
1377        memset(modes, 0, sizeof(modes));
1378        modes[0] = mode_len - 1;
1379        if (!dbd) {
1380                unsigned char *block_desc = &modes[4];
1381
1382                modes[3] = 8;
1383                put_unaligned_be32(ldev_info->size, &block_desc[0]);
1384                put_unaligned_be32(cb->ldev_block_size, &block_desc[5]);
1385        }
1386        mode_pg[0] = 0x08;
1387        mode_pg[1] = 0x12;
1388        if (ldev_info->wb_enabled)
1389                mode_pg[2] |= 0x04;
1390        if (cb->segment_size) {
1391                mode_pg[2] |= 0x08;
1392                put_unaligned_be16(cb->segment_size, &mode_pg[14]);
1393        }
1394
1395        scsi_sg_copy_from_buffer(scmd, modes, mode_len);
1396}
1397
/*
 * myrb_request_sense - emulate REQUEST SENSE for a logical drive by
 * returning a NO SENSE payload in the command's data buffer.
 */
static void myrb_request_sense(struct myrb_hba *cb,
		struct scsi_cmnd *scmd)
{
	scsi_build_sense(scmd, 0, NO_SENSE, 0, 0);
	scsi_sg_copy_from_buffer(scmd, scmd->sense_buffer,
				 SCSI_SENSE_BUFFERSIZE);
}
1405
1406static void myrb_read_capacity(struct myrb_hba *cb, struct scsi_cmnd *scmd,
1407                struct myrb_ldev_info *ldev_info)
1408{
1409        unsigned char data[8];
1410
1411        dev_dbg(&scmd->device->sdev_gendev,
1412                "Capacity %u, blocksize %u\n",
1413                ldev_info->size, cb->ldev_block_size);
1414        put_unaligned_be32(ldev_info->size - 1, &data[0]);
1415        put_unaligned_be32(cb->ldev_block_size, &data[4]);
1416        scsi_sg_copy_from_buffer(scmd, data, 8);
1417}
1418
1419static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
1420                struct scsi_cmnd *scmd)
1421{
1422        struct myrb_hba *cb = shost_priv(shost);
1423        struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
1424        union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
1425        struct myrb_ldev_info *ldev_info;
1426        struct scsi_device *sdev = scmd->device;
1427        struct scatterlist *sgl;
1428        unsigned long flags;
1429        u64 lba;
1430        u32 block_cnt;
1431        int nsge;
1432
1433        ldev_info = sdev->hostdata;
1434        if (ldev_info->state != MYRB_DEVICE_ONLINE &&
1435            ldev_info->state != MYRB_DEVICE_WO) {
1436                dev_dbg(&shost->shost_gendev, "ldev %u in state %x, skip\n",
1437                        sdev->id, ldev_info ? ldev_info->state : 0xff);
1438                scmd->result = (DID_BAD_TARGET << 16);
1439                scmd->scsi_done(scmd);
1440                return 0;
1441        }
1442        switch (scmd->cmnd[0]) {
1443        case TEST_UNIT_READY:
1444                scmd->result = (DID_OK << 16);
1445                scmd->scsi_done(scmd);
1446                return 0;
1447        case INQUIRY:
1448                if (scmd->cmnd[1] & 1) {
1449                        /* Illegal request, invalid field in CDB */
1450                        scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
1451                } else {
1452                        myrb_inquiry(cb, scmd);
1453                        scmd->result = (DID_OK << 16);
1454                }
1455                scmd->scsi_done(scmd);
1456                return 0;
1457        case SYNCHRONIZE_CACHE:
1458                scmd->result = (DID_OK << 16);
1459                scmd->scsi_done(scmd);
1460                return 0;
1461        case MODE_SENSE:
1462                if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
1463                    (scmd->cmnd[2] & 0x3F) != 0x08) {
1464                        /* Illegal request, invalid field in CDB */
1465                        scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
1466                } else {
1467                        myrb_mode_sense(cb, scmd, ldev_info);
1468                        scmd->result = (DID_OK << 16);
1469                }
1470                scmd->scsi_done(scmd);
1471                return 0;
1472        case READ_CAPACITY:
1473                if ((scmd->cmnd[1] & 1) ||
1474                    (scmd->cmnd[8] & 1)) {
1475                        /* Illegal request, invalid field in CDB */
1476                        scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
1477                        scmd->scsi_done(scmd);
1478                        return 0;
1479                }
1480                lba = get_unaligned_be32(&scmd->cmnd[2]);
1481                if (lba) {
1482                        /* Illegal request, invalid field in CDB */
1483                        scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
1484                        scmd->scsi_done(scmd);
1485                        return 0;
1486                }
1487                myrb_read_capacity(cb, scmd, ldev_info);
1488                scmd->scsi_done(scmd);
1489                return 0;
1490        case REQUEST_SENSE:
1491                myrb_request_sense(cb, scmd);
1492                scmd->result = (DID_OK << 16);
1493                return 0;
1494        case SEND_DIAGNOSTIC:
1495                if (scmd->cmnd[1] != 0x04) {
1496                        /* Illegal request, invalid field in CDB */
1497                        scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
1498                } else {
1499                        /* Assume good status */
1500                        scmd->result = (DID_OK << 16);
1501                }
1502                scmd->scsi_done(scmd);
1503                return 0;
1504        case READ_6:
1505                if (ldev_info->state == MYRB_DEVICE_WO) {
1506                        /* Data protect, attempt to read invalid data */
1507                        scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
1508                        scmd->scsi_done(scmd);
1509                        return 0;
1510                }
1511                fallthrough;
1512        case WRITE_6:
1513                lba = (((scmd->cmnd[1] & 0x1F) << 16) |
1514                       (scmd->cmnd[2] << 8) |
1515                       scmd->cmnd[3]);
1516                block_cnt = scmd->cmnd[4];
1517                break;
1518        case READ_10:
1519                if (ldev_info->state == MYRB_DEVICE_WO) {
1520                        /* Data protect, attempt to read invalid data */
1521                        scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
1522                        scmd->scsi_done(scmd);
1523                        return 0;
1524                }
1525                fallthrough;
1526        case WRITE_10:
1527        case VERIFY:            /* 0x2F */
1528        case WRITE_VERIFY:      /* 0x2E */
1529                lba = get_unaligned_be32(&scmd->cmnd[2]);
1530                block_cnt = get_unaligned_be16(&scmd->cmnd[7]);
1531                break;
1532        case READ_12:
1533                if (ldev_info->state == MYRB_DEVICE_WO) {
1534                        /* Data protect, attempt to read invalid data */
1535                        scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
1536                        scmd->scsi_done(scmd);
1537                        return 0;
1538                }
1539                fallthrough;
1540        case WRITE_12:
1541        case VERIFY_12: /* 0xAF */
1542        case WRITE_VERIFY_12:   /* 0xAE */
1543                lba = get_unaligned_be32(&scmd->cmnd[2]);
1544                block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
1545                break;
1546        default:
1547                /* Illegal request, invalid opcode */
1548                scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x20, 0);
1549                scmd->scsi_done(scmd);
1550                return 0;
1551        }
1552
1553        myrb_reset_cmd(cmd_blk);
1554        mbox->type5.id = scsi_cmd_to_rq(scmd)->tag + 3;
1555        if (scmd->sc_data_direction == DMA_NONE)
1556                goto submit;
1557        nsge = scsi_dma_map(scmd);
1558        if (nsge == 1) {
1559                sgl = scsi_sglist(scmd);
1560                if (scmd->sc_data_direction == DMA_FROM_DEVICE)
1561                        mbox->type5.opcode = MYRB_CMD_READ;
1562                else
1563                        mbox->type5.opcode = MYRB_CMD_WRITE;
1564
1565                mbox->type5.ld.xfer_len = block_cnt;
1566                mbox->type5.ld.ldev_num = sdev->id;
1567                mbox->type5.lba = lba;
1568                mbox->type5.addr = (u32)sg_dma_address(sgl);
1569        } else {
1570                struct myrb_sge *hw_sgl;
1571                dma_addr_t hw_sgl_addr;
1572                int i;
1573
1574                hw_sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &hw_sgl_addr);
1575                if (!hw_sgl)
1576                        return SCSI_MLQUEUE_HOST_BUSY;
1577
1578                cmd_blk->sgl = hw_sgl;
1579                cmd_blk->sgl_addr = hw_sgl_addr;
1580
1581                if (scmd->sc_data_direction == DMA_FROM_DEVICE)
1582                        mbox->type5.opcode = MYRB_CMD_READ_SG;
1583                else
1584                        mbox->type5.opcode = MYRB_CMD_WRITE_SG;
1585
1586                mbox->type5.ld.xfer_len = block_cnt;
1587                mbox->type5.ld.ldev_num = sdev->id;
1588                mbox->type5.lba = lba;
1589                mbox->type5.addr = hw_sgl_addr;
1590                mbox->type5.sg_count = nsge;
1591
1592                scsi_for_each_sg(scmd, sgl, nsge, i) {
1593                        hw_sgl->sge_addr = (u32)sg_dma_address(sgl);
1594                        hw_sgl->sge_count = (u32)sg_dma_len(sgl);
1595                        hw_sgl++;
1596                }
1597        }
1598submit:
1599        spin_lock_irqsave(&cb->queue_lock, flags);
1600        cb->qcmd(cb, cmd_blk);
1601        spin_unlock_irqrestore(&cb->queue_lock, flags);
1602
1603        return 0;
1604}
1605
1606static int myrb_queuecommand(struct Scsi_Host *shost,
1607                struct scsi_cmnd *scmd)
1608{
1609        struct scsi_device *sdev = scmd->device;
1610
1611        if (sdev->channel > myrb_logical_channel(shost)) {
1612                scmd->result = (DID_BAD_TARGET << 16);
1613                scmd->scsi_done(scmd);
1614                return 0;
1615        }
1616        if (sdev->channel == myrb_logical_channel(shost))
1617                return myrb_ldev_queuecommand(shost, scmd);
1618
1619        return myrb_pthru_queuecommand(shost, scmd);
1620}
1621
1622static int myrb_ldev_slave_alloc(struct scsi_device *sdev)
1623{
1624        struct myrb_hba *cb = shost_priv(sdev->host);
1625        struct myrb_ldev_info *ldev_info;
1626        unsigned short ldev_num = sdev->id;
1627        enum raid_level level;
1628
1629        ldev_info = cb->ldev_info_buf + ldev_num;
1630        if (!ldev_info)
1631                return -ENXIO;
1632
1633        sdev->hostdata = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
1634        if (!sdev->hostdata)
1635                return -ENOMEM;
1636        dev_dbg(&sdev->sdev_gendev,
1637                "slave alloc ldev %d state %x\n",
1638                ldev_num, ldev_info->state);
1639        memcpy(sdev->hostdata, ldev_info,
1640               sizeof(*ldev_info));
1641        switch (ldev_info->raid_level) {
1642        case MYRB_RAID_LEVEL0:
1643                level = RAID_LEVEL_LINEAR;
1644                break;
1645        case MYRB_RAID_LEVEL1:
1646                level = RAID_LEVEL_1;
1647                break;
1648        case MYRB_RAID_LEVEL3:
1649                level = RAID_LEVEL_3;
1650                break;
1651        case MYRB_RAID_LEVEL5:
1652                level = RAID_LEVEL_5;
1653                break;
1654        case MYRB_RAID_LEVEL6:
1655                level = RAID_LEVEL_6;
1656                break;
1657        case MYRB_RAID_JBOD:
1658                level = RAID_LEVEL_JBOD;
1659                break;
1660        default:
1661                level = RAID_LEVEL_UNKNOWN;
1662                break;
1663        }
1664        raid_set_level(myrb_raid_template, &sdev->sdev_gendev, level);
1665        return 0;
1666}
1667
1668static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
1669{
1670        struct myrb_hba *cb = shost_priv(sdev->host);
1671        struct myrb_pdev_state *pdev_info;
1672        unsigned short status;
1673
1674        if (sdev->id > MYRB_MAX_TARGETS)
1675                return -ENXIO;
1676
1677        pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
1678        if (!pdev_info)
1679                return -ENOMEM;
1680
1681        status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
1682                                  sdev, pdev_info);
1683        if (status != MYRB_STATUS_SUCCESS) {
1684                dev_dbg(&sdev->sdev_gendev,
1685                        "Failed to get device state, status %x\n",
1686                        status);
1687                kfree(pdev_info);
1688                return -ENXIO;
1689        }
1690        if (!pdev_info->present) {
1691                dev_dbg(&sdev->sdev_gendev,
1692                        "device not present, skip\n");
1693                kfree(pdev_info);
1694                return -ENXIO;
1695        }
1696        dev_dbg(&sdev->sdev_gendev,
1697                "slave alloc pdev %d:%d state %x\n",
1698                sdev->channel, sdev->id, pdev_info->state);
1699        sdev->hostdata = pdev_info;
1700
1701        return 0;
1702}
1703
1704static int myrb_slave_alloc(struct scsi_device *sdev)
1705{
1706        if (sdev->channel > myrb_logical_channel(sdev->host))
1707                return -ENXIO;
1708
1709        if (sdev->lun > 0)
1710                return -ENXIO;
1711
1712        if (sdev->channel == myrb_logical_channel(sdev->host))
1713                return myrb_ldev_slave_alloc(sdev);
1714
1715        return myrb_pdev_slave_alloc(sdev);
1716}
1717
1718static int myrb_slave_configure(struct scsi_device *sdev)
1719{
1720        struct myrb_ldev_info *ldev_info;
1721
1722        if (sdev->channel > myrb_logical_channel(sdev->host))
1723                return -ENXIO;
1724
1725        if (sdev->channel < myrb_logical_channel(sdev->host)) {
1726                sdev->no_uld_attach = 1;
1727                return 0;
1728        }
1729        if (sdev->lun != 0)
1730                return -ENXIO;
1731
1732        ldev_info = sdev->hostdata;
1733        if (!ldev_info)
1734                return -ENXIO;
1735        if (ldev_info->state != MYRB_DEVICE_ONLINE)
1736                sdev_printk(KERN_INFO, sdev,
1737                            "Logical drive is %s\n",
1738                            myrb_devstate_name(ldev_info->state));
1739
1740        sdev->tagged_supported = 1;
1741        return 0;
1742}
1743
/* Release the per-device state allocated in myrb_slave_alloc(). */
static void myrb_slave_destroy(struct scsi_device *sdev)
{
        kfree(sdev->hostdata);
}
1748
1749static int myrb_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1750                sector_t capacity, int geom[])
1751{
1752        struct myrb_hba *cb = shost_priv(sdev->host);
1753
1754        geom[0] = cb->ldev_geom_heads;
1755        geom[1] = cb->ldev_geom_sectors;
1756        geom[2] = sector_div(capacity, geom[0] * geom[1]);
1757
1758        return 0;
1759}
1760
1761static ssize_t raid_state_show(struct device *dev,
1762                struct device_attribute *attr, char *buf)
1763{
1764        struct scsi_device *sdev = to_scsi_device(dev);
1765        struct myrb_hba *cb = shost_priv(sdev->host);
1766        int ret;
1767
1768        if (!sdev->hostdata)
1769                return snprintf(buf, 16, "Unknown\n");
1770
1771        if (sdev->channel == myrb_logical_channel(sdev->host)) {
1772                struct myrb_ldev_info *ldev_info = sdev->hostdata;
1773                const char *name;
1774
1775                name = myrb_devstate_name(ldev_info->state);
1776                if (name)
1777                        ret = snprintf(buf, 32, "%s\n", name);
1778                else
1779                        ret = snprintf(buf, 32, "Invalid (%02X)\n",
1780                                       ldev_info->state);
1781        } else {
1782                struct myrb_pdev_state *pdev_info = sdev->hostdata;
1783                unsigned short status;
1784                const char *name;
1785
1786                status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
1787                                          sdev, pdev_info);
1788                if (status != MYRB_STATUS_SUCCESS)
1789                        sdev_printk(KERN_INFO, sdev,
1790                                    "Failed to get device state, status %x\n",
1791                                    status);
1792
1793                if (!pdev_info->present)
1794                        name = "Removed";
1795                else
1796                        name = myrb_devstate_name(pdev_info->state);
1797                if (name)
1798                        ret = snprintf(buf, 32, "%s\n", name);
1799                else
1800                        ret = snprintf(buf, 32, "Invalid (%02X)\n",
1801                                       pdev_info->state);
1802        }
1803        return ret;
1804}
1805
/*
 * raid_state_store - sysfs "raid_state" write handler
 *
 * Accepts "kill"/"offline", "online" or "standby" and asks the controller
 * (via myrb_set_pdev_state()) to move the physical device to that state.
 * Returns @count on success (or if the device is already in the requested
 * state), otherwise a negative errno mapped from the controller status.
 *
 * NOTE(review): hostdata is interpreted as struct myrb_pdev_state here,
 * but this attribute is also visible on logical-drive sdevs whose
 * hostdata is a struct myrb_ldev_info — confirm writes on the logical
 * channel are intended/safe.
 */
static ssize_t raid_state_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct scsi_device *sdev = to_scsi_device(dev);
        struct myrb_hba *cb = shost_priv(sdev->host);
        struct myrb_pdev_state *pdev_info;
        enum myrb_devstate new_state;
        unsigned short status;

        /* Parse the requested state keyword. */
        if (!strncmp(buf, "kill", 4) ||
            !strncmp(buf, "offline", 7))
                new_state = MYRB_DEVICE_DEAD;
        else if (!strncmp(buf, "online", 6))
                new_state = MYRB_DEVICE_ONLINE;
        else if (!strncmp(buf, "standby", 7))
                new_state = MYRB_DEVICE_STANDBY;
        else
                return -EINVAL;

        pdev_info = sdev->hostdata;
        if (!pdev_info) {
                sdev_printk(KERN_INFO, sdev,
                            "Failed - no physical device information\n");
                return -ENXIO;
        }
        if (!pdev_info->present) {
                sdev_printk(KERN_INFO, sdev,
                            "Failed - device not present\n");
                return -ENXIO;
        }

        /* Already in the requested state: nothing to do. */
        if (pdev_info->state == new_state)
                return count;

        /* Map controller status to an errno for the sysfs writer. */
        status = myrb_set_pdev_state(cb, sdev, new_state);
        switch (status) {
        case MYRB_STATUS_SUCCESS:
                break;
        case MYRB_STATUS_START_DEVICE_FAILED:
                sdev_printk(KERN_INFO, sdev,
                             "Failed - Unable to Start Device\n");
                count = -EAGAIN;
                break;
        case MYRB_STATUS_NO_DEVICE:
                sdev_printk(KERN_INFO, sdev,
                            "Failed - No Device at Address\n");
                count = -ENODEV;
                break;
        case MYRB_STATUS_INVALID_CHANNEL_OR_TARGET:
                sdev_printk(KERN_INFO, sdev,
                         "Failed - Invalid Channel or Target or Modifier\n");
                count = -EINVAL;
                break;
        case MYRB_STATUS_CHANNEL_BUSY:
                sdev_printk(KERN_INFO, sdev,
                         "Failed - Channel Busy\n");
                count = -EBUSY;
                break;
        default:
                sdev_printk(KERN_INFO, sdev,
                         "Failed - Unexpected Status %04X\n", status);
                count = -EIO;
                break;
        }
        return count;
}
static DEVICE_ATTR_RW(raid_state);
1873
1874static ssize_t raid_level_show(struct device *dev,
1875                struct device_attribute *attr, char *buf)
1876{
1877        struct scsi_device *sdev = to_scsi_device(dev);
1878
1879        if (sdev->channel == myrb_logical_channel(sdev->host)) {
1880                struct myrb_ldev_info *ldev_info = sdev->hostdata;
1881                const char *name;
1882
1883                if (!ldev_info)
1884                        return -ENXIO;
1885
1886                name = myrb_raidlevel_name(ldev_info->raid_level);
1887                if (!name)
1888                        return snprintf(buf, 32, "Invalid (%02X)\n",
1889                                        ldev_info->state);
1890                return snprintf(buf, 32, "%s\n", name);
1891        }
1892        return snprintf(buf, 32, "Physical Drive\n");
1893}
1894static DEVICE_ATTR_RO(raid_level);
1895
1896static ssize_t rebuild_show(struct device *dev,
1897                struct device_attribute *attr, char *buf)
1898{
1899        struct scsi_device *sdev = to_scsi_device(dev);
1900        struct myrb_hba *cb = shost_priv(sdev->host);
1901        struct myrb_rbld_progress rbld_buf;
1902        unsigned char status;
1903
1904        if (sdev->channel < myrb_logical_channel(sdev->host))
1905                return snprintf(buf, 32, "physical device - not rebuilding\n");
1906
1907        status = myrb_get_rbld_progress(cb, &rbld_buf);
1908
1909        if (rbld_buf.ldev_num != sdev->id ||
1910            status != MYRB_STATUS_SUCCESS)
1911                return snprintf(buf, 32, "not rebuilding\n");
1912
1913        return snprintf(buf, 32, "rebuilding block %u of %u\n",
1914                        rbld_buf.ldev_size - rbld_buf.blocks_left,
1915                        rbld_buf.ldev_size);
1916}
1917
/*
 * rebuild_store - sysfs "rebuild" write handler
 *
 * Writing a non-zero integer starts an asynchronous rebuild of the
 * logical drive; writing zero cancels a rebuild in progress.  Direct
 * commands are serialized via cb->dcmd_mutex.  Returns @count on success
 * or a negative errno.
 */
static ssize_t rebuild_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct scsi_device *sdev = to_scsi_device(dev);
        struct myrb_hba *cb = shost_priv(sdev->host);
        struct myrb_cmdblk *cmd_blk;
        union myrb_cmd_mbox *mbox;
        unsigned short status;
        int rc, start;
        const char *msg;

        rc = kstrtoint(buf, 0, &start);
        if (rc)
                return rc;

        /* Rebuild only makes sense on the logical-drive channel. */
        if (sdev->channel >= myrb_logical_channel(sdev->host))
                return -ENXIO;

        /* SUCCESS here means a rebuild is currently in progress. */
        status = myrb_get_rbld_progress(cb, NULL);
        if (start) {
                if (status == MYRB_STATUS_SUCCESS) {
                        sdev_printk(KERN_INFO, sdev,
                                    "Rebuild Not Initiated; already in progress\n");
                        return -EALREADY;
                }
                /* Issue an asynchronous rebuild via the direct cmd block. */
                mutex_lock(&cb->dcmd_mutex);
                cmd_blk = &cb->dcmd_blk;
                myrb_reset_cmd(cmd_blk);
                mbox = &cmd_blk->mbox;
                mbox->type3D.opcode = MYRB_CMD_REBUILD_ASYNC;
                mbox->type3D.id = MYRB_DCMD_TAG;
                mbox->type3D.channel = sdev->channel;
                mbox->type3D.target = sdev->id;
                status = myrb_exec_cmd(cb, cmd_blk);
                mutex_unlock(&cb->dcmd_mutex);
        } else {
                struct pci_dev *pdev = cb->pdev;
                unsigned char *rate;
                dma_addr_t rate_addr;

                if (status != MYRB_STATUS_SUCCESS) {
                        sdev_printk(KERN_INFO, sdev,
                                    "Rebuild Not Cancelled; not in progress\n");
                        return 0;
                }

                /* REBUILD_CONTROL needs a DMA-visible rate byte. */
                rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
                                          &rate_addr, GFP_KERNEL);
                if (rate == NULL) {
                        sdev_printk(KERN_INFO, sdev,
                                    "Cancellation of Rebuild Failed - Out of Memory\n");
                        return -ENOMEM;
                }
                mutex_lock(&cb->dcmd_mutex);
                cmd_blk = &cb->dcmd_blk;
                myrb_reset_cmd(cmd_blk);
                mbox = &cmd_blk->mbox;
                mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
                mbox->type3R.id = MYRB_DCMD_TAG;
                /* Rate 0xFF cancels the rebuild. */
                mbox->type3R.rbld_rate = 0xFF;
                mbox->type3R.addr = rate_addr;
                status = myrb_exec_cmd(cb, cmd_blk);
                dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
                mutex_unlock(&cb->dcmd_mutex);
        }
        if (status == MYRB_STATUS_SUCCESS) {
                sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
                            start ? "Initiated" : "Cancelled");
                return count;
        }
        if (!start) {
                sdev_printk(KERN_INFO, sdev,
                            "Rebuild Not Cancelled, status 0x%x\n",
                            status);
                return -EIO;
        }

        /* Start failed: report a human-readable reason when known. */
        switch (status) {
        case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
                msg = "Attempt to Rebuild Online or Unresponsive Drive";
                break;
        case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
                msg = "New Disk Failed During Rebuild";
                break;
        case MYRB_STATUS_INVALID_ADDRESS:
                msg = "Invalid Device Address";
                break;
        case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
                msg = "Already in Progress";
                break;
        default:
                msg = NULL;
                break;
        }
        if (msg)
                sdev_printk(KERN_INFO, sdev,
                            "Rebuild Failed - %s\n", msg);
        else
                sdev_printk(KERN_INFO, sdev,
                            "Rebuild Failed, status 0x%x\n", status);

        return -EIO;
}
static DEVICE_ATTR_RW(rebuild);
2022
2023static ssize_t consistency_check_store(struct device *dev,
2024                struct device_attribute *attr, const char *buf, size_t count)
2025{
2026        struct scsi_device *sdev = to_scsi_device(dev);
2027        struct myrb_hba *cb = shost_priv(sdev->host);
2028        struct myrb_rbld_progress rbld_buf;
2029        struct myrb_cmdblk *cmd_blk;
2030        union myrb_cmd_mbox *mbox;
2031        unsigned short ldev_num = 0xFFFF;
2032        unsigned short status;
2033        int rc, start;
2034        const char *msg;
2035
2036        rc = kstrtoint(buf, 0, &start);
2037        if (rc)
2038                return rc;
2039
2040        if (sdev->channel < myrb_logical_channel(sdev->host))
2041                return -ENXIO;
2042
2043        status = myrb_get_rbld_progress(cb, &rbld_buf);
2044        if (start) {
2045                if (status == MYRB_STATUS_SUCCESS) {
2046                        sdev_printk(KERN_INFO, sdev,
2047                                    "Check Consistency Not Initiated; already in progress\n");
2048                        return -EALREADY;
2049                }
2050                mutex_lock(&cb->dcmd_mutex);
2051                cmd_blk = &cb->dcmd_blk;
2052                myrb_reset_cmd(cmd_blk);
2053                mbox = &cmd_blk->mbox;
2054                mbox->type3C.opcode = MYRB_CMD_CHECK_CONSISTENCY_ASYNC;
2055                mbox->type3C.id = MYRB_DCMD_TAG;
2056                mbox->type3C.ldev_num = sdev->id;
2057                mbox->type3C.auto_restore = true;
2058
2059                status = myrb_exec_cmd(cb, cmd_blk);
2060                mutex_unlock(&cb->dcmd_mutex);
2061        } else {
2062                struct pci_dev *pdev = cb->pdev;
2063                unsigned char *rate;
2064                dma_addr_t rate_addr;
2065
2066                if (ldev_num != sdev->id) {
2067                        sdev_printk(KERN_INFO, sdev,
2068                                    "Check Consistency Not Cancelled; not in progress\n");
2069                        return 0;
2070                }
2071                rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
2072                                          &rate_addr, GFP_KERNEL);
2073                if (rate == NULL) {
2074                        sdev_printk(KERN_INFO, sdev,
2075                                    "Cancellation of Check Consistency Failed - Out of Memory\n");
2076                        return -ENOMEM;
2077                }
2078                mutex_lock(&cb->dcmd_mutex);
2079                cmd_blk = &cb->dcmd_blk;
2080                myrb_reset_cmd(cmd_blk);
2081                mbox = &cmd_blk->mbox;
2082                mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
2083                mbox->type3R.id = MYRB_DCMD_TAG;
2084                mbox->type3R.rbld_rate = 0xFF;
2085                mbox->type3R.addr = rate_addr;
2086                status = myrb_exec_cmd(cb, cmd_blk);
2087                dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
2088                mutex_unlock(&cb->dcmd_mutex);
2089        }
2090        if (status == MYRB_STATUS_SUCCESS) {
2091                sdev_printk(KERN_INFO, sdev, "Check Consistency %s\n",
2092                            start ? "Initiated" : "Cancelled");
2093                return count;
2094        }
2095        if (!start) {
2096                sdev_printk(KERN_INFO, sdev,
2097                            "Check Consistency Not Cancelled, status 0x%x\n",
2098                            status);
2099                return -EIO;
2100        }
2101
2102        switch (status) {
2103        case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
2104                msg = "Dependent Physical Device is DEAD";
2105                break;
2106        case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
2107                msg = "New Disk Failed During Rebuild";
2108                break;
2109        case MYRB_STATUS_INVALID_ADDRESS:
2110                msg = "Invalid or Nonredundant Logical Drive";
2111                break;
2112        case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
2113                msg = "Already in Progress";
2114                break;
2115        default:
2116                msg = NULL;
2117                break;
2118        }
2119        if (msg)
2120                sdev_printk(KERN_INFO, sdev,
2121                            "Check Consistency Failed - %s\n", msg);
2122        else
2123                sdev_printk(KERN_INFO, sdev,
2124                            "Check Consistency Failed, status 0x%x\n", status);
2125
2126        return -EIO;
2127}
2128
/*
 * consistency_check_show - sysfs "consistency_check" read handler
 *
 * Progress is obtained through the same rebuild-progress query, so this
 * simply reuses rebuild_show().
 */
static ssize_t consistency_check_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        return rebuild_show(dev, attr, buf);
}
static DEVICE_ATTR_RW(consistency_check);
2135
2136static ssize_t ctlr_num_show(struct device *dev,
2137                struct device_attribute *attr, char *buf)
2138{
2139        struct Scsi_Host *shost = class_to_shost(dev);
2140        struct myrb_hba *cb = shost_priv(shost);
2141
2142        return snprintf(buf, 20, "%u\n", cb->ctlr_num);
2143}
2144static DEVICE_ATTR_RO(ctlr_num);
2145
2146static ssize_t firmware_show(struct device *dev,
2147                struct device_attribute *attr, char *buf)
2148{
2149        struct Scsi_Host *shost = class_to_shost(dev);
2150        struct myrb_hba *cb = shost_priv(shost);
2151
2152        return snprintf(buf, 16, "%s\n", cb->fw_version);
2153}
2154static DEVICE_ATTR_RO(firmware);
2155
2156static ssize_t model_show(struct device *dev,
2157                struct device_attribute *attr, char *buf)
2158{
2159        struct Scsi_Host *shost = class_to_shost(dev);
2160        struct myrb_hba *cb = shost_priv(shost);
2161
2162        return snprintf(buf, 16, "%s\n", cb->model_name);
2163}
2164static DEVICE_ATTR_RO(model);
2165
2166static ssize_t flush_cache_store(struct device *dev,
2167                struct device_attribute *attr, const char *buf, size_t count)
2168{
2169        struct Scsi_Host *shost = class_to_shost(dev);
2170        struct myrb_hba *cb = shost_priv(shost);
2171        unsigned short status;
2172
2173        status = myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
2174        if (status == MYRB_STATUS_SUCCESS) {
2175                shost_printk(KERN_INFO, shost,
2176                             "Cache Flush Completed\n");
2177                return count;
2178        }
2179        shost_printk(KERN_INFO, shost,
2180                     "Cache Flush Failed, status %x\n", status);
2181        return -EIO;
2182}
2183static DEVICE_ATTR_WO(flush_cache);
2184
/* Per-SCSI-device sysfs attributes exported by this driver. */
static struct device_attribute *myrb_sdev_attrs[] = {
        &dev_attr_rebuild,
        &dev_attr_consistency_check,
        &dev_attr_raid_state,
        &dev_attr_raid_level,
        NULL,
};
2192
/* Per-host sysfs attributes exported by this driver. */
static struct device_attribute *myrb_shost_attrs[] = {
        &dev_attr_ctlr_num,
        &dev_attr_model,
        &dev_attr_firmware,
        &dev_attr_flush_cache,
        NULL,
};
2200
/* SCSI host template shared by all myrb controller instances. */
static struct scsi_host_template myrb_template = {
        .module                 = THIS_MODULE,
        .name                   = "DAC960",
        .proc_name              = "myrb",
        .queuecommand           = myrb_queuecommand,
        .eh_host_reset_handler  = myrb_host_reset,
        .slave_alloc            = myrb_slave_alloc,
        .slave_configure        = myrb_slave_configure,
        .slave_destroy          = myrb_slave_destroy,
        .bios_param             = myrb_biosparam,
        .cmd_size               = sizeof(struct myrb_cmdblk),
        .shost_attrs            = myrb_shost_attrs,
        .sdev_attrs             = myrb_sdev_attrs,
        .this_id                = -1,
};
2216
2217/**
2218 * myrb_is_raid - return boolean indicating device is raid volume
2219 * @dev: the device struct object
2220 */
2221static int myrb_is_raid(struct device *dev)
2222{
2223        struct scsi_device *sdev = to_scsi_device(dev);
2224
2225        return sdev->channel == myrb_logical_channel(sdev->host);
2226}
2227
2228/**
2229 * myrb_get_resync - get raid volume resync percent complete
2230 * @dev: the device struct object
2231 */
2232static void myrb_get_resync(struct device *dev)
2233{
2234        struct scsi_device *sdev = to_scsi_device(dev);
2235        struct myrb_hba *cb = shost_priv(sdev->host);
2236        struct myrb_rbld_progress rbld_buf;
2237        unsigned int percent_complete = 0;
2238        unsigned short status;
2239        unsigned int ldev_size = 0, remaining = 0;
2240
2241        if (sdev->channel < myrb_logical_channel(sdev->host))
2242                return;
2243        status = myrb_get_rbld_progress(cb, &rbld_buf);
2244        if (status == MYRB_STATUS_SUCCESS) {
2245                if (rbld_buf.ldev_num == sdev->id) {
2246                        ldev_size = rbld_buf.ldev_size;
2247                        remaining = rbld_buf.blocks_left;
2248                }
2249        }
2250        if (remaining && ldev_size)
2251                percent_complete = (ldev_size - remaining) * 100 / ldev_size;
2252        raid_set_resync(myrb_raid_template, dev, percent_complete);
2253}
2254
2255/**
2256 * myrb_get_state - get raid volume status
2257 * @dev: the device struct object
2258 */
2259static void myrb_get_state(struct device *dev)
2260{
2261        struct scsi_device *sdev = to_scsi_device(dev);
2262        struct myrb_hba *cb = shost_priv(sdev->host);
2263        struct myrb_ldev_info *ldev_info = sdev->hostdata;
2264        enum raid_state state = RAID_STATE_UNKNOWN;
2265        unsigned short status;
2266
2267        if (sdev->channel < myrb_logical_channel(sdev->host) || !ldev_info)
2268                state = RAID_STATE_UNKNOWN;
2269        else {
2270                status = myrb_get_rbld_progress(cb, NULL);
2271                if (status == MYRB_STATUS_SUCCESS)
2272                        state = RAID_STATE_RESYNCING;
2273                else {
2274                        switch (ldev_info->state) {
2275                        case MYRB_DEVICE_ONLINE:
2276                                state = RAID_STATE_ACTIVE;
2277                                break;
2278                        case MYRB_DEVICE_WO:
2279                        case MYRB_DEVICE_CRITICAL:
2280                                state = RAID_STATE_DEGRADED;
2281                                break;
2282                        default:
2283                                state = RAID_STATE_OFFLINE;
2284                        }
2285                }
2286        }
2287        raid_set_state(myrb_raid_template, dev, state);
2288}
2289
/* Callbacks registered with the raid transport class. */
static struct raid_function_template myrb_raid_functions = {
        .cookie         = &myrb_template,
        .is_raid        = myrb_is_raid,
        .get_resync     = myrb_get_resync,
        .get_state      = myrb_get_state,
};
2296
2297static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
2298                struct scsi_cmnd *scmd)
2299{
2300        unsigned short status;
2301
2302        if (!cmd_blk)
2303                return;
2304
2305        scsi_dma_unmap(scmd);
2306
2307        if (cmd_blk->dcdb) {
2308                memcpy(scmd->sense_buffer, &cmd_blk->dcdb->sense, 64);
2309                dma_pool_free(cb->dcdb_pool, cmd_blk->dcdb,
2310                              cmd_blk->dcdb_addr);
2311                cmd_blk->dcdb = NULL;
2312        }
2313        if (cmd_blk->sgl) {
2314                dma_pool_free(cb->sg_pool, cmd_blk->sgl, cmd_blk->sgl_addr);
2315                cmd_blk->sgl = NULL;
2316                cmd_blk->sgl_addr = 0;
2317        }
2318        status = cmd_blk->status;
2319        switch (status) {
2320        case MYRB_STATUS_SUCCESS:
2321        case MYRB_STATUS_DEVICE_BUSY:
2322                scmd->result = (DID_OK << 16) | status;
2323                break;
2324        case MYRB_STATUS_BAD_DATA:
2325                dev_dbg(&scmd->device->sdev_gendev,
2326                        "Bad Data Encountered\n");
2327                if (scmd->sc_data_direction == DMA_FROM_DEVICE)
2328                        /* Unrecovered read error */
2329                        scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x11, 0);
2330                else
2331                        /* Write error */
2332                        scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x0C, 0);
2333                break;
2334        case MYRB_STATUS_IRRECOVERABLE_DATA_ERROR:
2335                scmd_printk(KERN_ERR, scmd, "Irrecoverable Data Error\n");
2336                if (scmd->sc_data_direction == DMA_FROM_DEVICE)
2337                        /* Unrecovered read error, auto-reallocation failed */
2338                        scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x11, 0x04);
2339                else
2340                        /* Write error, auto-reallocation failed */
2341                        scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x0C, 0x02);
2342                break;
2343        case MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE:
2344                dev_dbg(&scmd->device->sdev_gendev,
2345                            "Logical Drive Nonexistent or Offline");
2346                scmd->result = (DID_BAD_TARGET << 16);
2347                break;
2348        case MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV:
2349                dev_dbg(&scmd->device->sdev_gendev,
2350                            "Attempt to Access Beyond End of Logical Drive");
2351                /* Logical block address out of range */
2352                scsi_build_sense(scmd, 0, NOT_READY, 0x21, 0);
2353                break;
2354        case MYRB_STATUS_DEVICE_NONRESPONSIVE:
2355                dev_dbg(&scmd->device->sdev_gendev, "Device nonresponsive\n");
2356                scmd->result = (DID_BAD_TARGET << 16);
2357                break;
2358        default:
2359                scmd_printk(KERN_ERR, scmd,
2360                            "Unexpected Error Status %04X", status);
2361                scmd->result = (DID_ERROR << 16);
2362                break;
2363        }
2364        scmd->scsi_done(scmd);
2365}
2366
2367static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
2368{
2369        if (!cmd_blk)
2370                return;
2371
2372        if (cmd_blk->completion) {
2373                complete(cmd_blk->completion);
2374                cmd_blk->completion = NULL;
2375        }
2376}
2377
/*
 * myrb_monitor - periodic controller monitoring work
 *
 * Runs off cb->work_q.  Each tick services exactly ONE pending item, in
 * strict priority order (events, error table, first-time rebuild info,
 * logical drive info, rebuild progress, consistency check, background
 * init), rescheduling itself quickly (10 jiffies) while work remains.
 * Only when nothing is pending does it issue a fresh enquiry and fall
 * back to the slow MYRB_PRIMARY_MONITOR_INTERVAL.
 */
static void myrb_monitor(struct work_struct *work)
{
	struct myrb_hba *cb = container_of(work,
			struct myrb_hba, monitor_work.work);
	struct Scsi_Host *shost = cb->host;
	unsigned long interval = MYRB_PRIMARY_MONITOR_INTERVAL;

	dev_dbg(&shost->shost_gendev, "monitor tick\n");

	if (cb->new_ev_seq > cb->old_ev_seq) {
		/* Fetch one event per tick until we catch up */
		int event = cb->old_ev_seq;

		dev_dbg(&shost->shost_gendev,
			"get event log no %d/%d\n",
			cb->new_ev_seq, event);
		myrb_get_event(cb, event);
		cb->old_ev_seq = event + 1;
		interval = 10;
	} else if (cb->need_err_info) {
		cb->need_err_info = false;
		dev_dbg(&shost->shost_gendev, "get error table\n");
		myrb_get_errtable(cb);
		interval = 10;
	} else if (cb->need_rbld && cb->rbld_first) {
		/* Rebuild progress takes precedence over ldev info on
		 * the first notification */
		cb->need_rbld = false;
		dev_dbg(&shost->shost_gendev,
			"get rebuild progress\n");
		myrb_update_rbld_progress(cb);
		interval = 10;
	} else if (cb->need_ldev_info) {
		cb->need_ldev_info = false;
		dev_dbg(&shost->shost_gendev,
			"get logical drive info\n");
		myrb_get_ldev_info(cb);
		interval = 10;
	} else if (cb->need_rbld) {
		cb->need_rbld = false;
		dev_dbg(&shost->shost_gendev,
			"get rebuild progress\n");
		myrb_update_rbld_progress(cb);
		interval = 10;
	} else if (cb->need_cc_status) {
		cb->need_cc_status = false;
		dev_dbg(&shost->shost_gendev,
			"get consistency check progress\n");
		myrb_get_cc_progress(cb);
		interval = 10;
	} else if (cb->need_bgi_status) {
		cb->need_bgi_status = false;
		dev_dbg(&shost->shost_gendev, "get background init status\n");
		myrb_bgi_control(cb);
		interval = 10;
	} else {
		dev_dbg(&shost->shost_gendev, "new enquiry\n");
		mutex_lock(&cb->dma_mutex);
		myrb_hba_enquiry(cb);
		mutex_unlock(&cb->dma_mutex);
		/* The enquiry may have raised new work; rerun immediately */
		if ((cb->new_ev_seq - cb->old_ev_seq > 0) ||
		    cb->need_err_info || cb->need_rbld ||
		    cb->need_ldev_info || cb->need_cc_status ||
		    cb->need_bgi_status) {
			dev_dbg(&shost->shost_gendev,
				"reschedule monitor\n");
			interval = 0;
		}
	}
	/* Record the timestamp only for regular (non-fast-path) ticks */
	if (interval > 1)
		cb->primary_monitor_time = jiffies;
	queue_delayed_work(cb->work_q, &cb->monitor_work, interval);
}
2448
/*
 * myrb_err_status - reports controller BIOS messages
 * @cb: per-controller state
 * @error: BIOS error code read from the Error Status Register
 * @parm0: first parameter byte (channel/target, code-dependent)
 * @parm1: second parameter byte
 *
 * Controller BIOS messages are passed through the Error Status Register
 * when the driver performs the BIOS handshaking.  The message strings
 * match the firmware's error-code definitions.
 *
 * Return: true for fatal errors and false otherwise.
 */
static bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
		unsigned char parm0, unsigned char parm1)
{
	struct pci_dev *pdev = cb->pdev;

	switch (error) {
	case 0x00:
		dev_info(&pdev->dev,
			 "Physical Device %d:%d Not Responding\n",
			 parm1, parm0);
		break;
	case 0x08:
		dev_notice(&pdev->dev, "Spinning Up Drives\n");
		break;
	case 0x30:
		dev_notice(&pdev->dev, "Configuration Checksum Error\n");
		break;
	case 0x60:
		dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
		break;
	case 0x70:
		dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
		break;
	case 0x90:
		dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
			   parm1, parm0);
		break;
	case 0xA0:
		dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
		break;
	case 0xB0:
		dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
		break;
	case 0xD0:
		dev_notice(&pdev->dev, "New Controller Configuration Found\n");
		break;
	case 0xF0:
		/* Hardware fault: abort initialization */
		dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
		return true;
	default:
		/* Treat unrecognized codes as fatal as well */
		dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
			error);
		return true;
	}
	return false;
}
2503
2504/*
2505 * Hardware-specific functions
2506 */
2507
2508/*
2509 * DAC960 LA Series Controllers
2510 */
2511
/* Signal a new command in the hardware mailbox via the inbound doorbell. */
static inline void DAC960_LA_hw_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_LA_IDB_HWMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
}
2516
/* Acknowledge the hardware mailbox status, freeing the mailbox. */
static inline void DAC960_LA_ack_hw_mbox_status(void __iomem *base)
{
	writeb(DAC960_LA_IDB_HWMBOX_ACK_STS, base + DAC960_LA_IDB_OFFSET);
}
2521
/* Request a controller (soft) reset via the inbound doorbell. */
static inline void DAC960_LA_reset_ctrl(void __iomem *base)
{
	writeb(DAC960_LA_IDB_CTRL_RESET, base + DAC960_LA_IDB_OFFSET);
}
2526
/* Signal a new command in the memory mailbox via the inbound doorbell. */
static inline void DAC960_LA_mem_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_LA_IDB_MMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
}
2531
/*
 * Check whether the hardware mailbox still holds a command.  Note the
 * LA doorbell exposes an "empty" bit, hence the inversion.
 */
static inline bool DAC960_LA_hw_mbox_is_full(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);

	return !(idb & DAC960_LA_IDB_HWMBOX_EMPTY);
}
2538
/*
 * Check whether controller initialization is still in progress
 * (LA exposes an "init done" bit, hence the inversion).
 */
static inline bool DAC960_LA_init_in_progress(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);

	return !(idb & DAC960_LA_IDB_INIT_DONE);
}
2545
/* Acknowledge only the hardware mailbox interrupt. */
static inline void DAC960_LA_ack_hw_mbox_intr(void __iomem *base)
{
	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
}
2550
/* Acknowledge both hardware- and memory-mailbox interrupts at once. */
static inline void DAC960_LA_ack_intr(void __iomem *base)
{
	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ | DAC960_LA_ODB_MMBOX_ACK_IRQ,
	       base + DAC960_LA_ODB_OFFSET);
}
2556
/* Check whether a hardware mailbox completion status can be read. */
static inline bool DAC960_LA_hw_mbox_status_available(void __iomem *base)
{
	unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);

	return odb & DAC960_LA_ODB_HWMBOX_STS_AVAIL;
}
2563
/* Enable interrupts by clearing the disable bit in the IRQ mask. */
static inline void DAC960_LA_enable_intr(void __iomem *base)
{
	unsigned char odb = 0xFF;

	odb &= ~DAC960_LA_IRQMASK_DISABLE_IRQ;
	writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
}
2571
/*
 * Disable interrupts by writing the full mask (the OR below is
 * redundant on an all-ones value but kept for symmetry with enable).
 */
static inline void DAC960_LA_disable_intr(void __iomem *base)
{
	unsigned char odb = 0xFF;

	odb |= DAC960_LA_IRQMASK_DISABLE_IRQ;
	writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
}
2579
/*
 * Copy a command into a memory mailbox slot.  Word 0 carries the opcode
 * and command id and is written last, so the controller never observes
 * a partially-written command; the barriers enforce that ordering.
 */
static inline void DAC960_LA_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
		union myrb_cmd_mbox *mbox)
{
	mem_mbox->words[1] = mbox->words[1];
	mem_mbox->words[2] = mbox->words[2];
	mem_mbox->words[3] = mbox->words[3];
	/* Memory barrier to prevent reordering */
	wmb();
	mem_mbox->words[0] = mbox->words[0];
	/* Memory barrier to force PCI access */
	mb();
}
2592
/* Write a 13-byte command image into the hardware mailbox registers. */
static inline void DAC960_LA_write_hw_mbox(void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	writel(mbox->words[0], base + DAC960_LA_CMDOP_OFFSET);
	writel(mbox->words[1], base + DAC960_LA_MBOX4_OFFSET);
	writel(mbox->words[2], base + DAC960_LA_MBOX8_OFFSET);
	writeb(mbox->bytes[12], base + DAC960_LA_MBOX12_OFFSET);
}
2601
/* Read the 16-bit command completion status register. */
static inline unsigned short DAC960_LA_read_status(void __iomem *base)
{
	return readw(base + DAC960_LA_STS_OFFSET);
}
2606
/*
 * Read a pending BIOS error message from the error status register.
 * Returns false if no message is pending; otherwise fills in the error
 * code and its two parameter bytes (reusing the CMDOP/CMDID mailbox
 * registers) and acknowledges the message by writing 0xFF back.
 */
static inline bool
DAC960_LA_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	unsigned char errsts = readb(base + DAC960_LA_ERRSTS_OFFSET);

	if (!(errsts & DAC960_LA_ERRSTS_PENDING))
		return false;
	errsts &= ~DAC960_LA_ERRSTS_PENDING;

	*error = errsts;
	*param0 = readb(base + DAC960_LA_CMDOP_OFFSET);
	*param1 = readb(base + DAC960_LA_CMDID_OFFSET);
	writeb(0xFF, base + DAC960_LA_ERRSTS_OFFSET);
	return true;
}
2623
/*
 * DAC960_LA_mbox_init - execute one command via the hardware mailbox
 *
 * Polls (up to MYRB_MAILBOX_TIMEOUT iterations of 10us) for the mailbox
 * to drain, writes the command, then polls for the completion status.
 * Used only during initialization, before the memory mailbox interface
 * is enabled.  Returns the controller status, or
 * MYRB_STATUS_SUBSYS_TIMEOUT on either poll timing out.
 */
static inline unsigned short
DAC960_LA_mbox_init(struct pci_dev *pdev, void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	unsigned short status;
	int timeout = 0;

	while (timeout < MYRB_MAILBOX_TIMEOUT) {
		if (!DAC960_LA_hw_mbox_is_full(base))
			break;
		udelay(10);
		timeout++;
	}
	if (DAC960_LA_hw_mbox_is_full(base)) {
		dev_err(&pdev->dev,
			"Timeout waiting for empty mailbox\n");
		return MYRB_STATUS_SUBSYS_TIMEOUT;
	}
	DAC960_LA_write_hw_mbox(base, mbox);
	DAC960_LA_hw_mbox_new_cmd(base);
	timeout = 0;
	while (timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_LA_hw_mbox_status_available(base))
			break;
		udelay(10);
		timeout++;
	}
	if (!DAC960_LA_hw_mbox_status_available(base)) {
		dev_err(&pdev->dev, "Timeout waiting for mailbox status\n");
		return MYRB_STATUS_SUBSYS_TIMEOUT;
	}
	status = DAC960_LA_read_status(base);
	/* Ack both the interrupt and the status to free the mailbox */
	DAC960_LA_ack_hw_mbox_intr(base);
	DAC960_LA_ack_hw_mbox_status(base);

	return status;
}
2661
/*
 * DAC960_LA_hw_init - bring up an LA-series controller
 *
 * Waits for firmware initialization to finish (reporting any BIOS
 * handshake messages along the way; a fatal message aborts with
 * -ENODEV), enables the memory mailbox interface, and wires up the
 * controller method pointers in @cb.
 */
static int DAC960_LA_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	DAC960_LA_disable_intr(base);
	DAC960_LA_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_LA_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_LA_read_error_status(base, &error,
					      &parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -ENODEV;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrb_enable_mmio(cb, DAC960_LA_mbox_init)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_LA_reset_ctrl(base);
		return -ENODEV;
	}
	DAC960_LA_enable_intr(base);
	cb->qcmd = myrb_qcmd;
	cb->write_cmd_mbox = DAC960_LA_write_cmd_mbox;
	/* Dual-mode firmware uses the memory mailbox doorbell */
	if (cb->dual_mode_interface)
		cb->get_cmd_mbox = DAC960_LA_mem_mbox_new_cmd;
	else
		cb->get_cmd_mbox = DAC960_LA_hw_mbox_new_cmd;
	cb->disable_intr = DAC960_LA_disable_intr;
	cb->reset = DAC960_LA_reset_ctrl;

	return 0;
}
2703
/*
 * DAC960_LA_intr_handler - interrupt handler for LA-series controllers
 *
 * Walks the ring of memory status mailboxes under the queue lock.
 * Completion ids below 3 are the driver's internal direct/monitor
 * command blocks; larger ids map to SCSI host tags (id - 3).
 */
static irqreturn_t DAC960_LA_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	struct myrb_stat_mbox *next_stat_mbox;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	DAC960_LA_ack_intr(base);
	next_stat_mbox = cb->next_stat_mbox;
	while (next_stat_mbox->valid) {
		unsigned char id = next_stat_mbox->id;
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;

		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = next_stat_mbox->status;
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		/* Clear the slot and advance, wrapping at the ring end */
		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
		if (++next_stat_mbox > cb->last_stat_mbox)
			next_stat_mbox = cb->first_stat_mbox;

		if (cmd_blk) {
			if (id < 3)
				myrb_handle_cmdblk(cb, cmd_blk);
			else
				myrb_handle_scsi(cb, cmd_blk, scmd);
		}
	}
	cb->next_stat_mbox = next_stat_mbox;
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}
2749
/* Per-board-family hooks for LA-series controllers. */
static struct myrb_privdata DAC960_LA_privdata = {
	.hw_init =	DAC960_LA_hw_init,
	.irq_handler =	DAC960_LA_intr_handler,
	.mmio_size =	DAC960_LA_mmio_size,
};
2755
2756/*
2757 * DAC960 PG Series Controllers
2758 */
/* Signal a new command in the hardware mailbox (PG uses 32-bit doorbells). */
static inline void DAC960_PG_hw_mbox_new_cmd(void __iomem *base)
{
	writel(DAC960_PG_IDB_HWMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
}
2763
/* Acknowledge the hardware mailbox status, freeing the mailbox. */
static inline void DAC960_PG_ack_hw_mbox_status(void __iomem *base)
{
	writel(DAC960_PG_IDB_HWMBOX_ACK_STS, base + DAC960_PG_IDB_OFFSET);
}
2768
/* Request a controller reset via the inbound doorbell. */
static inline void DAC960_PG_reset_ctrl(void __iomem *base)
{
	writel(DAC960_PG_IDB_CTRL_RESET, base + DAC960_PG_IDB_OFFSET);
}
2773
/* Signal a new command in the memory mailbox via the inbound doorbell. */
static inline void DAC960_PG_mem_mbox_new_cmd(void __iomem *base)
{
	writel(DAC960_PG_IDB_MMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
}
2778
/*
 * Check whether the hardware mailbox still holds a command.  Only the
 * low byte of the 32-bit doorbell is significant here.
 */
static inline bool DAC960_PG_hw_mbox_is_full(void __iomem *base)
{
	unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);

	return idb & DAC960_PG_IDB_HWMBOX_FULL;
}
2785
/* Check whether controller initialization is still in progress. */
static inline bool DAC960_PG_init_in_progress(void __iomem *base)
{
	unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);

	return idb & DAC960_PG_IDB_INIT_IN_PROGRESS;
}
2792
/* Acknowledge only the hardware mailbox interrupt. */
static inline void DAC960_PG_ack_hw_mbox_intr(void __iomem *base)
{
	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
}
2797
/* Acknowledge both hardware- and memory-mailbox interrupts at once. */
static inline void DAC960_PG_ack_intr(void __iomem *base)
{
	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ | DAC960_PG_ODB_MMBOX_ACK_IRQ,
	       base + DAC960_PG_ODB_OFFSET);
}
2803
/* Check whether a hardware mailbox completion status can be read. */
static inline bool DAC960_PG_hw_mbox_status_available(void __iomem *base)
{
	unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);

	return odb & DAC960_PG_ODB_HWMBOX_STS_AVAIL;
}
2810
/* Enable interrupts by clearing the disable bit in the 32-bit IRQ mask. */
static inline void DAC960_PG_enable_intr(void __iomem *base)
{
	unsigned int imask = (unsigned int)-1;

	imask &= ~DAC960_PG_IRQMASK_DISABLE_IRQ;
	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
}
2818
/* Disable interrupts by writing an all-ones IRQ mask. */
static inline void DAC960_PG_disable_intr(void __iomem *base)
{
	unsigned int imask = (unsigned int)-1;

	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
}
2825
/*
 * Copy a command into a memory mailbox slot.  Word 0 (opcode + command
 * id) is written last so the controller never sees a partially-written
 * command; the barriers enforce that ordering.
 */
static inline void DAC960_PG_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
		union myrb_cmd_mbox *mbox)
{
	mem_mbox->words[1] = mbox->words[1];
	mem_mbox->words[2] = mbox->words[2];
	mem_mbox->words[3] = mbox->words[3];
	/* Memory barrier to prevent reordering */
	wmb();
	mem_mbox->words[0] = mbox->words[0];
	/* Memory barrier to force PCI access */
	mb();
}
2838
/* Write a 13-byte command image into the hardware mailbox registers. */
static inline void DAC960_PG_write_hw_mbox(void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	writel(mbox->words[0], base + DAC960_PG_CMDOP_OFFSET);
	writel(mbox->words[1], base + DAC960_PG_MBOX4_OFFSET);
	writel(mbox->words[2], base + DAC960_PG_MBOX8_OFFSET);
	writeb(mbox->bytes[12], base + DAC960_PG_MBOX12_OFFSET);
}
2847
/* Read the 16-bit command completion status register. */
static inline unsigned short
DAC960_PG_read_status(void __iomem *base)
{
	return readw(base + DAC960_PG_STS_OFFSET);
}
2853
/*
 * Read a pending BIOS error message from the error status register.
 * Returns false if no message is pending; otherwise fills in the error
 * code and its two parameter bytes and acknowledges by writing 0 back.
 */
static inline bool
DAC960_PG_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	unsigned char errsts = readb(base + DAC960_PG_ERRSTS_OFFSET);

	if (!(errsts & DAC960_PG_ERRSTS_PENDING))
		return false;
	errsts &= ~DAC960_PG_ERRSTS_PENDING;
	*error = errsts;
	*param0 = readb(base + DAC960_PG_CMDOP_OFFSET);
	*param1 = readb(base + DAC960_PG_CMDID_OFFSET);
	writeb(0, base + DAC960_PG_ERRSTS_OFFSET);
	return true;
}
2869
/*
 * DAC960_PG_mbox_init - execute one command via the hardware mailbox
 *
 * Same handshake as the LA variant: poll (10us steps, up to
 * MYRB_MAILBOX_TIMEOUT) for an empty mailbox, submit, poll for status.
 * Only used during initialization.  Returns the controller status or
 * MYRB_STATUS_SUBSYS_TIMEOUT.
 */
static inline unsigned short
DAC960_PG_mbox_init(struct pci_dev *pdev, void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	unsigned short status;
	int timeout = 0;

	while (timeout < MYRB_MAILBOX_TIMEOUT) {
		if (!DAC960_PG_hw_mbox_is_full(base))
			break;
		udelay(10);
		timeout++;
	}
	if (DAC960_PG_hw_mbox_is_full(base)) {
		dev_err(&pdev->dev,
			"Timeout waiting for empty mailbox\n");
		return MYRB_STATUS_SUBSYS_TIMEOUT;
	}
	DAC960_PG_write_hw_mbox(base, mbox);
	DAC960_PG_hw_mbox_new_cmd(base);

	timeout = 0;
	while (timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_PG_hw_mbox_status_available(base))
			break;
		udelay(10);
		timeout++;
	}
	if (!DAC960_PG_hw_mbox_status_available(base)) {
		dev_err(&pdev->dev,
			"Timeout waiting for mailbox status\n");
		return MYRB_STATUS_SUBSYS_TIMEOUT;
	}
	status = DAC960_PG_read_status(base);
	/* Ack both the interrupt and the status to free the mailbox */
	DAC960_PG_ack_hw_mbox_intr(base);
	DAC960_PG_ack_hw_mbox_status(base);

	return status;
}
2909
/*
 * DAC960_PG_hw_init - bring up a PG-series controller
 *
 * Waits for firmware initialization (reporting BIOS handshake messages;
 * a fatal message aborts with -EIO), enables the memory mailbox
 * interface, and wires up the controller method pointers in @cb.
 */
static int DAC960_PG_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	DAC960_PG_disable_intr(base);
	DAC960_PG_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_PG_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_PG_read_error_status(base, &error,
						&parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -EIO;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrb_enable_mmio(cb, DAC960_PG_mbox_init)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_PG_reset_ctrl(base);
		return -ENODEV;
	}
	DAC960_PG_enable_intr(base);
	cb->qcmd = myrb_qcmd;
	cb->write_cmd_mbox = DAC960_PG_write_cmd_mbox;
	/* Dual-mode firmware uses the memory mailbox doorbell */
	if (cb->dual_mode_interface)
		cb->get_cmd_mbox = DAC960_PG_mem_mbox_new_cmd;
	else
		cb->get_cmd_mbox = DAC960_PG_hw_mbox_new_cmd;
	cb->disable_intr = DAC960_PG_disable_intr;
	cb->reset = DAC960_PG_reset_ctrl;

	return 0;
}
2951
/*
 * DAC960_PG_intr_handler - interrupt handler for PG-series controllers
 *
 * Walks the ring of memory status mailboxes under the queue lock.
 * Ids below 3 are the driver's internal direct/monitor command blocks;
 * larger ids map to SCSI host tags (id - 3).  The completion helpers
 * themselves tolerate a NULL cmd_blk.
 */
static irqreturn_t DAC960_PG_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	struct myrb_stat_mbox *next_stat_mbox;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	DAC960_PG_ack_intr(base);
	next_stat_mbox = cb->next_stat_mbox;
	while (next_stat_mbox->valid) {
		unsigned char id = next_stat_mbox->id;
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;

		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = next_stat_mbox->status;
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		/* Clear the slot and advance, wrapping at the ring end */
		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
		if (++next_stat_mbox > cb->last_stat_mbox)
			next_stat_mbox = cb->first_stat_mbox;

		if (id < 3)
			myrb_handle_cmdblk(cb, cmd_blk);
		else
			myrb_handle_scsi(cb, cmd_blk, scmd);
	}
	cb->next_stat_mbox = next_stat_mbox;
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}
2995
/* Per-board-family hooks for PG-series controllers. */
static struct myrb_privdata DAC960_PG_privdata = {
	.hw_init =	DAC960_PG_hw_init,
	.irq_handler =	DAC960_PG_intr_handler,
	.mmio_size =	DAC960_PG_mmio_size,
};
3001
3002
3003/*
3004 * DAC960 PD Series Controllers
3005 */
3006
/* Signal a new command in the hardware mailbox via the inbound doorbell. */
static inline void DAC960_PD_hw_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_PD_IDB_HWMBOX_NEW_CMD, base + DAC960_PD_IDB_OFFSET);
}
3011
/* Acknowledge the hardware mailbox status, freeing the mailbox. */
static inline void DAC960_PD_ack_hw_mbox_status(void __iomem *base)
{
	writeb(DAC960_PD_IDB_HWMBOX_ACK_STS, base + DAC960_PD_IDB_OFFSET);
}
3016
/* Request a controller reset via the inbound doorbell. */
static inline void DAC960_PD_reset_ctrl(void __iomem *base)
{
	writeb(DAC960_PD_IDB_CTRL_RESET, base + DAC960_PD_IDB_OFFSET);
}
3021
/* Check whether the hardware mailbox still holds a command. */
static inline bool DAC960_PD_hw_mbox_is_full(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);

	return idb & DAC960_PD_IDB_HWMBOX_FULL;
}
3028
/* Check whether controller initialization is still in progress. */
static inline bool DAC960_PD_init_in_progress(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);

	return idb & DAC960_PD_IDB_INIT_IN_PROGRESS;
}
3035
/* Acknowledge the hardware mailbox interrupt (PD has no memory mailbox). */
static inline void DAC960_PD_ack_intr(void __iomem *base)
{
	writeb(DAC960_PD_ODB_HWMBOX_ACK_IRQ, base + DAC960_PD_ODB_OFFSET);
}
3040
/* Check whether a hardware mailbox completion status can be read. */
static inline bool DAC960_PD_hw_mbox_status_available(void __iomem *base)
{
	unsigned char odb = readb(base + DAC960_PD_ODB_OFFSET);

	return odb & DAC960_PD_ODB_HWMBOX_STS_AVAIL;
}
3047
/* Enable interrupts via the dedicated IRQ-enable register. */
static inline void DAC960_PD_enable_intr(void __iomem *base)
{
	writeb(DAC960_PD_IRQMASK_ENABLE_IRQ, base + DAC960_PD_IRQEN_OFFSET);
}
3052
/* Disable interrupts by clearing the IRQ-enable register. */
static inline void DAC960_PD_disable_intr(void __iomem *base)
{
	writeb(0, base + DAC960_PD_IRQEN_OFFSET);
}
3057
/*
 * Write a 13-byte command image into the hardware mailbox registers.
 * PD controllers have no memory mailbox, so this is the only submit
 * path (see DAC960_PD_qcmd).
 */
static inline void DAC960_PD_write_cmd_mbox(void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	writel(mbox->words[0], base + DAC960_PD_CMDOP_OFFSET);
	writel(mbox->words[1], base + DAC960_PD_MBOX4_OFFSET);
	writel(mbox->words[2], base + DAC960_PD_MBOX8_OFFSET);
	writeb(mbox->bytes[12], base + DAC960_PD_MBOX12_OFFSET);
}
3066
/* Read the command identifier of the completion being reported. */
static inline unsigned char
DAC960_PD_read_status_cmd_ident(void __iomem *base)
{
	return readb(base + DAC960_PD_STSID_OFFSET);
}
3072
/* Read the 16-bit command completion status register. */
static inline unsigned short
DAC960_PD_read_status(void __iomem *base)
{
	return readw(base + DAC960_PD_STS_OFFSET);
}
3078
/*
 * Read a pending BIOS error message from the error status register.
 * Returns false if no message is pending; otherwise fills in the error
 * code and its two parameter bytes and acknowledges by writing 0 back.
 */
static inline bool
DAC960_PD_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	unsigned char errsts = readb(base + DAC960_PD_ERRSTS_OFFSET);

	if (!(errsts & DAC960_PD_ERRSTS_PENDING))
		return false;
	errsts &= ~DAC960_PD_ERRSTS_PENDING;
	*error = errsts;
	*param0 = readb(base + DAC960_PD_CMDOP_OFFSET);
	*param1 = readb(base + DAC960_PD_CMDID_OFFSET);
	writeb(0, base + DAC960_PD_ERRSTS_OFFSET);
	return true;
}
3094
/*
 * DAC960_PD_qcmd - submit a command on a PD-series controller
 *
 * PD has no memory mailbox, so this busy-waits (1us steps, no upper
 * bound) for the single hardware mailbox to drain before writing the
 * command and ringing the doorbell.
 */
static void DAC960_PD_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;

	while (DAC960_PD_hw_mbox_is_full(base))
		udelay(1);
	DAC960_PD_write_cmd_mbox(base, mbox);
	DAC960_PD_hw_mbox_new_cmd(base);
}
3105
/*
 * DAC960_PD_hw_init - bring up a PD-series controller
 *
 * Claims the 0x80-byte I/O port region, waits for firmware
 * initialization (a fatal BIOS message aborts with -EIO), enables the
 * MMIO interface (no mailbox init callback - PD has no memory mailbox)
 * and wires up the controller method pointers.
 *
 * NOTE(review): the error returns below do not release_region();
 * presumably the caller's failure/cleanup path releases cb->io_addr -
 * verify against the probe code.
 */
static int DAC960_PD_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	if (!request_region(cb->io_addr, 0x80, "myrb")) {
		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
			(unsigned long)cb->io_addr);
		return -EBUSY;
	}
	DAC960_PD_disable_intr(base);
	DAC960_PD_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_PD_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_PD_read_error_status(base, &error,
					      &parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -EIO;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrb_enable_mmio(cb, NULL)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_PD_reset_ctrl(base);
		return -ENODEV;
	}
	DAC960_PD_enable_intr(base);
	cb->qcmd = DAC960_PD_qcmd;
	cb->disable_intr = DAC960_PD_disable_intr;
	cb->reset = DAC960_PD_reset_ctrl;

	return 0;
}
3147
/*
 * DAC960_PD_intr_handler - interrupt handler for PD-series controllers
 *
 * PD reports completions through I/O registers rather than a memory
 * mailbox ring: each iteration reads the command id and status, acks
 * the interrupt and status, then completes the command.  Ids below 3
 * are the internal direct/monitor command blocks; larger ids map to
 * SCSI host tags (id - 3).
 */
static irqreturn_t DAC960_PD_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	while (DAC960_PD_hw_mbox_status_available(base)) {
		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;

		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = DAC960_PD_read_status(base);
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		/* Status must be read before acking, which frees the slot */
		DAC960_PD_ack_intr(base);
		DAC960_PD_ack_hw_mbox_status(base);

		if (id < 3)
			myrb_handle_cmdblk(cb, cmd_blk);
		else
			myrb_handle_scsi(cb, cmd_blk, scmd);
	}
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}
3186
/* Method table for DAC960 PD series controllers. */
static struct myrb_privdata DAC960_PD_privdata = {
	.hw_init =	DAC960_PD_hw_init,
	.irq_handler =	DAC960_PD_intr_handler,
	.mmio_size =	DAC960_PD_mmio_size,
};
3192
3193
3194/*
3195 * DAC960 P Series Controllers
3196 *
3197 * Similar to the DAC960 PD Series Controllers, but some commands have
3198 * to be translated.
3199 */
3200
/*
 * Convert an old-style ENQUIRY reply buffer in place: relocate the 64
 * payload bytes from offset 36 up to offset 132 (their position in the
 * new-style layout), then zero the vacated region in between.
 */
static inline void myrb_translate_enquiry(void *enq)
{
	unsigned char *buf = enq;

	memcpy(buf + 132, buf + 36, 64);
	memset(buf + 36, 0, 96);
}
3206
/*
 * Convert an old-style GET_DEVICE_STATE reply in place, compacting the
 * fields to the offsets the new-style structure expects.  memmove() is
 * used where source and destination ranges overlap.
 */
static inline void myrb_translate_devstate(void *state)
{
	unsigned char *buf = state;

	buf[2] = buf[3];
	memmove(buf + 4, buf + 5, 2);
	memmove(buf + 6, buf + 8, 4);
}
3213
3214static inline void myrb_translate_to_rw_command(struct myrb_cmdblk *cmd_blk)
3215{
3216        union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3217        int ldev_num = mbox->type5.ld.ldev_num;
3218
3219        mbox->bytes[3] &= 0x7;
3220        mbox->bytes[3] |= mbox->bytes[7] << 6;
3221        mbox->bytes[7] = ldev_num;
3222}
3223
3224static inline void myrb_translate_from_rw_command(struct myrb_cmdblk *cmd_blk)
3225{
3226        union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3227        int ldev_num = mbox->bytes[7];
3228
3229        mbox->bytes[7] = mbox->bytes[3] >> 6;
3230        mbox->bytes[3] &= 0x7;
3231        mbox->bytes[3] |= ldev_num << 3;
3232}
3233
3234static void DAC960_P_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
3235{
3236        void __iomem *base = cb->io_base;
3237        union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3238
3239        switch (mbox->common.opcode) {
3240        case MYRB_CMD_ENQUIRY:
3241                mbox->common.opcode = MYRB_CMD_ENQUIRY_OLD;
3242                break;
3243        case MYRB_CMD_GET_DEVICE_STATE:
3244                mbox->common.opcode = MYRB_CMD_GET_DEVICE_STATE_OLD;
3245                break;
3246        case MYRB_CMD_READ:
3247                mbox->common.opcode = MYRB_CMD_READ_OLD;
3248                myrb_translate_to_rw_command(cmd_blk);
3249                break;
3250        case MYRB_CMD_WRITE:
3251                mbox->common.opcode = MYRB_CMD_WRITE_OLD;
3252                myrb_translate_to_rw_command(cmd_blk);
3253                break;
3254        case MYRB_CMD_READ_SG:
3255                mbox->common.opcode = MYRB_CMD_READ_SG_OLD;
3256                myrb_translate_to_rw_command(cmd_blk);
3257                break;
3258        case MYRB_CMD_WRITE_SG:
3259                mbox->common.opcode = MYRB_CMD_WRITE_SG_OLD;
3260                myrb_translate_to_rw_command(cmd_blk);
3261                break;
3262        default:
3263                break;
3264        }
3265        while (DAC960_PD_hw_mbox_is_full(base))
3266                udelay(1);
3267        DAC960_PD_write_cmd_mbox(base, mbox);
3268        DAC960_PD_hw_mbox_new_cmd(base);
3269}
3270
3271
3272static int DAC960_P_hw_init(struct pci_dev *pdev,
3273                struct myrb_hba *cb, void __iomem *base)
3274{
3275        int timeout = 0;
3276        unsigned char error, parm0, parm1;
3277
3278        if (!request_region(cb->io_addr, 0x80, "myrb")) {
3279                dev_err(&pdev->dev, "IO port 0x%lx busy\n",
3280                        (unsigned long)cb->io_addr);
3281                return -EBUSY;
3282        }
3283        DAC960_PD_disable_intr(base);
3284        DAC960_PD_ack_hw_mbox_status(base);
3285        udelay(1000);
3286        while (DAC960_PD_init_in_progress(base) &&
3287               timeout < MYRB_MAILBOX_TIMEOUT) {
3288                if (DAC960_PD_read_error_status(base, &error,
3289                                                &parm0, &parm1) &&
3290                    myrb_err_status(cb, error, parm0, parm1))
3291                        return -EAGAIN;
3292                udelay(10);
3293                timeout++;
3294        }
3295        if (timeout == MYRB_MAILBOX_TIMEOUT) {
3296                dev_err(&pdev->dev,
3297                        "Timeout waiting for Controller Initialisation\n");
3298                return -ETIMEDOUT;
3299        }
3300        if (!myrb_enable_mmio(cb, NULL)) {
3301                dev_err(&pdev->dev,
3302                        "Unable to allocate DMA mapped memory\n");
3303                DAC960_PD_reset_ctrl(base);
3304                return -ETIMEDOUT;
3305        }
3306        DAC960_PD_enable_intr(base);
3307        cb->qcmd = DAC960_P_qcmd;
3308        cb->disable_intr = DAC960_PD_disable_intr;
3309        cb->reset = DAC960_PD_reset_ctrl;
3310
3311        return 0;
3312}
3313
/*
 * DAC960_P_intr_handler - interrupt handler for DAC960 P controllers
 *
 * Same completion-draining loop as DAC960_PD_intr_handler(), but after
 * reading the status it translates old-style opcodes (and the old
 * read/write mailbox layout) back into the new-style form the rest of
 * the driver expects, before handing the command off for completion.
 */
static irqreturn_t DAC960_P_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	while (DAC960_PD_hw_mbox_status_available(base)) {
		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;
		union myrb_cmd_mbox *mbox;
		enum myrb_cmd_opcode op;


		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			/* SCSI command identifiers start at 3. */
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = DAC960_PD_read_status(base);
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		/* Acknowledge only after the status has been read. */
		DAC960_PD_ack_intr(base);
		DAC960_PD_ack_hw_mbox_status(base);

		/* Nothing to translate or complete for an unknown id. */
		if (!cmd_blk)
			continue;

		/* Undo the old-style translation done by DAC960_P_qcmd(). */
		mbox = &cmd_blk->mbox;
		op = mbox->common.opcode;
		switch (op) {
		case MYRB_CMD_ENQUIRY_OLD:
			mbox->common.opcode = MYRB_CMD_ENQUIRY;
			myrb_translate_enquiry(cb->enquiry);
			break;
		case MYRB_CMD_READ_OLD:
			mbox->common.opcode = MYRB_CMD_READ;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_WRITE_OLD:
			mbox->common.opcode = MYRB_CMD_WRITE;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_READ_SG_OLD:
			mbox->common.opcode = MYRB_CMD_READ_SG;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_WRITE_SG_OLD:
			mbox->common.opcode = MYRB_CMD_WRITE_SG;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		default:
			break;
		}
		if (id < 3)
			myrb_handle_cmdblk(cb, cmd_blk);
		else
			myrb_handle_scsi(cb, cmd_blk, scmd);
	}
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}
3384
/* Method table for DAC960 P series controllers (shares PD's MMIO size). */
static struct myrb_privdata DAC960_P_privdata = {
	.hw_init =	DAC960_P_hw_init,
	.irq_handler =	DAC960_P_intr_handler,
	.mmio_size =	DAC960_PD_mmio_size,
};
3390
/*
 * myrb_detect - allocate and initialise one controller instance
 *
 * Allocates the Scsi_Host (with the myrb_hba as host-private data), maps
 * the controller register window, runs the board-specific hw_init hook
 * from the matched privdata and installs its interrupt handler.
 *
 * Return: the initialised myrb_hba, or NULL on failure (any partially
 * initialised state is torn down via myrb_cleanup()).
 */
static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
		const struct pci_device_id *entry)
{
	struct myrb_privdata *privdata =
		(struct myrb_privdata *)entry->driver_data;
	irq_handler_t irq_handler = privdata->irq_handler;
	unsigned int mmio_size = privdata->mmio_size;
	struct Scsi_Host *shost;
	struct myrb_hba *cb = NULL;

	shost = scsi_host_alloc(&myrb_template, sizeof(struct myrb_hba));
	if (!shost) {
		dev_err(&pdev->dev, "Unable to allocate Controller\n");
		return NULL;
	}
	shost->max_cmd_len = 12;
	shost->max_lun = 256;
	cb = shost_priv(shost);
	mutex_init(&cb->dcmd_mutex);
	mutex_init(&cb->dma_mutex);
	cb->pdev = pdev;

	if (pci_enable_device(pdev))
		goto failure;

	/* PD and P series expose I/O ports in BAR 0; others are MMIO-only. */
	if (privdata->hw_init == DAC960_PD_hw_init ||
	    privdata->hw_init == DAC960_P_hw_init) {
		cb->io_addr = pci_resource_start(pdev, 0);
		cb->pci_addr = pci_resource_start(pdev, 1);
	} else
		cb->pci_addr = pci_resource_start(pdev, 0);

	pci_set_drvdata(pdev, cb);
	spin_lock_init(&cb->queue_lock);
	/* Map at least one page, page-aligned around the register window. */
	if (mmio_size < PAGE_SIZE)
		mmio_size = PAGE_SIZE;
	cb->mmio_base = ioremap(cb->pci_addr & PAGE_MASK, mmio_size);
	if (cb->mmio_base == NULL) {
		dev_err(&pdev->dev,
			"Unable to map Controller Register Window\n");
		goto failure;
	}

	/* Registers live at the sub-page offset within the mapping. */
	cb->io_base = cb->mmio_base + (cb->pci_addr & ~PAGE_MASK);
	if (privdata->hw_init(pdev, cb, cb->io_base))
		goto failure;

	if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrb", cb) < 0) {
		dev_err(&pdev->dev,
			"Unable to acquire IRQ Channel %d\n", pdev->irq);
		goto failure;
	}
	cb->irq = pdev->irq;
	return cb;

failure:
	dev_err(&pdev->dev,
		"Failed to initialize Controller\n");
	/*
	 * NOTE(review): relies on myrb_cleanup() coping with a partially
	 * initialised cb (e.g. after pci_enable_device() failure, before
	 * the hw_init hook set any method pointers) — confirm.
	 */
	myrb_cleanup(cb);
	return NULL;
}
3452
3453static int myrb_probe(struct pci_dev *dev, const struct pci_device_id *entry)
3454{
3455        struct myrb_hba *cb;
3456        int ret;
3457
3458        cb = myrb_detect(dev, entry);
3459        if (!cb)
3460                return -ENODEV;
3461
3462        ret = myrb_get_hba_config(cb);
3463        if (ret < 0) {
3464                myrb_cleanup(cb);
3465                return ret;
3466        }
3467
3468        if (!myrb_create_mempools(dev, cb)) {
3469                ret = -ENOMEM;
3470                goto failed;
3471        }
3472
3473        ret = scsi_add_host(cb->host, &dev->dev);
3474        if (ret) {
3475                dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
3476                myrb_destroy_mempools(cb);
3477                goto failed;
3478        }
3479        scsi_scan_host(cb->host);
3480        return 0;
3481failed:
3482        myrb_cleanup(cb);
3483        return ret;
3484}
3485
3486
/*
 * myrb_remove - PCI remove callback
 *
 * Flushes the controller cache before teardown so no dirty data is
 * lost, then releases all controller resources and mempools.
 */
static void myrb_remove(struct pci_dev *pdev)
{
	struct myrb_hba *cb = pci_get_drvdata(pdev);

	shost_printk(KERN_NOTICE, cb->host, "Flushing Cache...");
	myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
	myrb_cleanup(cb);
	myrb_destroy_mempools(cb);
}
3496
3497
/*
 * PCI IDs of the supported controller families; driver_data points at
 * the myrb_privdata carrying the family-specific methods.  The DAC960
 * LA is matched by DEC 21285 vendor/device plus Mylex subsystem IDs.
 */
static const struct pci_device_id myrb_id_table[] = {
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_DEC,
			       PCI_DEVICE_ID_DEC_21285,
			       PCI_VENDOR_ID_MYLEX,
			       PCI_DEVICE_ID_MYLEX_DAC960_LA),
		.driver_data	= (unsigned long) &DAC960_LA_privdata,
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_PG, &DAC960_PG_privdata),
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_PD, &DAC960_PD_privdata),
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_P, &DAC960_P_privdata),
	},
	{0, },
};

MODULE_DEVICE_TABLE(pci, myrb_id_table);
3519
/* PCI driver glue: binds myrb_id_table entries to probe/remove. */
static struct pci_driver myrb_pci_driver = {
	.name		= "myrb",
	.id_table	= myrb_id_table,
	.probe		= myrb_probe,
	.remove		= myrb_remove,
};
3526
3527static int __init myrb_init_module(void)
3528{
3529        int ret;
3530
3531        myrb_raid_template = raid_class_attach(&myrb_raid_functions);
3532        if (!myrb_raid_template)
3533                return -ENODEV;
3534
3535        ret = pci_register_driver(&myrb_pci_driver);
3536        if (ret)
3537                raid_class_release(myrb_raid_template);
3538
3539        return ret;
3540}
3541
/* Module unload: unregister the PCI driver, then drop the RAID class. */
static void __exit myrb_cleanup_module(void)
{
	pci_unregister_driver(&myrb_pci_driver);
	raid_class_release(myrb_raid_template);
}
3547
/* Standard module entry/exit hooks and metadata. */
module_init(myrb_init_module);
module_exit(myrb_cleanup_module);

MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (Block interface)");
MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
MODULE_LICENSE("GPL");
3554