   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
   4 *
   5 * This driver supports the newer, SCSI-based firmware interface only.
   6 *
   7 * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
   8 *
   9 * Based on the original DAC960 driver, which has
  10 * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
  11 * Portions Copyright 2002 by Mylex (An IBM Business Unit)
  12 */
  13
  14#include <linux/module.h>
  15#include <linux/types.h>
  16#include <linux/delay.h>
  17#include <linux/interrupt.h>
  18#include <linux/pci.h>
  19#include <linux/raid_class.h>
  20#include <asm/unaligned.h>
  21#include <scsi/scsi.h>
  22#include <scsi/scsi_host.h>
  23#include <scsi/scsi_device.h>
  24#include <scsi/scsi_cmnd.h>
  25#include <scsi/scsi_tcq.h>
  26#include "myrs.h"
  27
  28static struct raid_template *myrs_raid_template;
  29
  30static struct myrs_devstate_name_entry {
  31        enum myrs_devstate state;
  32        char *name;
  33} myrs_devstate_name_list[] = {
  34        { MYRS_DEVICE_UNCONFIGURED, "Unconfigured" },
  35        { MYRS_DEVICE_ONLINE, "Online" },
  36        { MYRS_DEVICE_REBUILD, "Rebuild" },
  37        { MYRS_DEVICE_MISSING, "Missing" },
  38        { MYRS_DEVICE_SUSPECTED_CRITICAL, "SuspectedCritical" },
  39        { MYRS_DEVICE_OFFLINE, "Offline" },
  40        { MYRS_DEVICE_CRITICAL, "Critical" },
  41        { MYRS_DEVICE_SUSPECTED_DEAD, "SuspectedDead" },
  42        { MYRS_DEVICE_COMMANDED_OFFLINE, "CommandedOffline" },
  43        { MYRS_DEVICE_STANDBY, "Standby" },
  44        { MYRS_DEVICE_INVALID_STATE, "Invalid" },
  45};
  46
  47static char *myrs_devstate_name(enum myrs_devstate state)
  48{
  49        struct myrs_devstate_name_entry *entry = myrs_devstate_name_list;
  50        int i;
  51
  52        for (i = 0; i < ARRAY_SIZE(myrs_devstate_name_list); i++) {
  53                if (entry[i].state == state)
  54                        return entry[i].name;
  55        }
  56        return NULL;
  57}
  58
  59static struct myrs_raid_level_name_entry {
  60        enum myrs_raid_level level;
  61        char *name;
  62} myrs_raid_level_name_list[] = {
  63        { MYRS_RAID_LEVEL0, "RAID0" },
  64        { MYRS_RAID_LEVEL1, "RAID1" },
  65        { MYRS_RAID_LEVEL3, "RAID3 right asymmetric parity" },
  66        { MYRS_RAID_LEVEL5, "RAID5 right asymmetric parity" },
  67        { MYRS_RAID_LEVEL6, "RAID6" },
  68        { MYRS_RAID_JBOD, "JBOD" },
  69        { MYRS_RAID_NEWSPAN, "New Mylex SPAN" },
  70        { MYRS_RAID_LEVEL3F, "RAID3 fixed parity" },
  71        { MYRS_RAID_LEVEL3L, "RAID3 left symmetric parity" },
  72        { MYRS_RAID_SPAN, "Mylex SPAN" },
  73        { MYRS_RAID_LEVEL5L, "RAID5 left symmetric parity" },
  74        { MYRS_RAID_LEVELE, "RAIDE (concatenation)" },
  75        { MYRS_RAID_PHYSICAL, "Physical device" },
  76};
  77
  78static char *myrs_raid_level_name(enum myrs_raid_level level)
  79{
  80        struct myrs_raid_level_name_entry *entry = myrs_raid_level_name_list;
  81        int i;
  82
  83        for (i = 0; i < ARRAY_SIZE(myrs_raid_level_name_list); i++) {
  84                if (entry[i].level == level)
  85                        return entry[i].name;
  86        }
  87        return NULL;
  88}
  89
  90/*
  91 * myrs_reset_cmd - clears critical fields in struct myrs_cmdblk
  92 */
  93static inline void myrs_reset_cmd(struct myrs_cmdblk *cmd_blk)
  94{
  95        union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
  96
  97        memset(mbox, 0, sizeof(union myrs_cmd_mbox));
  98        cmd_blk->status = 0;
  99}
 100
 101/*
 102 * myrs_qcmd - queues Command for DAC960 V2 Series Controllers.
 103 */
/*
 * myrs_qcmd - queues Command for DAC960 V2 Series Controllers.
 *
 * Copies @cmd_blk's mailbox into the next free slot of the in-memory
 * command mailbox ring and advances the ring pointers.  Every caller
 * in this file holds cs->queue_lock around this function, so the ring
 * state is never updated concurrently.
 */
static void myrs_qcmd(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)
{
	void __iomem *base = cs->io_base;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	union myrs_cmd_mbox *next_mbox = cs->next_cmd_mbox;

	/* Controller-specific hook copies the mailbox into the ring slot. */
	cs->write_cmd_mbox(next_mbox, mbox);

	/*
	 * Kick the controller only if one of the two previously written
	 * slots has already been consumed (first word cleared).
	 * NOTE(review): the exact handshake depends on the per-controller
	 * get_cmd_mbox hook, which is not visible here -- confirm against
	 * the hook implementations before relying on this description.
	 */
	if (cs->prev_cmd_mbox1->words[0] == 0 ||
	    cs->prev_cmd_mbox2->words[0] == 0)
		cs->get_cmd_mbox(base);

	/* Track the two most recently submitted slots. */
	cs->prev_cmd_mbox2 = cs->prev_cmd_mbox1;
	cs->prev_cmd_mbox1 = next_mbox;

	/* Advance the ring pointer, wrapping at the end of the array. */
	if (++next_mbox > cs->last_cmd_mbox)
		next_mbox = cs->first_cmd_mbox;

	cs->next_cmd_mbox = next_mbox;
}
 124
 125/*
 126 * myrs_exec_cmd - executes V2 Command and waits for completion.
 127 */
/*
 * myrs_exec_cmd - executes V2 Command and waits for completion.
 *
 * Queues @cmd_blk under cs->queue_lock and then sleeps on an on-stack
 * completion, so this must be called from process context.  The
 * completion is signalled via cmd_blk->complete by whichever path
 * processes the status mailbox (not visible in this chunk).  The
 * command result is left in cmd_blk->status for the caller to read.
 */
static void myrs_exec_cmd(struct myrs_hba *cs,
		struct myrs_cmdblk *cmd_blk)
{
	DECLARE_COMPLETION_ONSTACK(complete);
	unsigned long flags;

	cmd_blk->complete = &complete;
	spin_lock_irqsave(&cs->queue_lock, flags);
	myrs_qcmd(cs, cmd_blk);
	spin_unlock_irqrestore(&cs->queue_lock, flags);

	wait_for_completion(&complete);
}
 141
 142/*
 143 * myrs_report_progress - prints progress message
 144 */
 145static void myrs_report_progress(struct myrs_hba *cs, unsigned short ldev_num,
 146                unsigned char *msg, unsigned long blocks,
 147                unsigned long size)
 148{
 149        shost_printk(KERN_INFO, cs->host,
 150                     "Logical Drive %d: %s in Progress: %d%% completed\n",
 151                     ldev_num, msg,
 152                     (100 * (int)(blocks >> 7)) / (int)(size >> 7));
 153}
 154
 155/*
 156 * myrs_get_ctlr_info - executes a Controller Information IOCTL Command
 157 */
/*
 * myrs_get_ctlr_info - executes a Controller Information IOCTL Command
 *
 * Refreshes cs->ctlr_info from the controller via a single-element
 * scatter/gather DMA transfer, serialized against other direct
 * commands by cs->dcmd_mutex.  On success, sets cs->needs_update when
 * any background operation (init/consistency-check/rebuild/expand) is
 * active, and logs a message when the logical drive counts changed.
 *
 * Returns the command status byte, or MYRS_STATUS_FAILED when the DMA
 * mapping cannot be established.
 */
static unsigned char myrs_get_ctlr_info(struct myrs_hba *cs)
{
	struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	dma_addr_t ctlr_info_addr;
	union myrs_sgl *sgl;
	unsigned char status;
	unsigned short ldev_present, ldev_critical, ldev_offline;

	/* Snapshot the old drive counts so changes can be reported below. */
	ldev_present = cs->ctlr_info->ldev_present;
	ldev_critical = cs->ctlr_info->ldev_critical;
	ldev_offline = cs->ctlr_info->ldev_offline;

	/* Temporary streaming mapping; the controller writes into it. */
	ctlr_info_addr = dma_map_single(&cs->pdev->dev, cs->ctlr_info,
					sizeof(struct myrs_ctlr_info),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cs->pdev->dev, ctlr_info_addr))
		return MYRS_STATUS_FAILED;

	mutex_lock(&cs->dcmd_mutex);
	myrs_reset_cmd(cmd_blk);
	mbox->ctlr_info.id = MYRS_DCMD_TAG;
	mbox->ctlr_info.opcode = MYRS_CMD_OP_IOCTL;
	mbox->ctlr_info.control.dma_ctrl_to_host = true;
	mbox->ctlr_info.control.no_autosense = true;
	mbox->ctlr_info.dma_size = sizeof(struct myrs_ctlr_info);
	mbox->ctlr_info.ctlr_num = 0;
	mbox->ctlr_info.ioctl_opcode = MYRS_IOCTL_GET_CTLR_INFO;
	sgl = &mbox->ctlr_info.dma_addr;
	sgl->sge[0].sge_addr = ctlr_info_addr;
	sgl->sge[0].sge_count = mbox->ctlr_info.dma_size;
	dev_dbg(&cs->host->shost_gendev, "Sending GetControllerInfo\n");
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	dma_unmap_single(&cs->pdev->dev, ctlr_info_addr,
			 sizeof(struct myrs_ctlr_info), DMA_FROM_DEVICE);
	if (status == MYRS_STATUS_SUCCESS) {
		/* Any active background operation means our cached device
		 * state may be going stale; request a rescan. */
		if (cs->ctlr_info->bg_init_active +
		    cs->ctlr_info->ldev_init_active +
		    cs->ctlr_info->pdev_init_active +
		    cs->ctlr_info->cc_active +
		    cs->ctlr_info->rbld_active +
		    cs->ctlr_info->exp_active != 0)
			cs->needs_update = true;
		if (cs->ctlr_info->ldev_present != ldev_present ||
		    cs->ctlr_info->ldev_critical != ldev_critical ||
		    cs->ctlr_info->ldev_offline != ldev_offline)
			shost_printk(KERN_INFO, cs->host,
				     "Logical drive count changes (%d/%d/%d)\n",
				     cs->ctlr_info->ldev_critical,
				     cs->ctlr_info->ldev_offline,
				     cs->ctlr_info->ldev_present);
	}

	return status;
}
 215
 216/*
 217 * myrs_get_ldev_info - executes a Logical Device Information IOCTL Command
 218 */
/*
 * myrs_get_ldev_info - executes a Logical Device Information IOCTL Command
 *
 * Refreshes @ldev_info for logical device @ldev_num.  A copy of the old
 * contents is kept so that state transitions, new error counts and
 * background-operation progress can be reported after the command
 * completes.  Serialized against other direct commands by
 * cs->dcmd_mutex.
 *
 * Returns the command status byte, or MYRS_STATUS_FAILED when the DMA
 * mapping cannot be established.
 */
static unsigned char myrs_get_ldev_info(struct myrs_hba *cs,
		unsigned short ldev_num, struct myrs_ldev_info *ldev_info)
{
	struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	dma_addr_t ldev_info_addr;
	struct myrs_ldev_info ldev_info_orig;
	union myrs_sgl *sgl;
	unsigned char status;

	/* Preserve the previous contents for the change reports below. */
	memcpy(&ldev_info_orig, ldev_info, sizeof(struct myrs_ldev_info));
	ldev_info_addr = dma_map_single(&cs->pdev->dev, ldev_info,
					sizeof(struct myrs_ldev_info),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cs->pdev->dev, ldev_info_addr))
		return MYRS_STATUS_FAILED;

	mutex_lock(&cs->dcmd_mutex);
	myrs_reset_cmd(cmd_blk);
	mbox->ldev_info.id = MYRS_DCMD_TAG;
	mbox->ldev_info.opcode = MYRS_CMD_OP_IOCTL;
	mbox->ldev_info.control.dma_ctrl_to_host = true;
	mbox->ldev_info.control.no_autosense = true;
	mbox->ldev_info.dma_size = sizeof(struct myrs_ldev_info);
	mbox->ldev_info.ldev.ldev_num = ldev_num;
	mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_GET_LDEV_INFO_VALID;
	sgl = &mbox->ldev_info.dma_addr;
	sgl->sge[0].sge_addr = ldev_info_addr;
	sgl->sge[0].sge_count = mbox->ldev_info.dma_size;
	dev_dbg(&cs->host->shost_gendev,
		"Sending GetLogicalDeviceInfoValid for ldev %d\n", ldev_num);
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	dma_unmap_single(&cs->pdev->dev, ldev_info_addr,
			 sizeof(struct myrs_ldev_info), DMA_FROM_DEVICE);
	if (status == MYRS_STATUS_SUCCESS) {
		/* Note: this shadows the parameter with the value the
		 * controller reported; presumably the two always agree. */
		unsigned short ldev_num = ldev_info->ldev_num;
		struct myrs_ldev_info *new = ldev_info;
		struct myrs_ldev_info *old = &ldev_info_orig;
		unsigned long ldev_size = new->cfg_devsize;

		/* Report device state transitions (Online/Critical/...). */
		if (new->dev_state != old->dev_state) {
			const char *name;

			name = myrs_devstate_name(new->dev_state);
			shost_printk(KERN_INFO, cs->host,
				     "Logical Drive %d is now %s\n",
				     ldev_num, name ? name : "Invalid");
		}
		/* Report any growth in the error counters. */
		if ((new->soft_errs != old->soft_errs) ||
		    (new->cmds_failed != old->cmds_failed) ||
		    (new->deferred_write_errs != old->deferred_write_errs))
			shost_printk(KERN_INFO, cs->host,
				     "Logical Drive %d Errors: Soft = %d, Failed = %d, Deferred Write = %d\n",
				     ldev_num, new->soft_errs,
				     new->cmds_failed,
				     new->deferred_write_errs);
		/* At most one background operation is reported, in
		 * priority order: init, migration, then patrol. */
		if (new->bg_init_active)
			myrs_report_progress(cs, ldev_num,
					     "Background Initialization",
					     new->bg_init_lba, ldev_size);
		else if (new->fg_init_active)
			myrs_report_progress(cs, ldev_num,
					     "Foreground Initialization",
					     new->fg_init_lba, ldev_size);
		else if (new->migration_active)
			myrs_report_progress(cs, ldev_num,
					     "Data Migration",
					     new->migration_lba, ldev_size);
		else if (new->patrol_active)
			myrs_report_progress(cs, ldev_num,
					     "Patrol Operation",
					     new->patrol_lba, ldev_size);
		/* Background init just finished: report the outcome. */
		if (old->bg_init_active && !new->bg_init_active)
			shost_printk(KERN_INFO, cs->host,
				     "Logical Drive %d: Background Initialization %s\n",
				     ldev_num,
				     (new->ldev_control.ldev_init_done ?
				      "Completed" : "Failed"));
	}
	return status;
}
 302
 303/*
 304 * myrs_get_pdev_info - executes a "Read Physical Device Information" Command
 305 */
 306static unsigned char myrs_get_pdev_info(struct myrs_hba *cs,
 307                unsigned char channel, unsigned char target, unsigned char lun,
 308                struct myrs_pdev_info *pdev_info)
 309{
 310        struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
 311        union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
 312        dma_addr_t pdev_info_addr;
 313        union myrs_sgl *sgl;
 314        unsigned char status;
 315
 316        pdev_info_addr = dma_map_single(&cs->pdev->dev, pdev_info,
 317                                        sizeof(struct myrs_pdev_info),
 318                                        DMA_FROM_DEVICE);
 319        if (dma_mapping_error(&cs->pdev->dev, pdev_info_addr))
 320                return MYRS_STATUS_FAILED;
 321
 322        mutex_lock(&cs->dcmd_mutex);
 323        myrs_reset_cmd(cmd_blk);
 324        mbox->pdev_info.opcode = MYRS_CMD_OP_IOCTL;
 325        mbox->pdev_info.id = MYRS_DCMD_TAG;
 326        mbox->pdev_info.control.dma_ctrl_to_host = true;
 327        mbox->pdev_info.control.no_autosense = true;
 328        mbox->pdev_info.dma_size = sizeof(struct myrs_pdev_info);
 329        mbox->pdev_info.pdev.lun = lun;
 330        mbox->pdev_info.pdev.target = target;
 331        mbox->pdev_info.pdev.channel = channel;
 332        mbox->pdev_info.ioctl_opcode = MYRS_IOCTL_GET_PDEV_INFO_VALID;
 333        sgl = &mbox->pdev_info.dma_addr;
 334        sgl->sge[0].sge_addr = pdev_info_addr;
 335        sgl->sge[0].sge_count = mbox->pdev_info.dma_size;
 336        dev_dbg(&cs->host->shost_gendev,
 337                "Sending GetPhysicalDeviceInfoValid for pdev %d:%d:%d\n",
 338                channel, target, lun);
 339        myrs_exec_cmd(cs, cmd_blk);
 340        status = cmd_blk->status;
 341        mutex_unlock(&cs->dcmd_mutex);
 342        dma_unmap_single(&cs->pdev->dev, pdev_info_addr,
 343                         sizeof(struct myrs_pdev_info), DMA_FROM_DEVICE);
 344        return status;
 345}
 346
 347/*
 348 * myrs_dev_op - executes a "Device Operation" Command
 349 */
 350static unsigned char myrs_dev_op(struct myrs_hba *cs,
 351                enum myrs_ioctl_opcode opcode, enum myrs_opdev opdev)
 352{
 353        struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
 354        union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
 355        unsigned char status;
 356
 357        mutex_lock(&cs->dcmd_mutex);
 358        myrs_reset_cmd(cmd_blk);
 359        mbox->dev_op.opcode = MYRS_CMD_OP_IOCTL;
 360        mbox->dev_op.id = MYRS_DCMD_TAG;
 361        mbox->dev_op.control.dma_ctrl_to_host = true;
 362        mbox->dev_op.control.no_autosense = true;
 363        mbox->dev_op.ioctl_opcode = opcode;
 364        mbox->dev_op.opdev = opdev;
 365        myrs_exec_cmd(cs, cmd_blk);
 366        status = cmd_blk->status;
 367        mutex_unlock(&cs->dcmd_mutex);
 368        return status;
 369}
 370
 371/*
 372 * myrs_translate_pdev - translates a Physical Device Channel and
 373 * TargetID into a Logical Device.
 374 */
 375static unsigned char myrs_translate_pdev(struct myrs_hba *cs,
 376                unsigned char channel, unsigned char target, unsigned char lun,
 377                struct myrs_devmap *devmap)
 378{
 379        struct pci_dev *pdev = cs->pdev;
 380        dma_addr_t devmap_addr;
 381        struct myrs_cmdblk *cmd_blk;
 382        union myrs_cmd_mbox *mbox;
 383        union myrs_sgl *sgl;
 384        unsigned char status;
 385
 386        memset(devmap, 0x0, sizeof(struct myrs_devmap));
 387        devmap_addr = dma_map_single(&pdev->dev, devmap,
 388                                     sizeof(struct myrs_devmap),
 389                                     DMA_FROM_DEVICE);
 390        if (dma_mapping_error(&pdev->dev, devmap_addr))
 391                return MYRS_STATUS_FAILED;
 392
 393        mutex_lock(&cs->dcmd_mutex);
 394        cmd_blk = &cs->dcmd_blk;
 395        mbox = &cmd_blk->mbox;
 396        mbox->pdev_info.opcode = MYRS_CMD_OP_IOCTL;
 397        mbox->pdev_info.control.dma_ctrl_to_host = true;
 398        mbox->pdev_info.control.no_autosense = true;
 399        mbox->pdev_info.dma_size = sizeof(struct myrs_devmap);
 400        mbox->pdev_info.pdev.target = target;
 401        mbox->pdev_info.pdev.channel = channel;
 402        mbox->pdev_info.pdev.lun = lun;
 403        mbox->pdev_info.ioctl_opcode = MYRS_IOCTL_XLATE_PDEV_TO_LDEV;
 404        sgl = &mbox->pdev_info.dma_addr;
 405        sgl->sge[0].sge_addr = devmap_addr;
 406        sgl->sge[0].sge_count = mbox->pdev_info.dma_size;
 407
 408        myrs_exec_cmd(cs, cmd_blk);
 409        status = cmd_blk->status;
 410        mutex_unlock(&cs->dcmd_mutex);
 411        dma_unmap_single(&pdev->dev, devmap_addr,
 412                         sizeof(struct myrs_devmap), DMA_FROM_DEVICE);
 413        return status;
 414}
 415
 416/*
 417 * myrs_get_event - executes a Get Event Command
 418 */
/*
 * myrs_get_event - executes a Get Event Command
 *
 * Retrieves event @event_num from the controller into @event_buf,
 * using the monitoring command slot (cs->mcmd_blk).  The 32-bit event
 * number is split across the mailbox's upper/lower 16-bit fields.
 *
 * NOTE(review): unlike the dcmd-based helpers above, this neither
 * takes a mutex nor calls myrs_reset_cmd() before filling the mailbox
 * -- presumably the monitoring path is single-threaded and primes the
 * mailbox elsewhere; confirm against the callers.
 *
 * Returns the command status byte, or MYRS_STATUS_FAILED when the DMA
 * mapping cannot be established.
 */
static unsigned char myrs_get_event(struct myrs_hba *cs,
		unsigned int event_num, struct myrs_event *event_buf)
{
	struct pci_dev *pdev = cs->pdev;
	dma_addr_t event_addr;
	struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	union myrs_sgl *sgl;
	unsigned char status;

	event_addr = dma_map_single(&pdev->dev, event_buf,
				    sizeof(struct myrs_event), DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, event_addr))
		return MYRS_STATUS_FAILED;

	mbox->get_event.opcode = MYRS_CMD_OP_IOCTL;
	mbox->get_event.dma_size = sizeof(struct myrs_event);
	mbox->get_event.evnum_upper = event_num >> 16;
	mbox->get_event.ctlr_num = 0;
	mbox->get_event.ioctl_opcode = MYRS_IOCTL_GET_EVENT;
	mbox->get_event.evnum_lower = event_num & 0xFFFF;
	sgl = &mbox->get_event.dma_addr;
	sgl->sge[0].sge_addr = event_addr;
	sgl->sge[0].sge_count = mbox->get_event.dma_size;
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	dma_unmap_single(&pdev->dev, event_addr,
			 sizeof(struct myrs_event), DMA_FROM_DEVICE);

	return status;
}
 450
 451/*
 452 * myrs_get_fwstatus - executes a Get Health Status Command
 453 */
 454static unsigned char myrs_get_fwstatus(struct myrs_hba *cs)
 455{
 456        struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk;
 457        union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
 458        union myrs_sgl *sgl;
 459        unsigned char status = cmd_blk->status;
 460
 461        myrs_reset_cmd(cmd_blk);
 462        mbox->common.opcode = MYRS_CMD_OP_IOCTL;
 463        mbox->common.id = MYRS_MCMD_TAG;
 464        mbox->common.control.dma_ctrl_to_host = true;
 465        mbox->common.control.no_autosense = true;
 466        mbox->common.dma_size = sizeof(struct myrs_fwstat);
 467        mbox->common.ioctl_opcode = MYRS_IOCTL_GET_HEALTH_STATUS;
 468        sgl = &mbox->common.dma_addr;
 469        sgl->sge[0].sge_addr = cs->fwstat_addr;
 470        sgl->sge[0].sge_count = mbox->ctlr_info.dma_size;
 471        dev_dbg(&cs->host->shost_gendev, "Sending GetHealthStatus\n");
 472        myrs_exec_cmd(cs, cmd_blk);
 473        status = cmd_blk->status;
 474
 475        return status;
 476}
 477
 478/*
 479 * myrs_enable_mmio_mbox - enables the Memory Mailbox Interface
 480 */
 481static bool myrs_enable_mmio_mbox(struct myrs_hba *cs,
 482                enable_mbox_t enable_mbox_fn)
 483{
 484        void __iomem *base = cs->io_base;
 485        struct pci_dev *pdev = cs->pdev;
 486        union myrs_cmd_mbox *cmd_mbox;
 487        struct myrs_stat_mbox *stat_mbox;
 488        union myrs_cmd_mbox *mbox;
 489        dma_addr_t mbox_addr;
 490        unsigned char status = MYRS_STATUS_FAILED;
 491
 492        if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
 493                if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
 494                        dev_err(&pdev->dev, "DMA mask out of range\n");
 495                        return false;
 496                }
 497
 498        /* Temporary dma mapping, used only in the scope of this function */
 499        mbox = dma_alloc_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox),
 500                                  &mbox_addr, GFP_KERNEL);
 501        if (dma_mapping_error(&pdev->dev, mbox_addr))
 502                return false;
 503
 504        /* These are the base addresses for the command memory mailbox array */
 505        cs->cmd_mbox_size = MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox);
 506        cmd_mbox = dma_alloc_coherent(&pdev->dev, cs->cmd_mbox_size,
 507                                      &cs->cmd_mbox_addr, GFP_KERNEL);
 508        if (dma_mapping_error(&pdev->dev, cs->cmd_mbox_addr)) {
 509                dev_err(&pdev->dev, "Failed to map command mailbox\n");
 510                goto out_free;
 511        }
 512        cs->first_cmd_mbox = cmd_mbox;
 513        cmd_mbox += MYRS_MAX_CMD_MBOX - 1;
 514        cs->last_cmd_mbox = cmd_mbox;
 515        cs->next_cmd_mbox = cs->first_cmd_mbox;
 516        cs->prev_cmd_mbox1 = cs->last_cmd_mbox;
 517        cs->prev_cmd_mbox2 = cs->last_cmd_mbox - 1;
 518
 519        /* These are the base addresses for the status memory mailbox array */
 520        cs->stat_mbox_size = MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox);
 521        stat_mbox = dma_alloc_coherent(&pdev->dev, cs->stat_mbox_size,
 522                                       &cs->stat_mbox_addr, GFP_KERNEL);
 523        if (dma_mapping_error(&pdev->dev, cs->stat_mbox_addr)) {
 524                dev_err(&pdev->dev, "Failed to map status mailbox\n");
 525                goto out_free;
 526        }
 527
 528        cs->first_stat_mbox = stat_mbox;
 529        stat_mbox += MYRS_MAX_STAT_MBOX - 1;
 530        cs->last_stat_mbox = stat_mbox;
 531        cs->next_stat_mbox = cs->first_stat_mbox;
 532
 533        cs->fwstat_buf = dma_alloc_coherent(&pdev->dev,
 534                                            sizeof(struct myrs_fwstat),
 535                                            &cs->fwstat_addr, GFP_KERNEL);
 536        if (dma_mapping_error(&pdev->dev, cs->fwstat_addr)) {
 537                dev_err(&pdev->dev, "Failed to map firmware health buffer\n");
 538                cs->fwstat_buf = NULL;
 539                goto out_free;
 540        }
 541        cs->ctlr_info = kzalloc(sizeof(struct myrs_ctlr_info),
 542                                GFP_KERNEL | GFP_DMA);
 543        if (!cs->ctlr_info)
 544                goto out_free;
 545
 546        cs->event_buf = kzalloc(sizeof(struct myrs_event),
 547                                GFP_KERNEL | GFP_DMA);
 548        if (!cs->event_buf)
 549                goto out_free;
 550
 551        /* Enable the Memory Mailbox Interface. */
 552        memset(mbox, 0, sizeof(union myrs_cmd_mbox));
 553        mbox->set_mbox.id = 1;
 554        mbox->set_mbox.opcode = MYRS_CMD_OP_IOCTL;
 555        mbox->set_mbox.control.no_autosense = true;
 556        mbox->set_mbox.first_cmd_mbox_size_kb =
 557                (MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox)) >> 10;
 558        mbox->set_mbox.first_stat_mbox_size_kb =
 559                (MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox)) >> 10;
 560        mbox->set_mbox.second_cmd_mbox_size_kb = 0;
 561        mbox->set_mbox.second_stat_mbox_size_kb = 0;
 562        mbox->set_mbox.sense_len = 0;
 563        mbox->set_mbox.ioctl_opcode = MYRS_IOCTL_SET_MEM_MBOX;
 564        mbox->set_mbox.fwstat_buf_size_kb = 1;
 565        mbox->set_mbox.fwstat_buf_addr = cs->fwstat_addr;
 566        mbox->set_mbox.first_cmd_mbox_addr = cs->cmd_mbox_addr;
 567        mbox->set_mbox.first_stat_mbox_addr = cs->stat_mbox_addr;
 568        status = enable_mbox_fn(base, mbox_addr);
 569
 570out_free:
 571        dma_free_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox),
 572                          mbox, mbox_addr);
 573        if (status != MYRS_STATUS_SUCCESS)
 574                dev_err(&pdev->dev, "Failed to enable mailbox, status %X\n",
 575                        status);
 576        return (status == MYRS_STATUS_SUCCESS);
 577}
 578
 579/*
 580 * myrs_get_config - reads the Configuration Information
 581 */
/*
 * myrs_get_config - reads the Configuration Information
 *
 * Fetches the controller information, derives the model name and
 * firmware version strings, rejects firmware older than 6.00-01,
 * and initializes the SCSI host limits (channels, targets, queue
 * depth, transfer size, scatter/gather) from it.  Finally logs a
 * summary of the configuration.
 *
 * Returns 0 on success or -ENODEV on failure.
 */
static int myrs_get_config(struct myrs_hba *cs)
{
	struct myrs_ctlr_info *info = cs->ctlr_info;
	struct Scsi_Host *shost = cs->host;
	unsigned char status;
	unsigned char model[20];
	unsigned char fw_version[12];
	int i, model_len;

	/* Get data into dma-able area, then copy into permanent location */
	mutex_lock(&cs->cinfo_mutex);
	status = myrs_get_ctlr_info(cs);
	mutex_unlock(&cs->cinfo_mutex);
	if (status != MYRS_STATUS_SUCCESS) {
		shost_printk(KERN_ERR, shost,
			     "Failed to get controller information\n");
		return -ENODEV;
	}

	/* Initialize the Controller Model Name and Full Model Name fields. */
	model_len = sizeof(info->ctlr_name);
	if (model_len > sizeof(model)-1)
		model_len = sizeof(model)-1;
	memcpy(model, info->ctlr_name, model_len);
	model_len--;
	/* Strip trailing blanks/NULs from the firmware-supplied name.
	 * NOTE(review): an all-blank name would walk model_len below
	 * zero -- presumably the firmware always supplies a non-blank
	 * name; confirm before hardening. */
	while (model[model_len] == ' ' || model[model_len] == '\0')
		model_len--;
	model[++model_len] = '\0';
	strcpy(cs->model_name, "DAC960 ");
	strcat(cs->model_name, model);
	/* Initialize the Controller Firmware Version field. */
	sprintf(fw_version, "%d.%02d-%02d",
		info->fw_major_version, info->fw_minor_version,
		info->fw_turn_number);
	/* Firmware before 6.00-01 lacks the health-status monitoring
	 * interface this driver depends on. */
	if (info->fw_major_version == 6 &&
	    info->fw_minor_version == 0 &&
	    info->fw_turn_number < 1) {
		shost_printk(KERN_WARNING, shost,
			"FIRMWARE VERSION %s DOES NOT PROVIDE THE CONTROLLER\n"
			"STATUS MONITORING FUNCTIONALITY NEEDED BY THIS DRIVER.\n"
			"PLEASE UPGRADE TO VERSION 6.00-01 OR ABOVE.\n",
			fw_version);
		return -ENODEV;
	}
	/* Initialize the Controller Channels and Targets. */
	shost->max_channel = info->physchan_present + info->virtchan_present;
	shost->max_id = info->max_targets[0];
	/* max_id is the largest target count across all channels. */
	for (i = 1; i < 16; i++) {
		if (!info->max_targets[i])
			continue;
		if (shost->max_id < info->max_targets[i])
			shost->max_id = info->max_targets[i];
	}

	/*
	 * Initialize the Controller Queue Depth, Driver Queue Depth,
	 * Logical Drive Count, Maximum Blocks per Command, Controller
	 * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
	 * The Driver Queue Depth must be at most three less than
	 * the Controller Queue Depth; tag '1' is reserved for
	 * direct commands, and tag '2' for monitoring commands.
	 */
	shost->can_queue = info->max_tcq - 3;
	if (shost->can_queue > MYRS_MAX_CMD_MBOX - 3)
		shost->can_queue = MYRS_MAX_CMD_MBOX - 3;
	shost->max_sectors = info->max_transfer_size;
	shost->sg_tablesize = info->max_sge;
	if (shost->sg_tablesize > MYRS_SG_LIMIT)
		shost->sg_tablesize = MYRS_SG_LIMIT;

	shost_printk(KERN_INFO, shost,
		"Configuring %s PCI RAID Controller\n", model);
	shost_printk(KERN_INFO, shost,
		"  Firmware Version: %s, Channels: %d, Memory Size: %dMB\n",
		fw_version, info->physchan_present, info->mem_size_mb);

	shost_printk(KERN_INFO, shost,
		     "  Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
		     shost->can_queue, shost->max_sectors);

	shost_printk(KERN_INFO, shost,
		     "  Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
		     shost->can_queue, shost->sg_tablesize, MYRS_SG_LIMIT);
	for (i = 0; i < info->physchan_max; i++) {
		if (!info->max_targets[i])
			continue;
		shost_printk(KERN_INFO, shost,
			     "  Device Channel %d: max %d devices\n",
			     i, info->max_targets[i]);
	}
	shost_printk(KERN_INFO, shost,
		     "  Physical: %d/%d channels, %d disks, %d devices\n",
		     info->physchan_present, info->physchan_max,
		     info->pdisk_present, info->pdev_present);

	shost_printk(KERN_INFO, shost,
		     "  Logical: %d/%d channels, %d disks\n",
		     info->virtchan_present, info->virtchan_max,
		     info->ldev_present);
	return 0;
}
 683
 684/*
 685 * myrs_log_event - prints a Controller Event message
 686 */
 687static struct {
 688        int ev_code;
 689        unsigned char *ev_msg;
 690} myrs_ev_list[] = {
 691        /* Physical Device Events (0x0000 - 0x007F) */
 692        { 0x0001, "P Online" },
 693        { 0x0002, "P Standby" },
 694        { 0x0005, "P Automatic Rebuild Started" },
 695        { 0x0006, "P Manual Rebuild Started" },
 696        { 0x0007, "P Rebuild Completed" },
 697        { 0x0008, "P Rebuild Cancelled" },
 698        { 0x0009, "P Rebuild Failed for Unknown Reasons" },
 699        { 0x000A, "P Rebuild Failed due to New Physical Device" },
 700        { 0x000B, "P Rebuild Failed due to Logical Drive Failure" },
 701        { 0x000C, "S Offline" },
 702        { 0x000D, "P Found" },
 703        { 0x000E, "P Removed" },
 704        { 0x000F, "P Unconfigured" },
 705        { 0x0010, "P Expand Capacity Started" },
 706        { 0x0011, "P Expand Capacity Completed" },
 707        { 0x0012, "P Expand Capacity Failed" },
 708        { 0x0013, "P Command Timed Out" },
 709        { 0x0014, "P Command Aborted" },
 710        { 0x0015, "P Command Retried" },
 711        { 0x0016, "P Parity Error" },
 712        { 0x0017, "P Soft Error" },
 713        { 0x0018, "P Miscellaneous Error" },
 714        { 0x0019, "P Reset" },
 715        { 0x001A, "P Active Spare Found" },
 716        { 0x001B, "P Warm Spare Found" },
 717        { 0x001C, "S Sense Data Received" },
 718        { 0x001D, "P Initialization Started" },
 719        { 0x001E, "P Initialization Completed" },
 720        { 0x001F, "P Initialization Failed" },
 721        { 0x0020, "P Initialization Cancelled" },
 722        { 0x0021, "P Failed because Write Recovery Failed" },
 723        { 0x0022, "P Failed because SCSI Bus Reset Failed" },
 724        { 0x0023, "P Failed because of Double Check Condition" },
 725        { 0x0024, "P Failed because Device Cannot Be Accessed" },
 726        { 0x0025, "P Failed because of Gross Error on SCSI Processor" },
 727        { 0x0026, "P Failed because of Bad Tag from Device" },
 728        { 0x0027, "P Failed because of Command Timeout" },
 729        { 0x0028, "P Failed because of System Reset" },
 730        { 0x0029, "P Failed because of Busy Status or Parity Error" },
 731        { 0x002A, "P Failed because Host Set Device to Failed State" },
 732        { 0x002B, "P Failed because of Selection Timeout" },
 733        { 0x002C, "P Failed because of SCSI Bus Phase Error" },
 734        { 0x002D, "P Failed because Device Returned Unknown Status" },
 735        { 0x002E, "P Failed because Device Not Ready" },
 736        { 0x002F, "P Failed because Device Not Found at Startup" },
 737        { 0x0030, "P Failed because COD Write Operation Failed" },
 738        { 0x0031, "P Failed because BDT Write Operation Failed" },
 739        { 0x0039, "P Missing at Startup" },
 740        { 0x003A, "P Start Rebuild Failed due to Physical Drive Too Small" },
 741        { 0x003C, "P Temporarily Offline Device Automatically Made Online" },
 742        { 0x003D, "P Standby Rebuild Started" },
 743        /* Logical Device Events (0x0080 - 0x00FF) */
 744        { 0x0080, "M Consistency Check Started" },
 745        { 0x0081, "M Consistency Check Completed" },
 746        { 0x0082, "M Consistency Check Cancelled" },
 747        { 0x0083, "M Consistency Check Completed With Errors" },
 748        { 0x0084, "M Consistency Check Failed due to Logical Drive Failure" },
 749        { 0x0085, "M Consistency Check Failed due to Physical Device Failure" },
 750        { 0x0086, "L Offline" },
 751        { 0x0087, "L Critical" },
 752        { 0x0088, "L Online" },
 753        { 0x0089, "M Automatic Rebuild Started" },
 754        { 0x008A, "M Manual Rebuild Started" },
 755        { 0x008B, "M Rebuild Completed" },
 756        { 0x008C, "M Rebuild Cancelled" },
 757        { 0x008D, "M Rebuild Failed for Unknown Reasons" },
 758        { 0x008E, "M Rebuild Failed due to New Physical Device" },
 759        { 0x008F, "M Rebuild Failed due to Logical Drive Failure" },
 760        { 0x0090, "M Initialization Started" },
 761        { 0x0091, "M Initialization Completed" },
 762        { 0x0092, "M Initialization Cancelled" },
 763        { 0x0093, "M Initialization Failed" },
 764        { 0x0094, "L Found" },
 765        { 0x0095, "L Deleted" },
 766        { 0x0096, "M Expand Capacity Started" },
 767        { 0x0097, "M Expand Capacity Completed" },
 768        { 0x0098, "M Expand Capacity Failed" },
 769        { 0x0099, "L Bad Block Found" },
 770        { 0x009A, "L Size Changed" },
 771        { 0x009B, "L Type Changed" },
 772        { 0x009C, "L Bad Data Block Found" },
 773        { 0x009E, "L Read of Data Block in BDT" },
 774        { 0x009F, "L Write Back Data for Disk Block Lost" },
 775        { 0x00A0, "L Temporarily Offline RAID-5/3 Drive Made Online" },
 776        { 0x00A1, "L Temporarily Offline RAID-6/1/0/7 Drive Made Online" },
 777        { 0x00A2, "L Standby Rebuild Started" },
 778        /* Fault Management Events (0x0100 - 0x017F) */
 779        { 0x0140, "E Fan %d Failed" },
 780        { 0x0141, "E Fan %d OK" },
 781        { 0x0142, "E Fan %d Not Present" },
 782        { 0x0143, "E Power Supply %d Failed" },
 783        { 0x0144, "E Power Supply %d OK" },
 784        { 0x0145, "E Power Supply %d Not Present" },
 785        { 0x0146, "E Temperature Sensor %d Temperature Exceeds Safe Limit" },
 786        { 0x0147, "E Temperature Sensor %d Temperature Exceeds Working Limit" },
 787        { 0x0148, "E Temperature Sensor %d Temperature Normal" },
 788        { 0x0149, "E Temperature Sensor %d Not Present" },
 789        { 0x014A, "E Enclosure Management Unit %d Access Critical" },
 790        { 0x014B, "E Enclosure Management Unit %d Access OK" },
 791        { 0x014C, "E Enclosure Management Unit %d Access Offline" },
 792        /* Controller Events (0x0180 - 0x01FF) */
 793        { 0x0181, "C Cache Write Back Error" },
 794        { 0x0188, "C Battery Backup Unit Found" },
 795        { 0x0189, "C Battery Backup Unit Charge Level Low" },
 796        { 0x018A, "C Battery Backup Unit Charge Level OK" },
 797        { 0x0193, "C Installation Aborted" },
 798        { 0x0195, "C Battery Backup Unit Physically Removed" },
 799        { 0x0196, "C Memory Error During Warm Boot" },
 800        { 0x019E, "C Memory Soft ECC Error Corrected" },
 801        { 0x019F, "C Memory Hard ECC Error Corrected" },
 802        { 0x01A2, "C Battery Backup Unit Failed" },
 803        { 0x01AB, "C Mirror Race Recovery Failed" },
 804        { 0x01AC, "C Mirror Race on Critical Drive" },
 805        /* Controller Internal Processor Events */
 806        { 0x0380, "C Internal Controller Hung" },
 807        { 0x0381, "C Internal Controller Firmware Breakpoint" },
 808        { 0x0390, "C Internal Controller i960 Processor Specific Error" },
 809        { 0x03A0, "C Internal Controller StrongARM Processor Specific Error" },
 810        { 0, "" }
 811};
 812
 813static void myrs_log_event(struct myrs_hba *cs, struct myrs_event *ev)
 814{
 815        unsigned char msg_buf[MYRS_LINE_BUFFER_SIZE];
 816        int ev_idx = 0, ev_code;
 817        unsigned char ev_type, *ev_msg;
 818        struct Scsi_Host *shost = cs->host;
 819        struct scsi_device *sdev;
 820        struct scsi_sense_hdr sshdr = {0};
 821        unsigned char sense_info[4];
 822        unsigned char cmd_specific[4];
 823
 824        if (ev->ev_code == 0x1C) {
 825                if (!scsi_normalize_sense(ev->sense_data, 40, &sshdr)) {
 826                        memset(&sshdr, 0x0, sizeof(sshdr));
 827                        memset(sense_info, 0x0, sizeof(sense_info));
 828                        memset(cmd_specific, 0x0, sizeof(cmd_specific));
 829                } else {
 830                        memcpy(sense_info, &ev->sense_data[3], 4);
 831                        memcpy(cmd_specific, &ev->sense_data[7], 4);
 832                }
 833        }
 834        if (sshdr.sense_key == VENDOR_SPECIFIC &&
 835            (sshdr.asc == 0x80 || sshdr.asc == 0x81))
 836                ev->ev_code = ((sshdr.asc - 0x80) << 8 | sshdr.ascq);
 837        while (true) {
 838                ev_code = myrs_ev_list[ev_idx].ev_code;
 839                if (ev_code == ev->ev_code || ev_code == 0)
 840                        break;
 841                ev_idx++;
 842        }
 843        ev_type = myrs_ev_list[ev_idx].ev_msg[0];
 844        ev_msg = &myrs_ev_list[ev_idx].ev_msg[2];
 845        if (ev_code == 0) {
 846                shost_printk(KERN_WARNING, shost,
 847                             "Unknown Controller Event Code %04X\n",
 848                             ev->ev_code);
 849                return;
 850        }
 851        switch (ev_type) {
 852        case 'P':
 853                sdev = scsi_device_lookup(shost, ev->channel,
 854                                          ev->target, 0);
 855                sdev_printk(KERN_INFO, sdev, "event %d: Physical Device %s\n",
 856                            ev->ev_seq, ev_msg);
 857                if (sdev && sdev->hostdata &&
 858                    sdev->channel < cs->ctlr_info->physchan_present) {
 859                        struct myrs_pdev_info *pdev_info = sdev->hostdata;
 860
 861                        switch (ev->ev_code) {
 862                        case 0x0001:
 863                        case 0x0007:
 864                                pdev_info->dev_state = MYRS_DEVICE_ONLINE;
 865                                break;
 866                        case 0x0002:
 867                                pdev_info->dev_state = MYRS_DEVICE_STANDBY;
 868                                break;
 869                        case 0x000C:
 870                                pdev_info->dev_state = MYRS_DEVICE_OFFLINE;
 871                                break;
 872                        case 0x000E:
 873                                pdev_info->dev_state = MYRS_DEVICE_MISSING;
 874                                break;
 875                        case 0x000F:
 876                                pdev_info->dev_state = MYRS_DEVICE_UNCONFIGURED;
 877                                break;
 878                        }
 879                }
 880                break;
 881        case 'L':
 882                shost_printk(KERN_INFO, shost,
 883                             "event %d: Logical Drive %d %s\n",
 884                             ev->ev_seq, ev->lun, ev_msg);
 885                cs->needs_update = true;
 886                break;
 887        case 'M':
 888                shost_printk(KERN_INFO, shost,
 889                             "event %d: Logical Drive %d %s\n",
 890                             ev->ev_seq, ev->lun, ev_msg);
 891                cs->needs_update = true;
 892                break;
 893        case 'S':
 894                if (sshdr.sense_key == NO_SENSE ||
 895                    (sshdr.sense_key == NOT_READY &&
 896                     sshdr.asc == 0x04 && (sshdr.ascq == 0x01 ||
 897                                            sshdr.ascq == 0x02)))
 898                        break;
 899                shost_printk(KERN_INFO, shost,
 900                             "event %d: Physical Device %d:%d %s\n",
 901                             ev->ev_seq, ev->channel, ev->target, ev_msg);
 902                shost_printk(KERN_INFO, shost,
 903                             "Physical Device %d:%d Sense Key = %X, ASC = %02X, ASCQ = %02X\n",
 904                             ev->channel, ev->target,
 905                             sshdr.sense_key, sshdr.asc, sshdr.ascq);
 906                shost_printk(KERN_INFO, shost,
 907                             "Physical Device %d:%d Sense Information = %02X%02X%02X%02X %02X%02X%02X%02X\n",
 908                             ev->channel, ev->target,
 909                             sense_info[0], sense_info[1],
 910                             sense_info[2], sense_info[3],
 911                             cmd_specific[0], cmd_specific[1],
 912                             cmd_specific[2], cmd_specific[3]);
 913                break;
 914        case 'E':
 915                if (cs->disable_enc_msg)
 916                        break;
 917                sprintf(msg_buf, ev_msg, ev->lun);
 918                shost_printk(KERN_INFO, shost, "event %d: Enclosure %d %s\n",
 919                             ev->ev_seq, ev->target, msg_buf);
 920                break;
 921        case 'C':
 922                shost_printk(KERN_INFO, shost, "event %d: Controller %s\n",
 923                             ev->ev_seq, ev_msg);
 924                break;
 925        default:
 926                shost_printk(KERN_INFO, shost,
 927                             "event %d: Unknown Event Code %04X\n",
 928                             ev->ev_seq, ev->ev_code);
 929                break;
 930        }
 931}
 932
 933/*
 934 * SCSI sysfs interface functions
 935 */
 936static ssize_t raid_state_show(struct device *dev,
 937                struct device_attribute *attr, char *buf)
 938{
 939        struct scsi_device *sdev = to_scsi_device(dev);
 940        struct myrs_hba *cs = shost_priv(sdev->host);
 941        int ret;
 942
 943        if (!sdev->hostdata)
 944                return snprintf(buf, 16, "Unknown\n");
 945
 946        if (sdev->channel >= cs->ctlr_info->physchan_present) {
 947                struct myrs_ldev_info *ldev_info = sdev->hostdata;
 948                const char *name;
 949
 950                name = myrs_devstate_name(ldev_info->dev_state);
 951                if (name)
 952                        ret = snprintf(buf, 32, "%s\n", name);
 953                else
 954                        ret = snprintf(buf, 32, "Invalid (%02X)\n",
 955                                       ldev_info->dev_state);
 956        } else {
 957                struct myrs_pdev_info *pdev_info;
 958                const char *name;
 959
 960                pdev_info = sdev->hostdata;
 961                name = myrs_devstate_name(pdev_info->dev_state);
 962                if (name)
 963                        ret = snprintf(buf, 32, "%s\n", name);
 964                else
 965                        ret = snprintf(buf, 32, "Invalid (%02X)\n",
 966                                       pdev_info->dev_state);
 967        }
 968        return ret;
 969}
 970
 971static ssize_t raid_state_store(struct device *dev,
 972                struct device_attribute *attr, const char *buf, size_t count)
 973{
 974        struct scsi_device *sdev = to_scsi_device(dev);
 975        struct myrs_hba *cs = shost_priv(sdev->host);
 976        struct myrs_cmdblk *cmd_blk;
 977        union myrs_cmd_mbox *mbox;
 978        enum myrs_devstate new_state;
 979        unsigned short ldev_num;
 980        unsigned char status;
 981
 982        if (!strncmp(buf, "offline", 7) ||
 983            !strncmp(buf, "kill", 4))
 984                new_state = MYRS_DEVICE_OFFLINE;
 985        else if (!strncmp(buf, "online", 6))
 986                new_state = MYRS_DEVICE_ONLINE;
 987        else if (!strncmp(buf, "standby", 7))
 988                new_state = MYRS_DEVICE_STANDBY;
 989        else
 990                return -EINVAL;
 991
 992        if (sdev->channel < cs->ctlr_info->physchan_present) {
 993                struct myrs_pdev_info *pdev_info = sdev->hostdata;
 994                struct myrs_devmap *pdev_devmap =
 995                        (struct myrs_devmap *)&pdev_info->rsvd13;
 996
 997                if (pdev_info->dev_state == new_state) {
 998                        sdev_printk(KERN_INFO, sdev,
 999                                    "Device already in %s\n",
1000                                    myrs_devstate_name(new_state));
1001                        return count;
1002                }
1003                status = myrs_translate_pdev(cs, sdev->channel, sdev->id,
1004                                             sdev->lun, pdev_devmap);
1005                if (status != MYRS_STATUS_SUCCESS)
1006                        return -ENXIO;
1007                ldev_num = pdev_devmap->ldev_num;
1008        } else {
1009                struct myrs_ldev_info *ldev_info = sdev->hostdata;
1010
1011                if (ldev_info->dev_state == new_state) {
1012                        sdev_printk(KERN_INFO, sdev,
1013                                    "Device already in %s\n",
1014                                    myrs_devstate_name(new_state));
1015                        return count;
1016                }
1017                ldev_num = ldev_info->ldev_num;
1018        }
1019        mutex_lock(&cs->dcmd_mutex);
1020        cmd_blk = &cs->dcmd_blk;
1021        myrs_reset_cmd(cmd_blk);
1022        mbox = &cmd_blk->mbox;
1023        mbox->common.opcode = MYRS_CMD_OP_IOCTL;
1024        mbox->common.id = MYRS_DCMD_TAG;
1025        mbox->common.control.dma_ctrl_to_host = true;
1026        mbox->common.control.no_autosense = true;
1027        mbox->set_devstate.ioctl_opcode = MYRS_IOCTL_SET_DEVICE_STATE;
1028        mbox->set_devstate.state = new_state;
1029        mbox->set_devstate.ldev.ldev_num = ldev_num;
1030        myrs_exec_cmd(cs, cmd_blk);
1031        status = cmd_blk->status;
1032        mutex_unlock(&cs->dcmd_mutex);
1033        if (status == MYRS_STATUS_SUCCESS) {
1034                if (sdev->channel < cs->ctlr_info->physchan_present) {
1035                        struct myrs_pdev_info *pdev_info = sdev->hostdata;
1036
1037                        pdev_info->dev_state = new_state;
1038                } else {
1039                        struct myrs_ldev_info *ldev_info = sdev->hostdata;
1040
1041                        ldev_info->dev_state = new_state;
1042                }
1043                sdev_printk(KERN_INFO, sdev,
1044                            "Set device state to %s\n",
1045                            myrs_devstate_name(new_state));
1046                return count;
1047        }
1048        sdev_printk(KERN_INFO, sdev,
1049                    "Failed to set device state to %s, status 0x%02x\n",
1050                    myrs_devstate_name(new_state), status);
1051        return -EINVAL;
1052}
1053static DEVICE_ATTR_RW(raid_state);
1054
1055static ssize_t raid_level_show(struct device *dev,
1056                struct device_attribute *attr, char *buf)
1057{
1058        struct scsi_device *sdev = to_scsi_device(dev);
1059        struct myrs_hba *cs = shost_priv(sdev->host);
1060        const char *name = NULL;
1061
1062        if (!sdev->hostdata)
1063                return snprintf(buf, 16, "Unknown\n");
1064
1065        if (sdev->channel >= cs->ctlr_info->physchan_present) {
1066                struct myrs_ldev_info *ldev_info;
1067
1068                ldev_info = sdev->hostdata;
1069                name = myrs_raid_level_name(ldev_info->raid_level);
1070                if (!name)
1071                        return snprintf(buf, 32, "Invalid (%02X)\n",
1072                                        ldev_info->dev_state);
1073
1074        } else
1075                name = myrs_raid_level_name(MYRS_RAID_PHYSICAL);
1076
1077        return snprintf(buf, 32, "%s\n", name);
1078}
1079static DEVICE_ATTR_RO(raid_level);
1080
1081static ssize_t rebuild_show(struct device *dev,
1082                struct device_attribute *attr, char *buf)
1083{
1084        struct scsi_device *sdev = to_scsi_device(dev);
1085        struct myrs_hba *cs = shost_priv(sdev->host);
1086        struct myrs_ldev_info *ldev_info;
1087        unsigned short ldev_num;
1088        unsigned char status;
1089
1090        if (sdev->channel < cs->ctlr_info->physchan_present)
1091                return snprintf(buf, 32, "physical device - not rebuilding\n");
1092
1093        ldev_info = sdev->hostdata;
1094        ldev_num = ldev_info->ldev_num;
1095        status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
1096        if (status != MYRS_STATUS_SUCCESS) {
1097                sdev_printk(KERN_INFO, sdev,
1098                            "Failed to get device information, status 0x%02x\n",
1099                            status);
1100                return -EIO;
1101        }
1102        if (ldev_info->rbld_active) {
1103                return snprintf(buf, 32, "rebuilding block %zu of %zu\n",
1104                                (size_t)ldev_info->rbld_lba,
1105                                (size_t)ldev_info->cfg_devsize);
1106        } else
1107                return snprintf(buf, 32, "not rebuilding\n");
1108}
1109
1110static ssize_t rebuild_store(struct device *dev,
1111                struct device_attribute *attr, const char *buf, size_t count)
1112{
1113        struct scsi_device *sdev = to_scsi_device(dev);
1114        struct myrs_hba *cs = shost_priv(sdev->host);
1115        struct myrs_ldev_info *ldev_info;
1116        struct myrs_cmdblk *cmd_blk;
1117        union myrs_cmd_mbox *mbox;
1118        unsigned short ldev_num;
1119        unsigned char status;
1120        int rebuild, ret;
1121
1122        if (sdev->channel < cs->ctlr_info->physchan_present)
1123                return -EINVAL;
1124
1125        ldev_info = sdev->hostdata;
1126        if (!ldev_info)
1127                return -ENXIO;
1128        ldev_num = ldev_info->ldev_num;
1129
1130        ret = kstrtoint(buf, 0, &rebuild);
1131        if (ret)
1132                return ret;
1133
1134        status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
1135        if (status != MYRS_STATUS_SUCCESS) {
1136                sdev_printk(KERN_INFO, sdev,
1137                            "Failed to get device information, status 0x%02x\n",
1138                            status);
1139                return -EIO;
1140        }
1141
1142        if (rebuild && ldev_info->rbld_active) {
1143                sdev_printk(KERN_INFO, sdev,
1144                            "Rebuild Not Initiated; already in progress\n");
1145                return -EALREADY;
1146        }
1147        if (!rebuild && !ldev_info->rbld_active) {
1148                sdev_printk(KERN_INFO, sdev,
1149                            "Rebuild Not Cancelled; no rebuild in progress\n");
1150                return count;
1151        }
1152
1153        mutex_lock(&cs->dcmd_mutex);
1154        cmd_blk = &cs->dcmd_blk;
1155        myrs_reset_cmd(cmd_blk);
1156        mbox = &cmd_blk->mbox;
1157        mbox->common.opcode = MYRS_CMD_OP_IOCTL;
1158        mbox->common.id = MYRS_DCMD_TAG;
1159        mbox->common.control.dma_ctrl_to_host = true;
1160        mbox->common.control.no_autosense = true;
1161        if (rebuild) {
1162                mbox->ldev_info.ldev.ldev_num = ldev_num;
1163                mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_RBLD_DEVICE_START;
1164        } else {
1165                mbox->ldev_info.ldev.ldev_num = ldev_num;
1166                mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_RBLD_DEVICE_STOP;
1167        }
1168        myrs_exec_cmd(cs, cmd_blk);
1169        status = cmd_blk->status;
1170        mutex_unlock(&cs->dcmd_mutex);
1171        if (status) {
1172                sdev_printk(KERN_INFO, sdev,
1173                            "Rebuild Not %s, status 0x%02x\n",
1174                            rebuild ? "Initiated" : "Cancelled", status);
1175                ret = -EIO;
1176        } else {
1177                sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
1178                            rebuild ? "Initiated" : "Cancelled");
1179                ret = count;
1180        }
1181
1182        return ret;
1183}
1184static DEVICE_ATTR_RW(rebuild);
1185
1186static ssize_t consistency_check_show(struct device *dev,
1187                struct device_attribute *attr, char *buf)
1188{
1189        struct scsi_device *sdev = to_scsi_device(dev);
1190        struct myrs_hba *cs = shost_priv(sdev->host);
1191        struct myrs_ldev_info *ldev_info;
1192        unsigned short ldev_num;
1193
1194        if (sdev->channel < cs->ctlr_info->physchan_present)
1195                return snprintf(buf, 32, "physical device - not checking\n");
1196
1197        ldev_info = sdev->hostdata;
1198        if (!ldev_info)
1199                return -ENXIO;
1200        ldev_num = ldev_info->ldev_num;
1201        myrs_get_ldev_info(cs, ldev_num, ldev_info);
1202        if (ldev_info->cc_active)
1203                return snprintf(buf, 32, "checking block %zu of %zu\n",
1204                                (size_t)ldev_info->cc_lba,
1205                                (size_t)ldev_info->cfg_devsize);
1206        else
1207                return snprintf(buf, 32, "not checking\n");
1208}
1209
1210static ssize_t consistency_check_store(struct device *dev,
1211                struct device_attribute *attr, const char *buf, size_t count)
1212{
1213        struct scsi_device *sdev = to_scsi_device(dev);
1214        struct myrs_hba *cs = shost_priv(sdev->host);
1215        struct myrs_ldev_info *ldev_info;
1216        struct myrs_cmdblk *cmd_blk;
1217        union myrs_cmd_mbox *mbox;
1218        unsigned short ldev_num;
1219        unsigned char status;
1220        int check, ret;
1221
1222        if (sdev->channel < cs->ctlr_info->physchan_present)
1223                return -EINVAL;
1224
1225        ldev_info = sdev->hostdata;
1226        if (!ldev_info)
1227                return -ENXIO;
1228        ldev_num = ldev_info->ldev_num;
1229
1230        ret = kstrtoint(buf, 0, &check);
1231        if (ret)
1232                return ret;
1233
1234        status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
1235        if (status != MYRS_STATUS_SUCCESS) {
1236                sdev_printk(KERN_INFO, sdev,
1237                            "Failed to get device information, status 0x%02x\n",
1238                            status);
1239                return -EIO;
1240        }
1241        if (check && ldev_info->cc_active) {
1242                sdev_printk(KERN_INFO, sdev,
1243                            "Consistency Check Not Initiated; "
1244                            "already in progress\n");
1245                return -EALREADY;
1246        }
1247        if (!check && !ldev_info->cc_active) {
1248                sdev_printk(KERN_INFO, sdev,
1249                            "Consistency Check Not Cancelled; "
1250                            "check not in progress\n");
1251                return count;
1252        }
1253
1254        mutex_lock(&cs->dcmd_mutex);
1255        cmd_blk = &cs->dcmd_blk;
1256        myrs_reset_cmd(cmd_blk);
1257        mbox = &cmd_blk->mbox;
1258        mbox->common.opcode = MYRS_CMD_OP_IOCTL;
1259        mbox->common.id = MYRS_DCMD_TAG;
1260        mbox->common.control.dma_ctrl_to_host = true;
1261        mbox->common.control.no_autosense = true;
1262        if (check) {
1263                mbox->cc.ldev.ldev_num = ldev_num;
1264                mbox->cc.ioctl_opcode = MYRS_IOCTL_CC_START;
1265                mbox->cc.restore_consistency = true;
1266                mbox->cc.initialized_area_only = false;
1267        } else {
1268                mbox->cc.ldev.ldev_num = ldev_num;
1269                mbox->cc.ioctl_opcode = MYRS_IOCTL_CC_STOP;
1270        }
1271        myrs_exec_cmd(cs, cmd_blk);
1272        status = cmd_blk->status;
1273        mutex_unlock(&cs->dcmd_mutex);
1274        if (status != MYRS_STATUS_SUCCESS) {
1275                sdev_printk(KERN_INFO, sdev,
1276                            "Consistency Check Not %s, status 0x%02x\n",
1277                            check ? "Initiated" : "Cancelled", status);
1278                ret = -EIO;
1279        } else {
1280                sdev_printk(KERN_INFO, sdev, "Consistency Check %s\n",
1281                            check ? "Initiated" : "Cancelled");
1282                ret = count;
1283        }
1284
1285        return ret;
1286}
1287static DEVICE_ATTR_RW(consistency_check);
1288
/* Per-device sysfs attributes, exported via the SCSI host template. */
static struct device_attribute *myrs_sdev_attrs[] = {
	&dev_attr_consistency_check,
	&dev_attr_rebuild,
	&dev_attr_raid_state,
	&dev_attr_raid_level,
	NULL,	/* sentinel */
};
1296
1297static ssize_t serial_show(struct device *dev,
1298                struct device_attribute *attr, char *buf)
1299{
1300        struct Scsi_Host *shost = class_to_shost(dev);
1301        struct myrs_hba *cs = shost_priv(shost);
1302        char serial[17];
1303
1304        memcpy(serial, cs->ctlr_info->serial_number, 16);
1305        serial[16] = '\0';
1306        return snprintf(buf, 16, "%s\n", serial);
1307}
1308static DEVICE_ATTR_RO(serial);
1309
1310static ssize_t ctlr_num_show(struct device *dev,
1311                struct device_attribute *attr, char *buf)
1312{
1313        struct Scsi_Host *shost = class_to_shost(dev);
1314        struct myrs_hba *cs = shost_priv(shost);
1315
1316        return snprintf(buf, 20, "%d\n", cs->host->host_no);
1317}
1318static DEVICE_ATTR_RO(ctlr_num);
1319
/* Mapping of myrs_cpu_type values to printable processor names. */
static struct myrs_cpu_type_tbl {
	enum myrs_cpu_type type;
	char *name;
} myrs_cpu_type_names[] = {
	{ MYRS_CPUTYPE_i960CA, "i960CA" },
	{ MYRS_CPUTYPE_i960RD, "i960RD" },
	{ MYRS_CPUTYPE_i960RN, "i960RN" },
	{ MYRS_CPUTYPE_i960RP, "i960RP" },
	{ MYRS_CPUTYPE_NorthBay, "NorthBay" },
	{ MYRS_CPUTYPE_StrongArm, "StrongARM" },
	{ MYRS_CPUTYPE_i960RM, "i960RM" },
};
1332
1333static ssize_t processor_show(struct device *dev,
1334                struct device_attribute *attr, char *buf)
1335{
1336        struct Scsi_Host *shost = class_to_shost(dev);
1337        struct myrs_hba *cs = shost_priv(shost);
1338        struct myrs_cpu_type_tbl *tbl;
1339        const char *first_processor = NULL;
1340        const char *second_processor = NULL;
1341        struct myrs_ctlr_info *info = cs->ctlr_info;
1342        ssize_t ret;
1343        int i;
1344
1345        if (info->cpu[0].cpu_count) {
1346                tbl = myrs_cpu_type_names;
1347                for (i = 0; i < ARRAY_SIZE(myrs_cpu_type_names); i++) {
1348                        if (tbl[i].type == info->cpu[0].cpu_type) {
1349                                first_processor = tbl[i].name;
1350                                break;
1351                        }
1352                }
1353        }
1354        if (info->cpu[1].cpu_count) {
1355                tbl = myrs_cpu_type_names;
1356                for (i = 0; i < ARRAY_SIZE(myrs_cpu_type_names); i++) {
1357                        if (tbl[i].type == info->cpu[1].cpu_type) {
1358                                second_processor = tbl[i].name;
1359                                break;
1360                        }
1361                }
1362        }
1363        if (first_processor && second_processor)
1364                ret = snprintf(buf, 64, "1: %s (%s, %d cpus)\n"
1365                               "2: %s (%s, %d cpus)\n",
1366                               info->cpu[0].cpu_name,
1367                               first_processor, info->cpu[0].cpu_count,
1368                               info->cpu[1].cpu_name,
1369                               second_processor, info->cpu[1].cpu_count);
1370        else if (first_processor && !second_processor)
1371                ret = snprintf(buf, 64, "1: %s (%s, %d cpus)\n2: absent\n",
1372                               info->cpu[0].cpu_name,
1373                               first_processor, info->cpu[0].cpu_count);
1374        else if (!first_processor && second_processor)
1375                ret = snprintf(buf, 64, "1: absent\n2: %s (%s, %d cpus)\n",
1376                               info->cpu[1].cpu_name,
1377                               second_processor, info->cpu[1].cpu_count);
1378        else
1379                ret = snprintf(buf, 64, "1: absent\n2: absent\n");
1380
1381        return ret;
1382}
1383static DEVICE_ATTR_RO(processor);
1384
1385static ssize_t model_show(struct device *dev,
1386                struct device_attribute *attr, char *buf)
1387{
1388        struct Scsi_Host *shost = class_to_shost(dev);
1389        struct myrs_hba *cs = shost_priv(shost);
1390
1391        return snprintf(buf, 28, "%s\n", cs->model_name);
1392}
1393static DEVICE_ATTR_RO(model);
1394
1395static ssize_t ctlr_type_show(struct device *dev,
1396                struct device_attribute *attr, char *buf)
1397{
1398        struct Scsi_Host *shost = class_to_shost(dev);
1399        struct myrs_hba *cs = shost_priv(shost);
1400
1401        return snprintf(buf, 4, "%d\n", cs->ctlr_info->ctlr_type);
1402}
1403static DEVICE_ATTR_RO(ctlr_type);
1404
1405static ssize_t cache_size_show(struct device *dev,
1406                struct device_attribute *attr, char *buf)
1407{
1408        struct Scsi_Host *shost = class_to_shost(dev);
1409        struct myrs_hba *cs = shost_priv(shost);
1410
1411        return snprintf(buf, 8, "%d MB\n", cs->ctlr_info->cache_size_mb);
1412}
1413static DEVICE_ATTR_RO(cache_size);
1414
1415static ssize_t firmware_show(struct device *dev,
1416                struct device_attribute *attr, char *buf)
1417{
1418        struct Scsi_Host *shost = class_to_shost(dev);
1419        struct myrs_hba *cs = shost_priv(shost);
1420
1421        return snprintf(buf, 16, "%d.%02d-%02d\n",
1422                        cs->ctlr_info->fw_major_version,
1423                        cs->ctlr_info->fw_minor_version,
1424                        cs->ctlr_info->fw_turn_number);
1425}
1426static DEVICE_ATTR_RO(firmware);
1427
/*
 * discovery_store - sysfs 'discovery' attribute (write-only)
 *
 * Any write sends a START_DISCOVERY IOCTL to the controller via the
 * shared direct-command block, then resets the event sequence and runs
 * the monitor work synchronously so newly discovered devices are
 * processed before returning.
 */
static ssize_t discovery_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct Scsi_Host *shost = class_to_shost(dev);
        struct myrs_hba *cs = shost_priv(shost);
        struct myrs_cmdblk *cmd_blk;
        union myrs_cmd_mbox *mbox;
        unsigned char status;

        /* The dcmd block is shared controller-wide; serialize access. */
        mutex_lock(&cs->dcmd_mutex);
        cmd_blk = &cs->dcmd_blk;
        myrs_reset_cmd(cmd_blk);
        mbox = &cmd_blk->mbox;
        mbox->common.opcode = MYRS_CMD_OP_IOCTL;
        mbox->common.id = MYRS_DCMD_TAG;
        mbox->common.control.dma_ctrl_to_host = true;
        mbox->common.control.no_autosense = true;
        mbox->common.ioctl_opcode = MYRS_IOCTL_START_DISCOVERY;
        myrs_exec_cmd(cs, cmd_blk);
        status = cmd_blk->status;
        mutex_unlock(&cs->dcmd_mutex);
        if (status != MYRS_STATUS_SUCCESS) {
                shost_printk(KERN_INFO, shost,
                             "Discovery Not Initiated, status %02X\n",
                             status);
                return -EINVAL;
        }
        shost_printk(KERN_INFO, shost, "Discovery Initiated\n");
        /* Restart event processing and force a controller info refresh. */
        cs->next_evseq = 0;
        cs->needs_update = true;
        /* Run the monitor immediately and wait for it to complete. */
        queue_delayed_work(cs->work_q, &cs->monitor_work, 1);
        flush_delayed_work(&cs->monitor_work);
        shost_printk(KERN_INFO, shost, "Discovery Completed\n");

        return count;
}
static DEVICE_ATTR_WO(discovery);
1465
1466static ssize_t flush_cache_store(struct device *dev,
1467                struct device_attribute *attr, const char *buf, size_t count)
1468{
1469        struct Scsi_Host *shost = class_to_shost(dev);
1470        struct myrs_hba *cs = shost_priv(shost);
1471        unsigned char status;
1472
1473        status = myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA,
1474                             MYRS_RAID_CONTROLLER);
1475        if (status == MYRS_STATUS_SUCCESS) {
1476                shost_printk(KERN_INFO, shost, "Cache Flush Completed\n");
1477                return count;
1478        }
1479        shost_printk(KERN_INFO, shost,
1480                     "Cache Flush failed, status 0x%02x\n", status);
1481        return -EIO;
1482}
1483static DEVICE_ATTR_WO(flush_cache);
1484
1485static ssize_t disable_enclosure_messages_show(struct device *dev,
1486                struct device_attribute *attr, char *buf)
1487{
1488        struct Scsi_Host *shost = class_to_shost(dev);
1489        struct myrs_hba *cs = shost_priv(shost);
1490
1491        return snprintf(buf, 3, "%d\n", cs->disable_enc_msg);
1492}
1493
1494static ssize_t disable_enclosure_messages_store(struct device *dev,
1495                struct device_attribute *attr, const char *buf, size_t count)
1496{
1497        struct scsi_device *sdev = to_scsi_device(dev);
1498        struct myrs_hba *cs = shost_priv(sdev->host);
1499        int value, ret;
1500
1501        ret = kstrtoint(buf, 0, &value);
1502        if (ret)
1503                return ret;
1504
1505        if (value > 2)
1506                return -EINVAL;
1507
1508        cs->disable_enc_msg = value;
1509        return count;
1510}
1511static DEVICE_ATTR_RW(disable_enclosure_messages);
1512
/* Host-level sysfs attributes exported for each controller instance. */
static struct device_attribute *myrs_shost_attrs[] = {
        &dev_attr_serial,
        &dev_attr_ctlr_num,
        &dev_attr_processor,
        &dev_attr_model,
        &dev_attr_ctlr_type,
        &dev_attr_cache_size,
        &dev_attr_firmware,
        &dev_attr_discovery,
        &dev_attr_flush_cache,
        &dev_attr_disable_enclosure_messages,
        NULL,
};
1526
1527/*
1528 * SCSI midlayer interface
1529 */
1530static int myrs_host_reset(struct scsi_cmnd *scmd)
1531{
1532        struct Scsi_Host *shost = scmd->device->host;
1533        struct myrs_hba *cs = shost_priv(shost);
1534
1535        cs->reset(cs->io_base);
1536        return SUCCESS;
1537}
1538
/*
 * myrs_mode_sense - emulate MODE SENSE for a logical device
 *
 * Logical devices are not real SCSI targets, so the caching mode page
 * (0x08) is synthesized from the firmware's logical device info and
 * copied into the command's data buffer.
 */
static void myrs_mode_sense(struct myrs_hba *cs, struct scsi_cmnd *scmd,
                struct myrs_ldev_info *ldev_info)
{
        unsigned char modes[32], *mode_pg;
        bool dbd;
        size_t mode_len;

        /* DBD (Disable Block Descriptors) bit in CDB byte 1 */
        dbd = (scmd->cmnd[1] & 0x08) == 0x08;
        if (dbd) {
                /* No block descriptor: page follows the 4-byte header. */
                mode_len = 24;
                mode_pg = &modes[4];
        } else {
                /* 8-byte block descriptor between header and mode page. */
                mode_len = 32;
                mode_pg = &modes[12];
        }
        memset(modes, 0, sizeof(modes));
        modes[0] = mode_len - 1; /* mode data length, excluding itself */
        modes[2] = 0x10; /* Enable FUA */
        if (ldev_info->ldev_control.wce == MYRS_LOGICALDEVICE_RO)
                modes[2] |= 0x80; /* write-protected */
        if (!dbd) {
                unsigned char *block_desc = &modes[4];

                modes[3] = 8; /* block descriptor length */
                put_unaligned_be32(ldev_info->cfg_devsize, &block_desc[0]);
                /*
                 * NOTE(review): a 32-bit store at offset 5 of the 8-byte
                 * descriptor spills one byte past it (SPC defines block
                 * length as a 24-bit field at bytes 5-7); the spilled byte
                 * is overwritten by mode_pg[0] below — confirm intended.
                 */
                put_unaligned_be32(ldev_info->devsize_bytes, &block_desc[5]);
        }
        mode_pg[0] = 0x08; /* caching mode page code */
        mode_pg[1] = 0x12; /* page length */
        if (ldev_info->ldev_control.rce == MYRS_READCACHE_DISABLED)
                mode_pg[2] |= 0x01; /* RCD: read cache disabled */
        if (ldev_info->ldev_control.wce == MYRS_WRITECACHE_ENABLED ||
            ldev_info->ldev_control.wce == MYRS_INTELLIGENT_WRITECACHE_ENABLED)
                mode_pg[2] |= 0x04; /* WCE: write cache enabled */
        if (ldev_info->cacheline_size) {
                mode_pg[2] |= 0x08;
                put_unaligned_be16(1 << ldev_info->cacheline_size,
                                   &mode_pg[14]);
        }

        scsi_sg_copy_from_buffer(scmd, modes, mode_len);
}
1581
/*
 * myrs_queuecommand - queue a SCSI command to the controller
 *
 * REPORT LUNS and (for logical devices) MODE SENSE are handled in the
 * driver; everything else is translated into a firmware mailbox.
 * CDBs of up to 10 bytes use the inline SCSI_10 mailbox format; longer
 * CDBs go into a separately allocated DCDB buffer with the SCSI_255
 * format.  Scatter/gather lists with more than two elements are placed
 * in an external SG buffer from cs->sg_pool.
 */
static int myrs_queuecommand(struct Scsi_Host *shost,
                struct scsi_cmnd *scmd)
{
        struct request *rq = scsi_cmd_to_rq(scmd);
        struct myrs_hba *cs = shost_priv(shost);
        struct myrs_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
        union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
        struct scsi_device *sdev = scmd->device;
        union myrs_sgl *hw_sge;
        dma_addr_t sense_addr;
        struct scatterlist *sgl;
        unsigned long flags, timeout;
        int nsge;

        /* Device vanished (no ldev/pdev info attached). */
        if (!scmd->device->hostdata) {
                scmd->result = (DID_NO_CONNECT << 16);
                scmd->scsi_done(scmd);
                return 0;
        }

        switch (scmd->cmnd[0]) {
        case REPORT_LUNS:
                /* Not supported by the firmware; fail with invalid opcode. */
                scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x20, 0x0);
                scmd->scsi_done(scmd);
                return 0;
        case MODE_SENSE:
                /* Emulate MODE SENSE for logical devices only. */
                if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
                        struct myrs_ldev_info *ldev_info = sdev->hostdata;

                        if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
                            (scmd->cmnd[2] & 0x3F) != 0x08) {
                                /* Illegal request, invalid field in CDB */
                                scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
                        } else {
                                myrs_mode_sense(cs, scmd, ldev_info);
                                scmd->result = (DID_OK << 16);
                        }
                        scmd->scsi_done(scmd);
                        return 0;
                }
                break;
        }

        myrs_reset_cmd(cmd_blk);
        /* Per-command sense buffer, freed in myrs_handle_scsi(). */
        cmd_blk->sense = dma_pool_alloc(cs->sense_pool, GFP_ATOMIC,
                                        &sense_addr);
        if (!cmd_blk->sense)
                return SCSI_MLQUEUE_HOST_BUSY;
        cmd_blk->sense_addr = sense_addr;

        timeout = rq->timeout;
        if (scmd->cmd_len <= 10) {
                /* Short CDB: fits inline into the SCSI_10 mailbox. */
                if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
                        struct myrs_ldev_info *ldev_info = sdev->hostdata;

                        mbox->SCSI_10.opcode = MYRS_CMD_OP_SCSI_10;
                        mbox->SCSI_10.pdev.lun = ldev_info->lun;
                        mbox->SCSI_10.pdev.target = ldev_info->target;
                        mbox->SCSI_10.pdev.channel = ldev_info->channel;
                        mbox->SCSI_10.pdev.ctlr = 0;
                } else {
                        /* Physical device: pass the CDB through. */
                        mbox->SCSI_10.opcode = MYRS_CMD_OP_SCSI_10_PASSTHRU;
                        mbox->SCSI_10.pdev.lun = sdev->lun;
                        mbox->SCSI_10.pdev.target = sdev->id;
                        mbox->SCSI_10.pdev.channel = sdev->channel;
                }
                /* IDs 1 and 2 are reserved for driver-internal commands. */
                mbox->SCSI_10.id = rq->tag + 3;
                mbox->SCSI_10.control.dma_ctrl_to_host =
                        (scmd->sc_data_direction == DMA_FROM_DEVICE);
                if (rq->cmd_flags & REQ_FUA)
                        mbox->SCSI_10.control.fua = true;
                mbox->SCSI_10.dma_size = scsi_bufflen(scmd);
                mbox->SCSI_10.sense_addr = cmd_blk->sense_addr;
                mbox->SCSI_10.sense_len = MYRS_SENSE_SIZE;
                mbox->SCSI_10.cdb_len = scmd->cmd_len;
                /* Firmware timeout field scales to minutes above 60s. */
                if (timeout > 60) {
                        mbox->SCSI_10.tmo.tmo_scale = MYRS_TMO_SCALE_MINUTES;
                        mbox->SCSI_10.tmo.tmo_val = timeout / 60;
                } else {
                        mbox->SCSI_10.tmo.tmo_scale = MYRS_TMO_SCALE_SECONDS;
                        mbox->SCSI_10.tmo.tmo_val = timeout;
                }
                memcpy(&mbox->SCSI_10.cdb, scmd->cmnd, scmd->cmd_len);
                hw_sge = &mbox->SCSI_10.dma_addr;
                cmd_blk->dcdb = NULL;
        } else {
                /* Long CDB: needs an external DCDB buffer (SCSI_255). */
                dma_addr_t dcdb_dma;

                cmd_blk->dcdb = dma_pool_alloc(cs->dcdb_pool, GFP_ATOMIC,
                                               &dcdb_dma);
                if (!cmd_blk->dcdb) {
                        /* Unwind the sense allocation above. */
                        dma_pool_free(cs->sense_pool, cmd_blk->sense,
                                      cmd_blk->sense_addr);
                        cmd_blk->sense = NULL;
                        cmd_blk->sense_addr = 0;
                        return SCSI_MLQUEUE_HOST_BUSY;
                }
                cmd_blk->dcdb_dma = dcdb_dma;
                if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
                        struct myrs_ldev_info *ldev_info = sdev->hostdata;

                        mbox->SCSI_255.opcode = MYRS_CMD_OP_SCSI_256;
                        mbox->SCSI_255.pdev.lun = ldev_info->lun;
                        mbox->SCSI_255.pdev.target = ldev_info->target;
                        mbox->SCSI_255.pdev.channel = ldev_info->channel;
                        mbox->SCSI_255.pdev.ctlr = 0;
                } else {
                        /* Physical device: pass the CDB through. */
                        mbox->SCSI_255.opcode = MYRS_CMD_OP_SCSI_255_PASSTHRU;
                        mbox->SCSI_255.pdev.lun = sdev->lun;
                        mbox->SCSI_255.pdev.target = sdev->id;
                        mbox->SCSI_255.pdev.channel = sdev->channel;
                }
                /* IDs 1 and 2 are reserved for driver-internal commands. */
                mbox->SCSI_255.id = rq->tag + 3;
                mbox->SCSI_255.control.dma_ctrl_to_host =
                        (scmd->sc_data_direction == DMA_FROM_DEVICE);
                if (rq->cmd_flags & REQ_FUA)
                        mbox->SCSI_255.control.fua = true;
                mbox->SCSI_255.dma_size = scsi_bufflen(scmd);
                mbox->SCSI_255.sense_addr = cmd_blk->sense_addr;
                mbox->SCSI_255.sense_len = MYRS_SENSE_SIZE;
                mbox->SCSI_255.cdb_len = scmd->cmd_len;
                mbox->SCSI_255.cdb_addr = cmd_blk->dcdb_dma;
                /* Firmware timeout field scales to minutes above 60s. */
                if (timeout > 60) {
                        mbox->SCSI_255.tmo.tmo_scale = MYRS_TMO_SCALE_MINUTES;
                        mbox->SCSI_255.tmo.tmo_val = timeout / 60;
                } else {
                        mbox->SCSI_255.tmo.tmo_scale = MYRS_TMO_SCALE_SECONDS;
                        mbox->SCSI_255.tmo.tmo_val = timeout;
                }
                memcpy(cmd_blk->dcdb, scmd->cmnd, scmd->cmd_len);
                hw_sge = &mbox->SCSI_255.dma_addr;
        }
        if (scmd->sc_data_direction == DMA_NONE)
                goto submit;
        nsge = scsi_dma_map(scmd);
        if (nsge == 1) {
                /* Single segment fits directly in the mailbox SGE. */
                sgl = scsi_sglist(scmd);
                hw_sge->sge[0].sge_addr = (u64)sg_dma_address(sgl);
                hw_sge->sge[0].sge_count = (u64)sg_dma_len(sgl);
        } else {
                struct myrs_sge *hw_sgl;
                dma_addr_t hw_sgl_addr;
                int i;

                if (nsge > 2) {
                        /* External SG list; the mailbox points at it. */
                        hw_sgl = dma_pool_alloc(cs->sg_pool, GFP_ATOMIC,
                                                &hw_sgl_addr);
                        if (WARN_ON(!hw_sgl)) {
                                /* Unwind DCDB and sense allocations. */
                                if (cmd_blk->dcdb) {
                                        dma_pool_free(cs->dcdb_pool,
                                                      cmd_blk->dcdb,
                                                      cmd_blk->dcdb_dma);
                                        cmd_blk->dcdb = NULL;
                                        cmd_blk->dcdb_dma = 0;
                                }
                                dma_pool_free(cs->sense_pool,
                                              cmd_blk->sense,
                                              cmd_blk->sense_addr);
                                cmd_blk->sense = NULL;
                                cmd_blk->sense_addr = 0;
                                return SCSI_MLQUEUE_HOST_BUSY;
                        }
                        cmd_blk->sgl = hw_sgl;
                        cmd_blk->sgl_addr = hw_sgl_addr;
                        if (scmd->cmd_len <= 10)
                                mbox->SCSI_10.control.add_sge_mem = true;
                        else
                                mbox->SCSI_255.control.add_sge_mem = true;
                        hw_sge->ext.sge0_len = nsge;
                        hw_sge->ext.sge0_addr = cmd_blk->sgl_addr;
                } else
                        hw_sgl = hw_sge->sge; /* two SGEs fit inline */

                scsi_for_each_sg(scmd, sgl, nsge, i) {
                        if (WARN_ON(!hw_sgl)) {
                                scsi_dma_unmap(scmd);
                                scmd->result = (DID_ERROR << 16);
                                scmd->scsi_done(scmd);
                                return 0;
                        }
                        hw_sgl->sge_addr = (u64)sg_dma_address(sgl);
                        hw_sgl->sge_count = (u64)sg_dma_len(sgl);
                        hw_sgl++;
                }
        }
submit:
        /* Hand the mailbox to the hardware under the queue lock. */
        spin_lock_irqsave(&cs->queue_lock, flags);
        myrs_qcmd(cs, cmd_blk);
        spin_unlock_irqrestore(&cs->queue_lock, flags);

        return 0;
}
1774
1775static unsigned short myrs_translate_ldev(struct myrs_hba *cs,
1776                struct scsi_device *sdev)
1777{
1778        unsigned short ldev_num;
1779        unsigned int chan_offset =
1780                sdev->channel - cs->ctlr_info->physchan_present;
1781
1782        ldev_num = sdev->id + chan_offset * sdev->host->max_id;
1783
1784        return ldev_num;
1785}
1786
/*
 * myrs_slave_alloc - per-device setup at scan time
 *
 * Allocates and fills either a myrs_ldev_info (logical device, virtual
 * channel) or a myrs_pdev_info (physical device) from firmware and
 * stores it in sdev->hostdata.  For logical devices the RAID level is
 * also registered with the raid transport class.
 */
static int myrs_slave_alloc(struct scsi_device *sdev)
{
        struct myrs_hba *cs = shost_priv(sdev->host);
        unsigned char status;

        if (sdev->channel > sdev->host->max_channel)
                return 0;

        if (sdev->channel >= cs->ctlr_info->physchan_present) {
                /* Virtual channel: logical device. */
                struct myrs_ldev_info *ldev_info;
                unsigned short ldev_num;

                /* Logical devices only exist at LUN 0. */
                if (sdev->lun > 0)
                        return -ENXIO;

                ldev_num = myrs_translate_ldev(cs, sdev);

                /* GFP_DMA: the buffer is filled by a firmware DMA —
                 * presumably requires DMA-able memory; see myrs.h. */
                ldev_info = kzalloc(sizeof(*ldev_info), GFP_KERNEL|GFP_DMA);
                if (!ldev_info)
                        return -ENOMEM;

                status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
                if (status != MYRS_STATUS_SUCCESS) {
                        /*
                         * NOTE(review): unlike the physical branch below
                         * this returns 0 with hostdata left NULL; later
                         * paths (queuecommand) check hostdata for that.
                         */
                        sdev->hostdata = NULL;
                        kfree(ldev_info);
                } else {
                        enum raid_level level;

                        dev_dbg(&sdev->sdev_gendev,
                                "Logical device mapping %d:%d:%d -> %d\n",
                                ldev_info->channel, ldev_info->target,
                                ldev_info->lun, ldev_info->ldev_num);

                        sdev->hostdata = ldev_info;
                        /* Map firmware RAID levels onto the transport
                         * class enumeration. */
                        switch (ldev_info->raid_level) {
                        case MYRS_RAID_LEVEL0:
                                level = RAID_LEVEL_LINEAR;
                                break;
                        case MYRS_RAID_LEVEL1:
                                level = RAID_LEVEL_1;
                                break;
                        case MYRS_RAID_LEVEL3:
                        case MYRS_RAID_LEVEL3F:
                        case MYRS_RAID_LEVEL3L:
                                level = RAID_LEVEL_3;
                                break;
                        case MYRS_RAID_LEVEL5:
                        case MYRS_RAID_LEVEL5L:
                                level = RAID_LEVEL_5;
                                break;
                        case MYRS_RAID_LEVEL6:
                                level = RAID_LEVEL_6;
                                break;
                        case MYRS_RAID_LEVELE:
                        case MYRS_RAID_NEWSPAN:
                        case MYRS_RAID_SPAN:
                                level = RAID_LEVEL_LINEAR;
                                break;
                        case MYRS_RAID_JBOD:
                                level = RAID_LEVEL_JBOD;
                                break;
                        default:
                                level = RAID_LEVEL_UNKNOWN;
                                break;
                        }
                        raid_set_level(myrs_raid_template,
                                       &sdev->sdev_gendev, level);
                        if (ldev_info->dev_state != MYRS_DEVICE_ONLINE) {
                                const char *name;

                                name = myrs_devstate_name(ldev_info->dev_state);
                                sdev_printk(KERN_DEBUG, sdev,
                                            "logical device in state %s\n",
                                            name ? name : "Invalid");
                        }
                }
        } else {
                /* Physical channel: real device behind the controller. */
                struct myrs_pdev_info *pdev_info;

                pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
                if (!pdev_info)
                        return -ENOMEM;

                status = myrs_get_pdev_info(cs, sdev->channel,
                                            sdev->id, sdev->lun,
                                            pdev_info);
                if (status != MYRS_STATUS_SUCCESS) {
                        sdev->hostdata = NULL;
                        kfree(pdev_info);
                        return -ENXIO;
                }
                sdev->hostdata = pdev_info;
        }
        return 0;
}
1882
1883static int myrs_slave_configure(struct scsi_device *sdev)
1884{
1885        struct myrs_hba *cs = shost_priv(sdev->host);
1886        struct myrs_ldev_info *ldev_info;
1887
1888        if (sdev->channel > sdev->host->max_channel)
1889                return -ENXIO;
1890
1891        if (sdev->channel < cs->ctlr_info->physchan_present) {
1892                /* Skip HBA device */
1893                if (sdev->type == TYPE_RAID)
1894                        return -ENXIO;
1895                sdev->no_uld_attach = 1;
1896                return 0;
1897        }
1898        if (sdev->lun != 0)
1899                return -ENXIO;
1900
1901        ldev_info = sdev->hostdata;
1902        if (!ldev_info)
1903                return -ENXIO;
1904        if (ldev_info->ldev_control.wce == MYRS_WRITECACHE_ENABLED ||
1905            ldev_info->ldev_control.wce == MYRS_INTELLIGENT_WRITECACHE_ENABLED)
1906                sdev->wce_default_on = 1;
1907        sdev->tagged_supported = 1;
1908        return 0;
1909}
1910
1911static void myrs_slave_destroy(struct scsi_device *sdev)
1912{
1913        kfree(sdev->hostdata);
1914}
1915
/* SCSI host template shared by all myrs controller instances. */
static struct scsi_host_template myrs_template = {
        .module                 = THIS_MODULE,
        .name                   = "DAC960",
        .proc_name              = "myrs",
        .queuecommand           = myrs_queuecommand,
        .eh_host_reset_handler  = myrs_host_reset,
        .slave_alloc            = myrs_slave_alloc,
        .slave_configure        = myrs_slave_configure,
        .slave_destroy          = myrs_slave_destroy,
        .cmd_size               = sizeof(struct myrs_cmdblk),
        .shost_attrs            = myrs_shost_attrs,
        .sdev_attrs             = myrs_sdev_attrs,
        .this_id                = -1,
};
1930
1931static struct myrs_hba *myrs_alloc_host(struct pci_dev *pdev,
1932                const struct pci_device_id *entry)
1933{
1934        struct Scsi_Host *shost;
1935        struct myrs_hba *cs;
1936
1937        shost = scsi_host_alloc(&myrs_template, sizeof(struct myrs_hba));
1938        if (!shost)
1939                return NULL;
1940
1941        shost->max_cmd_len = 16;
1942        shost->max_lun = 256;
1943        cs = shost_priv(shost);
1944        mutex_init(&cs->dcmd_mutex);
1945        mutex_init(&cs->cinfo_mutex);
1946        cs->host = shost;
1947
1948        return cs;
1949}
1950
1951/*
1952 * RAID template functions
1953 */
1954
1955/**
1956 * myrs_is_raid - return boolean indicating device is raid volume
1957 * @dev: the device struct object
1958 */
1959static int
1960myrs_is_raid(struct device *dev)
1961{
1962        struct scsi_device *sdev = to_scsi_device(dev);
1963        struct myrs_hba *cs = shost_priv(sdev->host);
1964
1965        return (sdev->channel >= cs->ctlr_info->physchan_present) ? 1 : 0;
1966}
1967
1968/**
1969 * myrs_get_resync - get raid volume resync percent complete
1970 * @dev: the device struct object
1971 */
1972static void
1973myrs_get_resync(struct device *dev)
1974{
1975        struct scsi_device *sdev = to_scsi_device(dev);
1976        struct myrs_hba *cs = shost_priv(sdev->host);
1977        struct myrs_ldev_info *ldev_info = sdev->hostdata;
1978        u64 percent_complete = 0;
1979
1980        if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info)
1981                return;
1982        if (ldev_info->rbld_active) {
1983                unsigned short ldev_num = ldev_info->ldev_num;
1984
1985                myrs_get_ldev_info(cs, ldev_num, ldev_info);
1986                percent_complete = ldev_info->rbld_lba * 100;
1987                do_div(percent_complete, ldev_info->cfg_devsize);
1988        }
1989        raid_set_resync(myrs_raid_template, dev, percent_complete);
1990}
1991
1992/**
1993 * myrs_get_state - get raid volume status
1994 * @dev: the device struct object
1995 */
1996static void
1997myrs_get_state(struct device *dev)
1998{
1999        struct scsi_device *sdev = to_scsi_device(dev);
2000        struct myrs_hba *cs = shost_priv(sdev->host);
2001        struct myrs_ldev_info *ldev_info = sdev->hostdata;
2002        enum raid_state state = RAID_STATE_UNKNOWN;
2003
2004        if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info)
2005                state = RAID_STATE_UNKNOWN;
2006        else {
2007                switch (ldev_info->dev_state) {
2008                case MYRS_DEVICE_ONLINE:
2009                        state = RAID_STATE_ACTIVE;
2010                        break;
2011                case MYRS_DEVICE_SUSPECTED_CRITICAL:
2012                case MYRS_DEVICE_CRITICAL:
2013                        state = RAID_STATE_DEGRADED;
2014                        break;
2015                case MYRS_DEVICE_REBUILD:
2016                        state = RAID_STATE_RESYNCING;
2017                        break;
2018                case MYRS_DEVICE_UNCONFIGURED:
2019                case MYRS_DEVICE_INVALID_STATE:
2020                        state = RAID_STATE_UNKNOWN;
2021                        break;
2022                default:
2023                        state = RAID_STATE_OFFLINE;
2024                }
2025        }
2026        raid_set_state(myrs_raid_template, dev, state);
2027}
2028
/* raid transport class hooks for logical volumes. */
static struct raid_function_template myrs_raid_functions = {
        .cookie         = &myrs_template,
        .is_raid        = myrs_is_raid,
        .get_resync     = myrs_get_resync,
        .get_state      = myrs_get_state,
};
2035
2036/*
2037 * PCI interface functions
2038 */
/* Best-effort flush of the controller cache; status is ignored. */
static void myrs_flush_cache(struct myrs_hba *cs)
{
        myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA, MYRS_RAID_CONTROLLER);
}
2043
/*
 * myrs_handle_scsi - complete a SCSI command
 *
 * Copies firmware sense data into the midlayer sense buffer on
 * failure, releases the per-command sense/DCDB/SG pool allocations
 * made in myrs_queuecommand(), records the residual and translates the
 * firmware status into a SCSI result before calling scsi_done.
 */
static void myrs_handle_scsi(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk,
                struct scsi_cmnd *scmd)
{
        unsigned char status;

        if (!cmd_blk)
                return;

        scsi_dma_unmap(scmd);
        status = cmd_blk->status;
        if (cmd_blk->sense) {
                if (status == MYRS_STATUS_FAILED && cmd_blk->sense_len) {
                        /* Copy at most SCSI_SENSE_BUFFERSIZE bytes. */
                        unsigned int sense_len = SCSI_SENSE_BUFFERSIZE;

                        if (sense_len > cmd_blk->sense_len)
                                sense_len = cmd_blk->sense_len;
                        memcpy(scmd->sense_buffer, cmd_blk->sense, sense_len);
                }
                dma_pool_free(cs->sense_pool, cmd_blk->sense,
                              cmd_blk->sense_addr);
                cmd_blk->sense = NULL;
                cmd_blk->sense_addr = 0;
        }
        if (cmd_blk->dcdb) {
                dma_pool_free(cs->dcdb_pool, cmd_blk->dcdb,
                              cmd_blk->dcdb_dma);
                cmd_blk->dcdb = NULL;
                cmd_blk->dcdb_dma = 0;
        }
        if (cmd_blk->sgl) {
                dma_pool_free(cs->sg_pool, cmd_blk->sgl,
                              cmd_blk->sgl_addr);
                cmd_blk->sgl = NULL;
                cmd_blk->sgl_addr = 0;
        }
        if (cmd_blk->residual)
                scsi_set_resid(scmd, cmd_blk->residual);
        /* Non-responsive device: report a bad target to the midlayer. */
        if (status == MYRS_STATUS_DEVICE_NON_RESPONSIVE ||
            status == MYRS_STATUS_DEVICE_NON_RESPONSIVE2)
                scmd->result = (DID_BAD_TARGET << 16);
        else
                scmd->result = (DID_OK << 16) | status;
        scmd->scsi_done(scmd);
}
2088
2089static void myrs_handle_cmdblk(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)
2090{
2091        if (!cmd_blk)
2092                return;
2093
2094        if (cmd_blk->complete) {
2095                complete(cmd_blk->complete);
2096                cmd_blk->complete = NULL;
2097        }
2098}
2099
/*
 * myrs_monitor - periodic controller monitoring work
 *
 * Polls firmware status, refreshes controller info when flagged,
 * drains one pending event per tick, refreshes logical device info
 * while background operations run, and re-arms itself with a short
 * interval while events are pending or a long one when idle.
 */
static void myrs_monitor(struct work_struct *work)
{
        struct myrs_hba *cs = container_of(work, struct myrs_hba,
                                           monitor_work.work);
        struct Scsi_Host *shost = cs->host;
        struct myrs_ctlr_info *info = cs->ctlr_info;
        unsigned int epoch = cs->fwstat_buf->epoch;
        unsigned long interval = MYRS_PRIMARY_MONITOR_INTERVAL;
        unsigned char status;

        dev_dbg(&shost->shost_gendev, "monitor tick\n");

        status = myrs_get_fwstatus(cs);

        if (cs->needs_update) {
                cs->needs_update = false;
                mutex_lock(&cs->cinfo_mutex);
                status = myrs_get_ctlr_info(cs);
                mutex_unlock(&cs->cinfo_mutex);
        }
        /*
         * NOTE(review): the '> 0' on this subtraction relies on the
         * signedness of the sequence fields for wraparound handling —
         * verify against the declarations in myrs.h.
         */
        if (cs->fwstat_buf->next_evseq - cs->next_evseq > 0) {
                status = myrs_get_event(cs, cs->next_evseq,
                                        cs->event_buf);
                if (status == MYRS_STATUS_SUCCESS) {
                        myrs_log_event(cs, cs->event_buf);
                        cs->next_evseq++;
                        /* More events may be pending; poll again soon. */
                        interval = 1;
                }
        }

        if (time_after(jiffies, cs->secondary_monitor_time
                       + MYRS_SECONDARY_MONITOR_INTERVAL))
                cs->secondary_monitor_time = jiffies;

        /* Any background operation active? Refresh all ldev info. */
        if (info->bg_init_active +
            info->ldev_init_active +
            info->pdev_init_active +
            info->cc_active +
            info->rbld_active +
            info->exp_active != 0) {
                struct scsi_device *sdev;

                shost_for_each_device(sdev, shost) {
                        struct myrs_ldev_info *ldev_info;
                        int ldev_num;

                        if (sdev->channel < info->physchan_present)
                                continue;
                        ldev_info = sdev->hostdata;
                        if (!ldev_info)
                                continue;
                        ldev_num = ldev_info->ldev_num;
                        myrs_get_ldev_info(cs, ldev_num, ldev_info);
                }
                cs->needs_update = true;
        }
        /* Nothing changed and no update due: back off to the slow rate. */
        if (epoch == cs->epoch &&
            cs->fwstat_buf->next_evseq == cs->next_evseq &&
            (cs->needs_update == false ||
             time_before(jiffies, cs->primary_monitor_time
                         + MYRS_PRIMARY_MONITOR_INTERVAL))) {
                interval = MYRS_SECONDARY_MONITOR_INTERVAL;
        }

        if (interval > 1)
                cs->primary_monitor_time = jiffies;
        queue_delayed_work(cs->work_q, &cs->monitor_work, interval);
}
2168
2169static bool myrs_create_mempools(struct pci_dev *pdev, struct myrs_hba *cs)
2170{
2171        struct Scsi_Host *shost = cs->host;
2172        size_t elem_size, elem_align;
2173
2174        elem_align = sizeof(struct myrs_sge);
2175        elem_size = shost->sg_tablesize * elem_align;
2176        cs->sg_pool = dma_pool_create("myrs_sg", &pdev->dev,
2177                                      elem_size, elem_align, 0);
2178        if (cs->sg_pool == NULL) {
2179                shost_printk(KERN_ERR, shost,
2180                             "Failed to allocate SG pool\n");
2181                return false;
2182        }
2183
2184        cs->sense_pool = dma_pool_create("myrs_sense", &pdev->dev,
2185                                         MYRS_SENSE_SIZE, sizeof(int), 0);
2186        if (cs->sense_pool == NULL) {
2187                dma_pool_destroy(cs->sg_pool);
2188                cs->sg_pool = NULL;
2189                shost_printk(KERN_ERR, shost,
2190                             "Failed to allocate sense data pool\n");
2191                return false;
2192        }
2193
2194        cs->dcdb_pool = dma_pool_create("myrs_dcdb", &pdev->dev,
2195                                        MYRS_DCDB_SIZE,
2196                                        sizeof(unsigned char), 0);
2197        if (!cs->dcdb_pool) {
2198                dma_pool_destroy(cs->sg_pool);
2199                cs->sg_pool = NULL;
2200                dma_pool_destroy(cs->sense_pool);
2201                cs->sense_pool = NULL;
2202                shost_printk(KERN_ERR, shost,
2203                             "Failed to allocate DCDB pool\n");
2204                return false;
2205        }
2206
2207        snprintf(cs->work_q_name, sizeof(cs->work_q_name),
2208                 "myrs_wq_%d", shost->host_no);
2209        cs->work_q = create_singlethread_workqueue(cs->work_q_name);
2210        if (!cs->work_q) {
2211                dma_pool_destroy(cs->dcdb_pool);
2212                cs->dcdb_pool = NULL;
2213                dma_pool_destroy(cs->sg_pool);
2214                cs->sg_pool = NULL;
2215                dma_pool_destroy(cs->sense_pool);
2216                cs->sense_pool = NULL;
2217                shost_printk(KERN_ERR, shost,
2218                             "Failed to create workqueue\n");
2219                return false;
2220        }
2221
2222        /* Initialize the Monitoring Timer. */
2223        INIT_DELAYED_WORK(&cs->monitor_work, myrs_monitor);
2224        queue_delayed_work(cs->work_q, &cs->monitor_work, 1);
2225
2226        return true;
2227}
2228
2229static void myrs_destroy_mempools(struct myrs_hba *cs)
2230{
2231        cancel_delayed_work_sync(&cs->monitor_work);
2232        destroy_workqueue(cs->work_q);
2233
2234        dma_pool_destroy(cs->sg_pool);
2235        dma_pool_destroy(cs->dcdb_pool);
2236        dma_pool_destroy(cs->sense_pool);
2237}
2238
/*
 * myrs_unmap - free the memory-mailbox data structures: event buffer,
 * controller info, firmware status buffer and the status/command
 * mailbox arrays.  Pointers are reset to NULL so a second call (e.g.
 * from a later cleanup path) is harmless.
 */
static void myrs_unmap(struct myrs_hba *cs)
{
	kfree(cs->event_buf);
	kfree(cs->ctlr_info);
	if (cs->fwstat_buf) {
		dma_free_coherent(&cs->pdev->dev, sizeof(struct myrs_fwstat),
				  cs->fwstat_buf, cs->fwstat_addr);
		cs->fwstat_buf = NULL;
	}
	if (cs->first_stat_mbox) {
		dma_free_coherent(&cs->pdev->dev, cs->stat_mbox_size,
				  cs->first_stat_mbox, cs->stat_mbox_addr);
		cs->first_stat_mbox = NULL;
	}
	if (cs->first_cmd_mbox) {
		dma_free_coherent(&cs->pdev->dev, cs->cmd_mbox_size,
				  cs->first_cmd_mbox, cs->cmd_mbox_addr);
		cs->first_cmd_mbox = NULL;
	}
}
2259
2260static void myrs_cleanup(struct myrs_hba *cs)
2261{
2262        struct pci_dev *pdev = cs->pdev;
2263
2264        /* Free the memory mailbox, status, and related structures */
2265        myrs_unmap(cs);
2266
2267        if (cs->mmio_base) {
2268                cs->disable_intr(cs);
2269                iounmap(cs->mmio_base);
2270                cs->mmio_base = NULL;
2271        }
2272        if (cs->irq)
2273                free_irq(cs->irq, cs);
2274        if (cs->io_addr)
2275                release_region(cs->io_addr, 0x80);
2276        pci_set_drvdata(pdev, NULL);
2277        pci_disable_device(pdev);
2278        scsi_host_put(cs->host);
2279}
2280
/*
 * myrs_detect - allocate and bring up one controller instance.
 *
 * Allocates the Scsi_Host, enables the PCI device, maps the register
 * window from BAR 0, runs the model-specific hw_init() and requests the
 * (shared) IRQ.  Returns the new myrs_hba on success, or NULL after
 * myrs_cleanup() on any failure.
 */
static struct myrs_hba *myrs_detect(struct pci_dev *pdev,
		const struct pci_device_id *entry)
{
	struct myrs_privdata *privdata =
		(struct myrs_privdata *)entry->driver_data;
	irq_handler_t irq_handler = privdata->irq_handler;
	unsigned int mmio_size = privdata->mmio_size;
	struct myrs_hba *cs = NULL;

	cs = myrs_alloc_host(pdev, entry);
	if (!cs) {
		dev_err(&pdev->dev, "Unable to allocate Controller\n");
		return NULL;
	}
	cs->pdev = pdev;

	if (pci_enable_device(pdev))
		goto Failure;

	cs->pci_addr = pci_resource_start(pdev, 0);

	pci_set_drvdata(pdev, cs);
	spin_lock_init(&cs->queue_lock);
	/* Map the Controller Register Window. */
	if (mmio_size < PAGE_SIZE)
		mmio_size = PAGE_SIZE;
	cs->mmio_base = ioremap(cs->pci_addr & PAGE_MASK, mmio_size);
	if (cs->mmio_base == NULL) {
		dev_err(&pdev->dev,
			"Unable to map Controller Register Window\n");
		goto Failure;
	}

	/* Registers may live at a sub-page offset within the BAR. */
	cs->io_base = cs->mmio_base + (cs->pci_addr & ~PAGE_MASK);
	if (privdata->hw_init(pdev, cs, cs->io_base))
		goto Failure;

	/* Acquire shared access to the IRQ Channel. */
	if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrs", cs) < 0) {
		dev_err(&pdev->dev,
			"Unable to acquire IRQ Channel %d\n", pdev->irq);
		goto Failure;
	}
	cs->irq = pdev->irq;
	return cs;

Failure:
	dev_err(&pdev->dev,
		"Failed to initialize Controller\n");
	myrs_cleanup(cs);
	return NULL;
}
2333
2334/*
2335 * myrs_err_status reports Controller BIOS Messages passed through
2336 * the Error Status Register when the driver performs the BIOS handshaking.
2337 * It returns true for fatal errors and false otherwise.
2338 */
2339
static bool myrs_err_status(struct myrs_hba *cs, unsigned char status,
		unsigned char parm0, unsigned char parm1)
{
	struct pci_dev *pdev = cs->pdev;

	/*
	 * parm1:parm0 presumably encode channel:target of the affected
	 * physical device — TODO confirm against the firmware spec.
	 * Only memory parity errors and unknown codes are fatal (true).
	 */
	switch (status) {
	case 0x00:
		dev_info(&pdev->dev,
			 "Physical Device %d:%d Not Responding\n",
			 parm1, parm0);
		break;
	case 0x08:
		dev_notice(&pdev->dev, "Spinning Up Drives\n");
		break;
	case 0x30:
		dev_notice(&pdev->dev, "Configuration Checksum Error\n");
		break;
	case 0x60:
		dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
		break;
	case 0x70:
		dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
		break;
	case 0x90:
		dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
			   parm1, parm0);
		break;
	case 0xA0:
		dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
		break;
	case 0xB0:
		dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
		break;
	case 0xD0:
		dev_notice(&pdev->dev, "New Controller Configuration Found\n");
		break;
	case 0xF0:
		dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
		return true;
	default:
		dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
			status);
		return true;
	}
	return false;
}
2386
2387/*
2388 * Hardware-specific functions
2389 */
2390
2391/*
2392 * DAC960 GEM Series Controllers.
2393 */
2394
/* Ring the GEM doorbell: a new command is in the hardware mailbox. */
static inline void DAC960_GEM_hw_mbox_new_cmd(void __iomem *base)
{
	/* GEM doorbell bits live in the top byte of the 32-bit register. */
	__le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_NEW_CMD << 24);

	writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
}
2401
/* Acknowledge (clear) the hardware mailbox status on a GEM controller. */
static inline void DAC960_GEM_ack_hw_mbox_status(void __iomem *base)
{
	__le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_ACK_STS << 24);

	writel(val, base + DAC960_GEM_IDB_CLEAR_OFFSET);
}
2408
/* Request a controller reset via the GEM inbound doorbell. */
static inline void DAC960_GEM_reset_ctrl(void __iomem *base)
{
	__le32 val = cpu_to_le32(DAC960_GEM_IDB_CTRL_RESET << 24);

	writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
}
2415
/*
 * Notify the controller of a new command in the memory mailbox.
 * NOTE(review): this writes the HWMBOX_NEW_CMD bit, whereas the BA/LP
 * variants use a distinct MMBOX_NEW_CMD bit — presumably correct for
 * GEM hardware, but confirm against the register specification.
 */
static inline void DAC960_GEM_mem_mbox_new_cmd(void __iomem *base)
{
	__le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_NEW_CMD << 24);

	writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
}
2422
/* Return true while the GEM hardware mailbox still holds a command. */
static inline bool DAC960_GEM_hw_mbox_is_full(void __iomem *base)
{
	__le32 val;

	val = readl(base + DAC960_GEM_IDB_READ_OFFSET);
	return (le32_to_cpu(val) >> 24) & DAC960_GEM_IDB_HWMBOX_FULL;
}
2430
/* Return true while GEM controller self-initialization is running. */
static inline bool DAC960_GEM_init_in_progress(void __iomem *base)
{
	__le32 val;

	val = readl(base + DAC960_GEM_IDB_READ_OFFSET);
	return (le32_to_cpu(val) >> 24) & DAC960_GEM_IDB_INIT_IN_PROGRESS;
}
2438
/* Acknowledge a hardware-mailbox interrupt on a GEM controller. */
static inline void DAC960_GEM_ack_hw_mbox_intr(void __iomem *base)
{
	__le32 val = cpu_to_le32(DAC960_GEM_ODB_HWMBOX_ACK_IRQ << 24);

	writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
}
2445
/* Acknowledge both hardware- and memory-mailbox interrupts (GEM). */
static inline void DAC960_GEM_ack_intr(void __iomem *base)
{
	__le32 val = cpu_to_le32((DAC960_GEM_ODB_HWMBOX_ACK_IRQ |
				  DAC960_GEM_ODB_MMBOX_ACK_IRQ) << 24);

	writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
}
2453
/* Return true when the GEM outbound doorbell signals a HW mbox status. */
static inline bool DAC960_GEM_hw_mbox_status_available(void __iomem *base)
{
	__le32 val;

	val = readl(base + DAC960_GEM_ODB_READ_OFFSET);
	return (le32_to_cpu(val) >> 24) & DAC960_GEM_ODB_HWMBOX_STS_AVAIL;
}
2461
/* Unmask hardware- and memory-mailbox interrupts on a GEM controller. */
static inline void DAC960_GEM_enable_intr(void __iomem *base)
{
	__le32 val = cpu_to_le32((DAC960_GEM_IRQMASK_HWMBOX_IRQ |
				  DAC960_GEM_IRQMASK_MMBOX_IRQ) << 24);
	writel(val, base + DAC960_GEM_IRQMASK_CLEAR_OFFSET);
}
2468
/*
 * Mask GEM interrupts.
 * NOTE(review): writes 0 to the IRQMASK "read" offset rather than a
 * set/clear offset as the other helpers do — presumably the plain mask
 * register; confirm against the register specification.
 */
static inline void DAC960_GEM_disable_intr(void __iomem *base)
{
	__le32 val = 0;

	writel(val, base + DAC960_GEM_IRQMASK_READ_OFFSET);
}
2475
/*
 * Copy a command into a memory mailbox slot.  Words 1..n are written
 * first and word 0 (holding the command id) last, so the controller
 * never observes a half-written command.
 */
static inline void DAC960_GEM_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
		union myrs_cmd_mbox *mbox)
{
	memcpy(&mem_mbox->words[1], &mbox->words[1],
	       sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
	/* Barrier to avoid reordering */
	wmb();
	mem_mbox->words[0] = mbox->words[0];
	/* Barrier to force PCI access */
	mb();
}
2487
/* Write a command's DMA address into the GEM hardware mailbox. */
static inline void DAC960_GEM_write_hw_mbox(void __iomem *base,
		dma_addr_t cmd_mbox_addr)
{
	dma_addr_writeql(cmd_mbox_addr, base + DAC960_GEM_CMDMBX_OFFSET);
}
2493
/* Read the command status byte (low byte of the status halfword at +2). */
static inline unsigned char DAC960_GEM_read_cmd_status(void __iomem *base)
{
	return readw(base + DAC960_GEM_CMDSTS_OFFSET + 2);
}
2498
/*
 * Fetch a pending BIOS error status and its two parameter bytes.
 * Returns false when no error is pending; otherwise clears the pending
 * bits and returns true.
 * NOTE(review): the raw __le32 is masked with a CPU-order constant and
 * truncated to the low byte for *error — looks intentional on
 * little-endian layouts, but confirm on big-endian hosts.
 */
static inline bool
DAC960_GEM_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	__le32 val;

	val = readl(base + DAC960_GEM_ERRSTS_READ_OFFSET);
	if (!((le32_to_cpu(val) >> 24) & DAC960_GEM_ERRSTS_PENDING))
		return false;
	*error = val & ~(DAC960_GEM_ERRSTS_PENDING << 24);
	*param0 = readb(base + DAC960_GEM_CMDMBX_OFFSET + 0);
	*param1 = readb(base + DAC960_GEM_CMDMBX_OFFSET + 1);
	writel(0x03000000, base + DAC960_GEM_ERRSTS_CLEAR_OFFSET);
	return true;
}
2514
/*
 * Issue the memory-mailbox setup command through the GEM hardware
 * mailbox and return the controller status byte.  Busy-waits with
 * udelay(); only used during controller bring-up, before interrupts
 * are enabled.
 */
static inline unsigned char
DAC960_GEM_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
{
	unsigned char status;

	while (DAC960_GEM_hw_mbox_is_full(base))
		udelay(1);
	DAC960_GEM_write_hw_mbox(base, mbox_addr);
	DAC960_GEM_hw_mbox_new_cmd(base);
	while (!DAC960_GEM_hw_mbox_status_available(base))
		udelay(1);
	status = DAC960_GEM_read_cmd_status(base);
	DAC960_GEM_ack_hw_mbox_intr(base);
	DAC960_GEM_ack_hw_mbox_status(base);

	return status;
}
2532
/*
 * GEM-series hardware bring-up: mask interrupts, wait for the
 * controller's self-initialization to finish (surfacing any pending
 * BIOS error status through myrs_err_status()), enable the memory
 * mailbox interface, then install the GEM-specific controller ops.
 */
static int DAC960_GEM_hw_init(struct pci_dev *pdev,
		struct myrs_hba *cs, void __iomem *base)
{
	int timeout = 0;
	unsigned char status, parm0, parm1;

	DAC960_GEM_disable_intr(base);
	DAC960_GEM_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_GEM_init_in_progress(base) &&
	       timeout < MYRS_MAILBOX_TIMEOUT) {
		/* A fatal BIOS error aborts initialization. */
		if (DAC960_GEM_read_error_status(base, &status,
						 &parm0, &parm1) &&
		    myrs_err_status(cs, status, parm0, parm1))
			return -EIO;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRS_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrs_enable_mmio_mbox(cs, DAC960_GEM_mbox_init)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_GEM_reset_ctrl(base);
		return -EAGAIN;
	}
	DAC960_GEM_enable_intr(base);
	/* Ops used by the core driver (and myrs_cleanup) from here on. */
	cs->write_cmd_mbox = DAC960_GEM_write_cmd_mbox;
	cs->get_cmd_mbox = DAC960_GEM_mem_mbox_new_cmd;
	cs->disable_intr = DAC960_GEM_disable_intr;
	cs->reset = DAC960_GEM_reset_ctrl;
	return 0;
}
2569
/*
 * Interrupt handler for GEM-series controllers.
 *
 * Consumes completions from the status mailbox ring under queue_lock.
 * Tags MYRS_DCMD_TAG/MYRS_MCMD_TAG select the driver's internal command
 * blocks; any other tag maps to a SCSI command via
 * scsi_host_find_tag(id - 3).  Each consumed slot is zeroed, so the
 * loop stops at the first slot whose id is still 0.
 */
static irqreturn_t DAC960_GEM_intr_handler(int irq, void *arg)
{
	struct myrs_hba *cs = arg;
	void __iomem *base = cs->io_base;
	struct myrs_stat_mbox *next_stat_mbox;
	unsigned long flags;

	spin_lock_irqsave(&cs->queue_lock, flags);
	DAC960_GEM_ack_intr(base);
	next_stat_mbox = cs->next_stat_mbox;
	while (next_stat_mbox->id > 0) {
		unsigned short id = next_stat_mbox->id;
		struct scsi_cmnd *scmd = NULL;
		struct myrs_cmdblk *cmd_blk = NULL;

		if (id == MYRS_DCMD_TAG)
			cmd_blk = &cs->dcmd_blk;
		else if (id == MYRS_MCMD_TAG)
			cmd_blk = &cs->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cs->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk) {
			cmd_blk->status = next_stat_mbox->status;
			cmd_blk->sense_len = next_stat_mbox->sense_len;
			cmd_blk->residual = next_stat_mbox->residual;
		} else
			dev_err(&cs->pdev->dev,
				"Unhandled command completion %d\n", id);

		/* Clear the slot and advance, wrapping at the ring end. */
		memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
		if (++next_stat_mbox > cs->last_stat_mbox)
			next_stat_mbox = cs->first_stat_mbox;

		if (cmd_blk) {
			if (id < 3)
				myrs_handle_cmdblk(cs, cmd_blk);
			else
				myrs_handle_scsi(cs, cmd_blk, scmd);
		}
	}
	cs->next_stat_mbox = next_stat_mbox;
	spin_unlock_irqrestore(&cs->queue_lock, flags);
	return IRQ_HANDLED;
}
2617
/* Model-specific ops and MMIO window size for GEM-series controllers. */
static struct myrs_privdata DAC960_GEM_privdata = {
	.hw_init =		DAC960_GEM_hw_init,
	.irq_handler =		DAC960_GEM_intr_handler,
	.mmio_size =		DAC960_GEM_mmio_size,
};
2623
2624/*
2625 * DAC960 BA Series Controllers.
2626 */
2627
/* Ring the BA doorbell: a new command is in the hardware mailbox. */
static inline void DAC960_BA_hw_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_BA_IDB_HWMBOX_NEW_CMD, base + DAC960_BA_IDB_OFFSET);
}
2632
/* Acknowledge (clear) the hardware mailbox status on a BA controller. */
static inline void DAC960_BA_ack_hw_mbox_status(void __iomem *base)
{
	writeb(DAC960_BA_IDB_HWMBOX_ACK_STS, base + DAC960_BA_IDB_OFFSET);
}
2637
/* Request a controller reset via the BA inbound doorbell. */
static inline void DAC960_BA_reset_ctrl(void __iomem *base)
{
	writeb(DAC960_BA_IDB_CTRL_RESET, base + DAC960_BA_IDB_OFFSET);
}
2642
/* Notify a BA controller of a new command in the memory mailbox. */
static inline void DAC960_BA_mem_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_BA_IDB_MMBOX_NEW_CMD, base + DAC960_BA_IDB_OFFSET);
}
2647
/*
 * Return true while the BA hardware mailbox still holds a command.
 * BA exposes an inverted-sense EMPTY bit, hence the negation.
 */
static inline bool DAC960_BA_hw_mbox_is_full(void __iomem *base)
{
	u8 val;

	val = readb(base + DAC960_BA_IDB_OFFSET);
	return !(val & DAC960_BA_IDB_HWMBOX_EMPTY);
}
2655
/*
 * Return true while BA controller self-initialization is running
 * (BA reports completion via an INIT_DONE bit, hence the negation).
 */
static inline bool DAC960_BA_init_in_progress(void __iomem *base)
{
	u8 val;

	val = readb(base + DAC960_BA_IDB_OFFSET);
	return !(val & DAC960_BA_IDB_INIT_DONE);
}
2663
/* Acknowledge a hardware-mailbox interrupt on a BA controller. */
static inline void DAC960_BA_ack_hw_mbox_intr(void __iomem *base)
{
	writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ, base + DAC960_BA_ODB_OFFSET);
}
2668
/* Acknowledge both hardware- and memory-mailbox interrupts (BA). */
static inline void DAC960_BA_ack_intr(void __iomem *base)
{
	writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ | DAC960_BA_ODB_MMBOX_ACK_IRQ,
	       base + DAC960_BA_ODB_OFFSET);
}
2674
/* Return true when the BA outbound doorbell signals a HW mbox status. */
static inline bool DAC960_BA_hw_mbox_status_available(void __iomem *base)
{
	u8 val;

	val = readb(base + DAC960_BA_ODB_OFFSET);
	return val & DAC960_BA_ODB_HWMBOX_STS_AVAIL;
}
2682
/* Enable BA interrupts by clearing the disable bit in the IRQ mask. */
static inline void DAC960_BA_enable_intr(void __iomem *base)
{
	writeb(~DAC960_BA_IRQMASK_DISABLE_IRQ, base + DAC960_BA_IRQMASK_OFFSET);
}
2687
/* Mask all BA interrupts. */
static inline void DAC960_BA_disable_intr(void __iomem *base)
{
	writeb(0xFF, base + DAC960_BA_IRQMASK_OFFSET);
}
2692
/*
 * Copy a command into a memory mailbox slot.  Words 1..n are written
 * first and word 0 (holding the command id) last, so the controller
 * never observes a half-written command.
 */
static inline void DAC960_BA_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
		union myrs_cmd_mbox *mbox)
{
	memcpy(&mem_mbox->words[1], &mbox->words[1],
	       sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
	/* Barrier to avoid reordering */
	wmb();
	mem_mbox->words[0] = mbox->words[0];
	/* Barrier to force PCI access */
	mb();
}
2704
2705
/* Write a command's DMA address into the BA hardware mailbox. */
static inline void DAC960_BA_write_hw_mbox(void __iomem *base,
		dma_addr_t cmd_mbox_addr)
{
	dma_addr_writeql(cmd_mbox_addr, base + DAC960_BA_CMDMBX_OFFSET);
}
2711
/* Read the command status byte (low byte of the status halfword at +2). */
static inline unsigned char DAC960_BA_read_cmd_status(void __iomem *base)
{
	return readw(base + DAC960_BA_CMDSTS_OFFSET + 2);
}
2716
/*
 * Fetch a pending BIOS error status and its two parameter bytes.
 * Returns false when no error is pending; otherwise clears the
 * error-status register (write 0xFF) and returns true.
 */
static inline bool
DAC960_BA_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	u8 val;

	val = readb(base + DAC960_BA_ERRSTS_OFFSET);
	if (!(val & DAC960_BA_ERRSTS_PENDING))
		return false;
	val &= ~DAC960_BA_ERRSTS_PENDING;
	*error = val;
	*param0 = readb(base + DAC960_BA_CMDMBX_OFFSET + 0);
	*param1 = readb(base + DAC960_BA_CMDMBX_OFFSET + 1);
	writeb(0xFF, base + DAC960_BA_ERRSTS_OFFSET);
	return true;
}
2733
/*
 * Issue the memory-mailbox setup command through the BA hardware
 * mailbox and return the controller status byte.  Busy-waits with
 * udelay(); only used during controller bring-up, before interrupts
 * are enabled.
 */
static inline unsigned char
DAC960_BA_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
{
	unsigned char status;

	while (DAC960_BA_hw_mbox_is_full(base))
		udelay(1);
	DAC960_BA_write_hw_mbox(base, mbox_addr);
	DAC960_BA_hw_mbox_new_cmd(base);
	while (!DAC960_BA_hw_mbox_status_available(base))
		udelay(1);
	status = DAC960_BA_read_cmd_status(base);
	DAC960_BA_ack_hw_mbox_intr(base);
	DAC960_BA_ack_hw_mbox_status(base);

	return status;
}
2751
/*
 * BA-series hardware bring-up: mask interrupts, wait for the
 * controller's self-initialization to finish (surfacing any pending
 * BIOS error status through myrs_err_status()), enable the memory
 * mailbox interface, then install the BA-specific controller ops.
 */
static int DAC960_BA_hw_init(struct pci_dev *pdev,
		struct myrs_hba *cs, void __iomem *base)
{
	int timeout = 0;
	unsigned char status, parm0, parm1;

	DAC960_BA_disable_intr(base);
	DAC960_BA_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_BA_init_in_progress(base) &&
	       timeout < MYRS_MAILBOX_TIMEOUT) {
		/* A fatal BIOS error aborts initialization. */
		if (DAC960_BA_read_error_status(base, &status,
					      &parm0, &parm1) &&
		    myrs_err_status(cs, status, parm0, parm1))
			return -EIO;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRS_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrs_enable_mmio_mbox(cs, DAC960_BA_mbox_init)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_BA_reset_ctrl(base);
		return -EAGAIN;
	}
	DAC960_BA_enable_intr(base);
	/* Ops used by the core driver (and myrs_cleanup) from here on. */
	cs->write_cmd_mbox = DAC960_BA_write_cmd_mbox;
	cs->get_cmd_mbox = DAC960_BA_mem_mbox_new_cmd;
	cs->disable_intr = DAC960_BA_disable_intr;
	cs->reset = DAC960_BA_reset_ctrl;
	return 0;
}
2788
/*
 * Interrupt handler for BA-series controllers.
 *
 * Consumes completions from the status mailbox ring under queue_lock.
 * Tags MYRS_DCMD_TAG/MYRS_MCMD_TAG select the driver's internal command
 * blocks; any other tag maps to a SCSI command via
 * scsi_host_find_tag(id - 3).  Each consumed slot is zeroed, so the
 * loop stops at the first slot whose id is still 0.
 */
static irqreturn_t DAC960_BA_intr_handler(int irq, void *arg)
{
	struct myrs_hba *cs = arg;
	void __iomem *base = cs->io_base;
	struct myrs_stat_mbox *next_stat_mbox;
	unsigned long flags;

	spin_lock_irqsave(&cs->queue_lock, flags);
	DAC960_BA_ack_intr(base);
	next_stat_mbox = cs->next_stat_mbox;
	while (next_stat_mbox->id > 0) {
		unsigned short id = next_stat_mbox->id;
		struct scsi_cmnd *scmd = NULL;
		struct myrs_cmdblk *cmd_blk = NULL;

		if (id == MYRS_DCMD_TAG)
			cmd_blk = &cs->dcmd_blk;
		else if (id == MYRS_MCMD_TAG)
			cmd_blk = &cs->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cs->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk) {
			cmd_blk->status = next_stat_mbox->status;
			cmd_blk->sense_len = next_stat_mbox->sense_len;
			cmd_blk->residual = next_stat_mbox->residual;
		} else
			dev_err(&cs->pdev->dev,
				"Unhandled command completion %d\n", id);

		/* Clear the slot and advance, wrapping at the ring end. */
		memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
		if (++next_stat_mbox > cs->last_stat_mbox)
			next_stat_mbox = cs->first_stat_mbox;

		if (cmd_blk) {
			if (id < 3)
				myrs_handle_cmdblk(cs, cmd_blk);
			else
				myrs_handle_scsi(cs, cmd_blk, scmd);
		}
	}
	cs->next_stat_mbox = next_stat_mbox;
	spin_unlock_irqrestore(&cs->queue_lock, flags);
	return IRQ_HANDLED;
}
2836
/* Model-specific ops and MMIO window size for BA-series controllers. */
static struct myrs_privdata DAC960_BA_privdata = {
	.hw_init =		DAC960_BA_hw_init,
	.irq_handler =		DAC960_BA_intr_handler,
	.mmio_size =		DAC960_BA_mmio_size,
};
2842
2843/*
2844 * DAC960 LP Series Controllers.
2845 */
2846
/* Ring the LP doorbell: a new command is in the hardware mailbox. */
static inline void DAC960_LP_hw_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_LP_IDB_HWMBOX_NEW_CMD, base + DAC960_LP_IDB_OFFSET);
}
2851
/* Acknowledge (clear) the hardware mailbox status on an LP controller. */
static inline void DAC960_LP_ack_hw_mbox_status(void __iomem *base)
{
	writeb(DAC960_LP_IDB_HWMBOX_ACK_STS, base + DAC960_LP_IDB_OFFSET);
}
2856
/* Request a controller reset via the LP inbound doorbell. */
static inline void DAC960_LP_reset_ctrl(void __iomem *base)
{
	writeb(DAC960_LP_IDB_CTRL_RESET, base + DAC960_LP_IDB_OFFSET);
}
2861
/* Notify an LP controller of a new command in the memory mailbox. */
static inline void DAC960_LP_mem_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_LP_IDB_MMBOX_NEW_CMD, base + DAC960_LP_IDB_OFFSET);
}
2866
/* Return true while the LP hardware mailbox still holds a command. */
static inline bool DAC960_LP_hw_mbox_is_full(void __iomem *base)
{
	u8 val;

	val = readb(base + DAC960_LP_IDB_OFFSET);
	return val & DAC960_LP_IDB_HWMBOX_FULL;
}
2874
/* Return true while LP controller self-initialization is running. */
static inline bool DAC960_LP_init_in_progress(void __iomem *base)
{
	u8 val;

	val = readb(base + DAC960_LP_IDB_OFFSET);
	return val & DAC960_LP_IDB_INIT_IN_PROGRESS;
}
2882
/* Acknowledge a hardware-mailbox interrupt on an LP controller. */
static inline void DAC960_LP_ack_hw_mbox_intr(void __iomem *base)
{
	writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ, base + DAC960_LP_ODB_OFFSET);
}
2887
/* Acknowledge both hardware- and memory-mailbox interrupts (LP). */
static inline void DAC960_LP_ack_intr(void __iomem *base)
{
	writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ | DAC960_LP_ODB_MMBOX_ACK_IRQ,
	       base + DAC960_LP_ODB_OFFSET);
}
2893
/* Return true when the LP outbound doorbell signals a HW mbox status. */
static inline bool DAC960_LP_hw_mbox_status_available(void __iomem *base)
{
	u8 val;

	val = readb(base + DAC960_LP_ODB_OFFSET);
	return val & DAC960_LP_ODB_HWMBOX_STS_AVAIL;
}
2901
/* Enable LP interrupts by clearing the disable bit in the IRQ mask. */
static inline void DAC960_LP_enable_intr(void __iomem *base)
{
	writeb(~DAC960_LP_IRQMASK_DISABLE_IRQ, base + DAC960_LP_IRQMASK_OFFSET);
}
2906
/* Mask all LP interrupts. */
static inline void DAC960_LP_disable_intr(void __iomem *base)
{
	writeb(0xFF, base + DAC960_LP_IRQMASK_OFFSET);
}
2911
/*
 * Copy a command into a memory mailbox slot.  Words 1..n are written
 * first and word 0 (holding the command id) last, so the controller
 * never observes a half-written command.
 */
static inline void DAC960_LP_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
		union myrs_cmd_mbox *mbox)
{
	memcpy(&mem_mbox->words[1], &mbox->words[1],
	       sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
	/* Barrier to avoid reordering */
	wmb();
	mem_mbox->words[0] = mbox->words[0];
	/* Barrier to force PCI access */
	mb();
}
2923
/* Write a command's DMA address into the LP hardware mailbox. */
static inline void DAC960_LP_write_hw_mbox(void __iomem *base,
		dma_addr_t cmd_mbox_addr)
{
	dma_addr_writeql(cmd_mbox_addr, base + DAC960_LP_CMDMBX_OFFSET);
}
2929
/* Read the command status byte (low byte of the status halfword at +2). */
static inline unsigned char DAC960_LP_read_cmd_status(void __iomem *base)
{
	return readw(base + DAC960_LP_CMDSTS_OFFSET + 2);
}
2934
/*
 * Fetch a pending BIOS error status and its two parameter bytes.
 * Returns false when no error is pending; otherwise clears the
 * error-status register (write 0xFF) and returns true.
 */
static inline bool
DAC960_LP_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	u8 val;

	val = readb(base + DAC960_LP_ERRSTS_OFFSET);
	if (!(val & DAC960_LP_ERRSTS_PENDING))
		return false;
	val &= ~DAC960_LP_ERRSTS_PENDING;
	*error = val;
	*param0 = readb(base + DAC960_LP_CMDMBX_OFFSET + 0);
	*param1 = readb(base + DAC960_LP_CMDMBX_OFFSET + 1);
	writeb(0xFF, base + DAC960_LP_ERRSTS_OFFSET);
	return true;
}
2951
/*
 * Issue the memory-mailbox setup command through the LP hardware
 * mailbox and return the controller status byte.  Busy-waits with
 * udelay(); only used during controller bring-up, before interrupts
 * are enabled.
 */
static inline unsigned char
DAC960_LP_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
{
	unsigned char status;

	while (DAC960_LP_hw_mbox_is_full(base))
		udelay(1);
	DAC960_LP_write_hw_mbox(base, mbox_addr);
	DAC960_LP_hw_mbox_new_cmd(base);
	while (!DAC960_LP_hw_mbox_status_available(base))
		udelay(1);
	status = DAC960_LP_read_cmd_status(base);
	DAC960_LP_ack_hw_mbox_intr(base);
	DAC960_LP_ack_hw_mbox_status(base);

	return status;
}
2969
2970static int DAC960_LP_hw_init(struct pci_dev *pdev,
2971                struct myrs_hba *cs, void __iomem *base)
2972{
2973        int timeout = 0;
2974        unsigned char status, parm0, parm1;
2975
2976        DAC960_LP_disable_intr(base);
2977        DAC960_LP_ack_hw_mbox_status(base);
2978        udelay(1000);
2979        while (DAC960_LP_init_in_progress(base) &&
2980               timeout < MYRS_MAILBOX_TIMEOUT) {
2981                if (DAC960_LP_read_error_status(base, &status,
2982                                              &parm0, &parm1) &&
2983                    myrs_err_status(cs, status, parm0, parm1))
2984                        return -EIO;
2985                udelay(10);
2986                timeout++;
2987        }
2988        if (timeout == MYRS_MAILBOX_TIMEOUT) {
2989                dev_err(&pdev->dev,
2990                        "Timeout waiting for Controller Initialisation\n");
2991                return -ETIMEDOUT;
2992        }
2993        if (!myrs_enable_mmio_mbox(cs, DAC960_LP_mbox_init)) {
2994                dev_err(&pdev->dev,
2995                        "Unable to Enable Memory Mailbox Interface\n");
2996                DAC960_LP_reset_ctrl(base);
2997                return -ENODEV;
2998        }
2999        DAC960_LP_enable_intr(base);
3000        cs->write_cmd_mbox = DAC960_LP_write_cmd_mbox;
3001        cs->get_cmd_mbox = DAC960_LP_mem_mbox_new_cmd;
3002        cs->disable_intr = DAC960_LP_disable_intr;
3003        cs->reset = DAC960_LP_reset_ctrl;
3004
3005        return 0;
3006}
3007
/*
 * Interrupt handler for LP-series controllers.
 *
 * Consumes completions from the status mailbox ring under queue_lock.
 * Tags MYRS_DCMD_TAG/MYRS_MCMD_TAG select the driver's internal command
 * blocks; any other tag maps to a SCSI command via
 * scsi_host_find_tag(id - 3).  Each consumed slot is zeroed, so the
 * loop stops at the first slot whose id is still 0.
 */
static irqreturn_t DAC960_LP_intr_handler(int irq, void *arg)
{
	struct myrs_hba *cs = arg;
	void __iomem *base = cs->io_base;
	struct myrs_stat_mbox *next_stat_mbox;
	unsigned long flags;

	spin_lock_irqsave(&cs->queue_lock, flags);
	DAC960_LP_ack_intr(base);
	next_stat_mbox = cs->next_stat_mbox;
	while (next_stat_mbox->id > 0) {
		unsigned short id = next_stat_mbox->id;
		struct scsi_cmnd *scmd = NULL;
		struct myrs_cmdblk *cmd_blk = NULL;

		if (id == MYRS_DCMD_TAG)
			cmd_blk = &cs->dcmd_blk;
		else if (id == MYRS_MCMD_TAG)
			cmd_blk = &cs->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cs->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk) {
			cmd_blk->status = next_stat_mbox->status;
			cmd_blk->sense_len = next_stat_mbox->sense_len;
			cmd_blk->residual = next_stat_mbox->residual;
		} else
			dev_err(&cs->pdev->dev,
				"Unhandled command completion %d\n", id);

		/* Clear the slot and advance, wrapping at the ring end. */
		memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
		if (++next_stat_mbox > cs->last_stat_mbox)
			next_stat_mbox = cs->first_stat_mbox;

		if (cmd_blk) {
			if (id < 3)
				myrs_handle_cmdblk(cs, cmd_blk);
			else
				myrs_handle_scsi(cs, cmd_blk, scmd);
		}
	}
	cs->next_stat_mbox = next_stat_mbox;
	spin_unlock_irqrestore(&cs->queue_lock, flags);
	return IRQ_HANDLED;
}
3055
/* Per-controller-family hooks for the DAC960 LP series. */
static struct myrs_privdata DAC960_LP_privdata = {
        .hw_init =              DAC960_LP_hw_init,
        .irq_handler =          DAC960_LP_intr_handler,
        .mmio_size =            DAC960_LP_mmio_size,
};
3061
3062/*
3063 * Module functions
3064 */
3065static int
3066myrs_probe(struct pci_dev *dev, const struct pci_device_id *entry)
3067{
3068        struct myrs_hba *cs;
3069        int ret;
3070
3071        cs = myrs_detect(dev, entry);
3072        if (!cs)
3073                return -ENODEV;
3074
3075        ret = myrs_get_config(cs);
3076        if (ret < 0) {
3077                myrs_cleanup(cs);
3078                return ret;
3079        }
3080
3081        if (!myrs_create_mempools(dev, cs)) {
3082                ret = -ENOMEM;
3083                goto failed;
3084        }
3085
3086        ret = scsi_add_host(cs->host, &dev->dev);
3087        if (ret) {
3088                dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
3089                myrs_destroy_mempools(cs);
3090                goto failed;
3091        }
3092        scsi_scan_host(cs->host);
3093        return 0;
3094failed:
3095        myrs_cleanup(cs);
3096        return ret;
3097}
3098
3099
3100static void myrs_remove(struct pci_dev *pdev)
3101{
3102        struct myrs_hba *cs = pci_get_drvdata(pdev);
3103
3104        if (cs == NULL)
3105                return;
3106
3107        shost_printk(KERN_NOTICE, cs->host, "Flushing Cache...");
3108        myrs_flush_cache(cs);
3109        myrs_destroy_mempools(cs);
3110        myrs_cleanup(cs);
3111}
3112
3113
/*
 * Supported controllers. The GEM entry matches on subvendor as well
 * (PCI_DEVICE_SUB with the Mylex subvendor id), unlike the BA/LP
 * entries which match on vendor/device only.
 */
static const struct pci_device_id myrs_id_table[] = {
        {
                PCI_DEVICE_SUB(PCI_VENDOR_ID_MYLEX,
                               PCI_DEVICE_ID_MYLEX_DAC960_GEM,
                               PCI_VENDOR_ID_MYLEX, PCI_ANY_ID),
                .driver_data    = (unsigned long) &DAC960_GEM_privdata,
        },
        {
                PCI_DEVICE_DATA(MYLEX, DAC960_BA, &DAC960_BA_privdata),
        },
        {
                PCI_DEVICE_DATA(MYLEX, DAC960_LP, &DAC960_LP_privdata),
        },
        {0, },
};

MODULE_DEVICE_TABLE(pci, myrs_id_table);
3131
/* PCI driver glue: binds myrs_probe/myrs_remove to the id table above. */
static struct pci_driver myrs_pci_driver = {
        .name           = "myrs",
        .id_table       = myrs_id_table,
        .probe          = myrs_probe,
        .remove         = myrs_remove,
};
3138
3139static int __init myrs_init_module(void)
3140{
3141        int ret;
3142
3143        myrs_raid_template = raid_class_attach(&myrs_raid_functions);
3144        if (!myrs_raid_template)
3145                return -ENODEV;
3146
3147        ret = pci_register_driver(&myrs_pci_driver);
3148        if (ret)
3149                raid_class_release(myrs_raid_template);
3150
3151        return ret;
3152}
3153
/* Module exit: unregister the PCI driver, then drop the RAID template. */
static void __exit myrs_cleanup_module(void)
{
        pci_unregister_driver(&myrs_pci_driver);
        raid_class_release(myrs_raid_template);
}
3159
/* Module entry/exit points and metadata. */
module_init(myrs_init_module);
module_exit(myrs_cleanup_module);

MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (SCSI Interface)");
MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
MODULE_LICENSE("GPL");
3166