linux/drivers/scsi/arcmsr/arcmsr_hba.c
<<
>>
Prefs
   1/*
   2*******************************************************************************
   3**        O.S   : Linux
   4**   FILE NAME  : arcmsr_hba.c
   5**        BY    : Erich Chen
   6**   Description: SCSI RAID Device Driver for
   7**                ARECA RAID Host adapter
   8*******************************************************************************
   9** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved
  10**
  11**     Web site: www.areca.com.tw
  12**       E-mail: support@areca.com.tw
  13**
  14** This program is free software; you can redistribute it and/or modify
  15** it under the terms of the GNU General Public License version 2 as
  16** published by the Free Software Foundation.
  17** This program is distributed in the hope that it will be useful,
  18** but WITHOUT ANY WARRANTY; without even the implied warranty of
  19** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  20** GNU General Public License for more details.
  21*******************************************************************************
  22** Redistribution and use in source and binary forms, with or without
  23** modification, are permitted provided that the following conditions
  24** are met:
  25** 1. Redistributions of source code must retain the above copyright
  26**    notice, this list of conditions and the following disclaimer.
  27** 2. Redistributions in binary form must reproduce the above copyright
  28**    notice, this list of conditions and the following disclaimer in the
  29**    documentation and/or other materials provided with the distribution.
  30** 3. The name of the author may not be used to endorse or promote products
  31**    derived from this software without specific prior written permission.
  32**
  33** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  34** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  35** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  36** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  37** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT
  38** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  39** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
  40** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  41** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
  42** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  43*******************************************************************************
  44** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
  45**     Firmware Specification, see Documentation/scsi/arcmsr_spec.txt
  46*******************************************************************************
  47*/
  48#include <linux/module.h>
  49#include <linux/reboot.h>
  50#include <linux/spinlock.h>
  51#include <linux/pci_ids.h>
  52#include <linux/interrupt.h>
  53#include <linux/moduleparam.h>
  54#include <linux/errno.h>
  55#include <linux/types.h>
  56#include <linux/delay.h>
  57#include <linux/dma-mapping.h>
  58#include <linux/timer.h>
  59#include <linux/pci.h>
  60#include <linux/aer.h>
  61#include <asm/dma.h>
  62#include <asm/io.h>
  63#include <asm/system.h>
  64#include <asm/uaccess.h>
  65#include <scsi/scsi_host.h>
  66#include <scsi/scsi.h>
  67#include <scsi/scsi_cmnd.h>
  68#include <scsi/scsi_tcq.h>
  69#include <scsi/scsi_device.h>
  70#include <scsi/scsi_transport.h>
  71#include <scsi/scsicam.h>
  72#include "arcmsr.h"
  73
  74MODULE_AUTHOR("Erich Chen <support@areca.com.tw>");
  75MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/13xx/16xx) SATA/SAS RAID HOST Adapter");
  76MODULE_LICENSE("Dual BSD/GPL");
  77MODULE_VERSION(ARCMSR_DRIVER_VERSION);
  78
  79static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
  80                                        struct scsi_cmnd *cmd);
  81static int arcmsr_iop_confirm(struct AdapterControlBlock *acb);
  82static int arcmsr_abort(struct scsi_cmnd *);
  83static int arcmsr_bus_reset(struct scsi_cmnd *);
  84static int arcmsr_bios_param(struct scsi_device *sdev,
  85                struct block_device *bdev, sector_t capacity, int *info);
  86static int arcmsr_queue_command(struct scsi_cmnd *cmd,
  87                                        void (*done) (struct scsi_cmnd *));
  88static int arcmsr_probe(struct pci_dev *pdev,
  89                                const struct pci_device_id *id);
  90static void arcmsr_remove(struct pci_dev *pdev);
  91static void arcmsr_shutdown(struct pci_dev *pdev);
  92static void arcmsr_iop_init(struct AdapterControlBlock *acb);
  93static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
  94static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb);
  95static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
  96static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb);
  97static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb);
  98static const char *arcmsr_info(struct Scsi_Host *);
  99static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
 100static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev,
 101                                                                int queue_depth)
 102{
 103        if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
 104                queue_depth = ARCMSR_MAX_CMD_PERLUN;
 105        scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
 106        return queue_depth;
 107}
 108
 109static struct scsi_host_template arcmsr_scsi_host_template = {
 110        .module                 = THIS_MODULE,
 111        .name                   = "ARCMSR ARECA SATA/SAS RAID HOST Adapter"
 112                                                        ARCMSR_DRIVER_VERSION,
 113        .info                   = arcmsr_info,
 114        .queuecommand           = arcmsr_queue_command,
 115        .eh_abort_handler       = arcmsr_abort,
 116        .eh_bus_reset_handler   = arcmsr_bus_reset,
 117        .bios_param             = arcmsr_bios_param,
 118        .change_queue_depth     = arcmsr_adjust_disk_queue_depth,
 119        .can_queue              = ARCMSR_MAX_OUTSTANDING_CMD,
 120        .this_id                = ARCMSR_SCSI_INITIATOR_ID,
 121        .sg_tablesize           = ARCMSR_MAX_SG_ENTRIES,
 122        .max_sectors            = ARCMSR_MAX_XFER_SECTORS,
 123        .cmd_per_lun            = ARCMSR_MAX_CMD_PERLUN,
 124        .use_clustering         = ENABLE_CLUSTERING,
 125        .shost_attrs            = arcmsr_host_attrs,
 126};
 127#ifdef CONFIG_SCSI_ARCMSR_AER
 128static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev);
 129static pci_ers_result_t arcmsr_pci_error_detected(struct pci_dev *pdev,
 130                                                pci_channel_state_t state);
 131
 132static struct pci_error_handlers arcmsr_pci_error_handlers = {
 133        .error_detected         = arcmsr_pci_error_detected,
 134        .slot_reset             = arcmsr_pci_slot_reset,
 135};
 136#endif
 137static struct pci_device_id arcmsr_device_id_table[] = {
 138        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)},
 139        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)},
 140        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130)},
 141        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160)},
 142        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170)},
 143        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200)},
 144        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201)},
 145        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202)},
 146        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210)},
 147        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220)},
 148        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230)},
 149        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260)},
 150        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270)},
 151        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280)},
 152        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380)},
 153        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381)},
 154        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680)},
 155        {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681)},
 156        {0, 0}, /* Terminating entry */
 157};
 158MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
 159static struct pci_driver arcmsr_pci_driver = {
 160        .name                   = "arcmsr",
 161        .id_table               = arcmsr_device_id_table,
 162        .probe                  = arcmsr_probe,
 163        .remove                 = arcmsr_remove,
 164        .shutdown               = arcmsr_shutdown,
 165        #ifdef CONFIG_SCSI_ARCMSR_AER
 166        .err_handler            = &arcmsr_pci_error_handlers,
 167        #endif
 168};
 169
 170static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
 171{
 172        irqreturn_t handle_state;
 173        struct AdapterControlBlock *acb = dev_id;
 174
 175        spin_lock(acb->host->host_lock);
 176        handle_state = arcmsr_interrupt(acb);
 177        spin_unlock(acb->host->host_lock);
 178
 179        return handle_state;
 180}
 181
 182static int arcmsr_bios_param(struct scsi_device *sdev,
 183                struct block_device *bdev, sector_t capacity, int *geom)
 184{
 185        int ret, heads, sectors, cylinders, total_capacity;
 186        unsigned char *buffer;/* return copy of block device's partition table */
 187
 188        buffer = scsi_bios_ptable(bdev);
 189        if (buffer) {
 190                ret = scsi_partsize(buffer, capacity, &geom[2], &geom[0], &geom[1]);
 191                kfree(buffer);
 192                if (ret != -1)
 193                        return ret;
 194        }
 195        total_capacity = capacity;
 196        heads = 64;
 197        sectors = 32;
 198        cylinders = total_capacity / (heads * sectors);
 199        if (cylinders > 1024) {
 200                heads = 255;
 201                sectors = 63;
 202                cylinders = total_capacity / (heads * sectors);
 203        }
 204        geom[0] = heads;
 205        geom[1] = sectors;
 206        geom[2] = cylinders;
 207        return 0;
 208}
 209
 210static void arcmsr_define_adapter_type(struct AdapterControlBlock *acb)
 211{
 212        struct pci_dev *pdev = acb->pdev;
 213        u16 dev_id;
 214        pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id);
 215        switch (dev_id) {
 216        case 0x1201 : {
 217                acb->adapter_type = ACB_ADAPTER_TYPE_B;
 218                }
 219                break;
 220
 221        default : acb->adapter_type = ACB_ADAPTER_TYPE_A;
 222        }
 223}
 224
 225static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
 226{
 227
 228        switch (acb->adapter_type) {
 229
 230        case ACB_ADAPTER_TYPE_A: {
 231                struct pci_dev *pdev = acb->pdev;
 232                void *dma_coherent;
 233                dma_addr_t dma_coherent_handle, dma_addr;
 234                struct CommandControlBlock *ccb_tmp;
 235                uint32_t intmask_org;
 236                int i, j;
 237
 238                acb->pmuA = pci_ioremap_bar(pdev, 0);
 239                if (!acb->pmuA) {
 240                        printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n",
 241                                                        acb->host->host_no);
 242                        return -ENOMEM;
 243                }
 244
 245                dma_coherent = dma_alloc_coherent(&pdev->dev,
 246                        ARCMSR_MAX_FREECCB_NUM *
 247                        sizeof (struct CommandControlBlock) + 0x20,
 248                        &dma_coherent_handle, GFP_KERNEL);
 249
 250                if (!dma_coherent) {
 251                        iounmap(acb->pmuA);
 252                        return -ENOMEM;
 253                }
 254
 255                acb->dma_coherent = dma_coherent;
 256                acb->dma_coherent_handle = dma_coherent_handle;
 257
 258                if (((unsigned long)dma_coherent & 0x1F)) {
 259                        dma_coherent = dma_coherent +
 260                                (0x20 - ((unsigned long)dma_coherent & 0x1F));
 261                        dma_coherent_handle = dma_coherent_handle +
 262                                (0x20 - ((unsigned long)dma_coherent_handle & 0x1F));
 263                }
 264
 265                dma_addr = dma_coherent_handle;
 266                ccb_tmp = (struct CommandControlBlock *)dma_coherent;
 267                for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
 268                        ccb_tmp->cdb_shifted_phyaddr = dma_addr >> 5;
 269                        ccb_tmp->acb = acb;
 270                        acb->pccb_pool[i] = ccb_tmp;
 271                        list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
 272                        dma_addr = dma_addr + sizeof(struct CommandControlBlock);
 273                        ccb_tmp++;
 274                }
 275
 276                acb->vir2phy_offset = (unsigned long)ccb_tmp -(unsigned long)dma_addr;
 277                for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
 278                        for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
 279                                acb->devstate[i][j] = ARECA_RAID_GONE;
 280
 281                /*
 282                ** here we need to tell iop 331 our ccb_tmp.HighPart
 283                ** if ccb_tmp.HighPart is not zero
 284                */
 285                intmask_org = arcmsr_disable_outbound_ints(acb);
 286                }
 287                break;
 288
 289        case ACB_ADAPTER_TYPE_B: {
 290
 291                struct pci_dev *pdev = acb->pdev;
 292                struct MessageUnit_B *reg;
 293                void __iomem *mem_base0, *mem_base1;
 294                void *dma_coherent;
 295                dma_addr_t dma_coherent_handle, dma_addr;
 296                uint32_t intmask_org;
 297                struct CommandControlBlock *ccb_tmp;
 298                int i, j;
 299
 300                dma_coherent = dma_alloc_coherent(&pdev->dev,
 301                        ((ARCMSR_MAX_FREECCB_NUM *
 302                        sizeof(struct CommandControlBlock) + 0x20) +
 303                        sizeof(struct MessageUnit_B)),
 304                        &dma_coherent_handle, GFP_KERNEL);
 305                if (!dma_coherent)
 306                        return -ENOMEM;
 307
 308                acb->dma_coherent = dma_coherent;
 309                acb->dma_coherent_handle = dma_coherent_handle;
 310
 311                if (((unsigned long)dma_coherent & 0x1F)) {
 312                        dma_coherent = dma_coherent +
 313                                (0x20 - ((unsigned long)dma_coherent & 0x1F));
 314                        dma_coherent_handle = dma_coherent_handle +
 315                                (0x20 - ((unsigned long)dma_coherent_handle & 0x1F));
 316                }
 317
 318                dma_addr = dma_coherent_handle;
 319                ccb_tmp = (struct CommandControlBlock *)dma_coherent;
 320                for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
 321                        ccb_tmp->cdb_shifted_phyaddr = dma_addr >> 5;
 322                        ccb_tmp->acb = acb;
 323                        acb->pccb_pool[i] = ccb_tmp;
 324                        list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
 325                        dma_addr = dma_addr + sizeof(struct CommandControlBlock);
 326                        ccb_tmp++;
 327                }
 328
 329                reg = (struct MessageUnit_B *)(dma_coherent +
 330                ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock));
 331                acb->pmuB = reg;
 332                mem_base0 = pci_ioremap_bar(pdev, 0);
 333                if (!mem_base0)
 334                        goto out;
 335
 336                mem_base1 = pci_ioremap_bar(pdev, 2);
 337                if (!mem_base1) {
 338                        iounmap(mem_base0);
 339                        goto out;
 340                }
 341
 342                reg->drv2iop_doorbell_reg = mem_base0 + ARCMSR_DRV2IOP_DOORBELL;
 343                reg->drv2iop_doorbell_mask_reg = mem_base0 +
 344                                                ARCMSR_DRV2IOP_DOORBELL_MASK;
 345                reg->iop2drv_doorbell_reg = mem_base0 + ARCMSR_IOP2DRV_DOORBELL;
 346                reg->iop2drv_doorbell_mask_reg = mem_base0 +
 347                                                ARCMSR_IOP2DRV_DOORBELL_MASK;
 348                reg->ioctl_wbuffer_reg = mem_base1 + ARCMSR_IOCTL_WBUFFER;
 349                reg->ioctl_rbuffer_reg = mem_base1 + ARCMSR_IOCTL_RBUFFER;
 350                reg->msgcode_rwbuffer_reg = mem_base1 + ARCMSR_MSGCODE_RWBUFFER;
 351
 352                acb->vir2phy_offset = (unsigned long)ccb_tmp -(unsigned long)dma_addr;
 353                for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
 354                        for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
 355                                acb->devstate[i][j] = ARECA_RAID_GOOD;
 356
 357                /*
 358                ** here we need to tell iop 331 our ccb_tmp.HighPart
 359                ** if ccb_tmp.HighPart is not zero
 360                */
 361                intmask_org = arcmsr_disable_outbound_ints(acb);
 362                }
 363                break;
 364        }
 365        return 0;
 366
 367out:
 368        dma_free_coherent(&acb->pdev->dev,
 369                (ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 +
 370                sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle);
 371        return -ENOMEM;
 372}
 373
 374static int arcmsr_probe(struct pci_dev *pdev,
 375        const struct pci_device_id *id)
 376{
 377        struct Scsi_Host *host;
 378        struct AdapterControlBlock *acb;
 379        uint8_t bus, dev_fun;
 380        int error;
 381
 382        error = pci_enable_device(pdev);
 383        if (error)
 384                goto out;
 385        pci_set_master(pdev);
 386
 387        host = scsi_host_alloc(&arcmsr_scsi_host_template,
 388                        sizeof(struct AdapterControlBlock));
 389        if (!host) {
 390                error = -ENOMEM;
 391                goto out_disable_device;
 392        }
 393        acb = (struct AdapterControlBlock *)host->hostdata;
 394        memset(acb, 0, sizeof (struct AdapterControlBlock));
 395
 396        error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
 397        if (error) {
 398                error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 399                if (error) {
 400                        printk(KERN_WARNING
 401                               "scsi%d: No suitable DMA mask available\n",
 402                               host->host_no);
 403                        goto out_host_put;
 404                }
 405        }
 406        bus = pdev->bus->number;
 407        dev_fun = pdev->devfn;
 408        acb->host = host;
 409        acb->pdev = pdev;
 410        host->max_sectors = ARCMSR_MAX_XFER_SECTORS;
 411        host->max_lun = ARCMSR_MAX_TARGETLUN;
 412        host->max_id = ARCMSR_MAX_TARGETID;/*16:8*/
 413        host->max_cmd_len = 16;    /*this is issue of 64bit LBA, over 2T byte*/
 414        host->sg_tablesize = ARCMSR_MAX_SG_ENTRIES;
 415        host->can_queue = ARCMSR_MAX_FREECCB_NUM; /* max simultaneous cmds */
 416        host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN;
 417        host->this_id = ARCMSR_SCSI_INITIATOR_ID;
 418        host->unique_id = (bus << 8) | dev_fun;
 419        host->irq = pdev->irq;
 420        error = pci_request_regions(pdev, "arcmsr");
 421        if (error) {
 422                goto out_host_put;
 423        }
 424        arcmsr_define_adapter_type(acb);
 425
 426        acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
 427                           ACB_F_MESSAGE_RQBUFFER_CLEARED |
 428                           ACB_F_MESSAGE_WQBUFFER_READED);
 429        acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
 430        INIT_LIST_HEAD(&acb->ccb_free_list);
 431
 432        error = arcmsr_alloc_ccb_pool(acb);
 433        if (error)
 434                goto out_release_regions;
 435
 436        error = request_irq(pdev->irq, arcmsr_do_interrupt,
 437                            IRQF_SHARED, "arcmsr", acb);
 438        if (error)
 439                goto out_free_ccb_pool;
 440
 441        arcmsr_iop_init(acb);
 442        pci_set_drvdata(pdev, host);
 443        if (strncmp(acb->firm_version, "V1.42", 5) >= 0)
 444                host->max_sectors= ARCMSR_MAX_XFER_SECTORS_B;
 445
 446        error = scsi_add_host(host, &pdev->dev);
 447        if (error)
 448                goto out_free_irq;
 449
 450        error = arcmsr_alloc_sysfs_attr(acb);
 451        if (error)
 452                goto out_free_sysfs;
 453
 454        scsi_scan_host(host);
 455        #ifdef CONFIG_SCSI_ARCMSR_AER
 456        pci_enable_pcie_error_reporting(pdev);
 457        #endif
 458        return 0;
 459 out_free_sysfs:
 460 out_free_irq:
 461        free_irq(pdev->irq, acb);
 462 out_free_ccb_pool:
 463        arcmsr_free_ccb_pool(acb);
 464 out_release_regions:
 465        pci_release_regions(pdev);
 466 out_host_put:
 467        scsi_host_put(host);
 468 out_disable_device:
 469        pci_disable_device(pdev);
 470 out:
 471        return error;
 472}
 473
 474static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
 475{
 476        struct MessageUnit_A __iomem *reg = acb->pmuA;
 477        uint32_t Index;
 478        uint8_t Retries = 0x00;
 479
 480        do {
 481                for (Index = 0; Index < 100; Index++) {
 482                        if (readl(&reg->outbound_intstatus) &
 483                                        ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
 484                                writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT,
 485                                        &reg->outbound_intstatus);
 486                                return 0x00;
 487                        }
 488                        msleep(10);
 489                }/*max 1 seconds*/
 490
 491        } while (Retries++ < 20);/*max 20 sec*/
 492        return 0xff;
 493}
 494
 495static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
 496{
 497        struct MessageUnit_B *reg = acb->pmuB;
 498        uint32_t Index;
 499        uint8_t Retries = 0x00;
 500
 501        do {
 502                for (Index = 0; Index < 100; Index++) {
 503                        if (readl(reg->iop2drv_doorbell_reg)
 504                                & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
 505                                writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN
 506                                        , reg->iop2drv_doorbell_reg);
 507                                writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg);
 508                                return 0x00;
 509                        }
 510                        msleep(10);
 511                }/*max 1 seconds*/
 512
 513        } while (Retries++ < 20);/*max 20 sec*/
 514        return 0xff;
 515}
 516
 517static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
 518{
 519        struct MessageUnit_A __iomem *reg = acb->pmuA;
 520
 521        writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
 522        if (arcmsr_hba_wait_msgint_ready(acb))
 523                printk(KERN_NOTICE
 524                        "arcmsr%d: wait 'abort all outstanding command' timeout \n"
 525                        , acb->host->host_no);
 526}
 527
 528static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
 529{
 530        struct MessageUnit_B *reg = acb->pmuB;
 531
 532        writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell_reg);
 533        if (arcmsr_hbb_wait_msgint_ready(acb))
 534                printk(KERN_NOTICE
 535                        "arcmsr%d: wait 'abort all outstanding command' timeout \n"
 536                        , acb->host->host_no);
 537}
 538
 539static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
 540{
 541        switch (acb->adapter_type) {
 542        case ACB_ADAPTER_TYPE_A: {
 543                arcmsr_abort_hba_allcmd(acb);
 544                }
 545                break;
 546
 547        case ACB_ADAPTER_TYPE_B: {
 548                arcmsr_abort_hbb_allcmd(acb);
 549                }
 550        }
 551}
 552
 553static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
 554{
 555        struct scsi_cmnd *pcmd = ccb->pcmd;
 556
 557        scsi_dma_unmap(pcmd);
 558}
 559
 560static void arcmsr_ccb_complete(struct CommandControlBlock *ccb, int stand_flag)
 561{
 562        struct AdapterControlBlock *acb = ccb->acb;
 563        struct scsi_cmnd *pcmd = ccb->pcmd;
 564
 565        arcmsr_pci_unmap_dma(ccb);
 566        if (stand_flag == 1)
 567                atomic_dec(&acb->ccboutstandingcount);
 568        ccb->startdone = ARCMSR_CCB_DONE;
 569        ccb->ccb_flags = 0;
 570        list_add_tail(&ccb->list, &acb->ccb_free_list);
 571        pcmd->scsi_done(pcmd);
 572}
 573
 574static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
 575{
 576        struct MessageUnit_A __iomem *reg = acb->pmuA;
 577        int retry_count = 30;
 578
 579        writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
 580        do {
 581                if (!arcmsr_hba_wait_msgint_ready(acb))
 582                        break;
 583                else {
 584                        retry_count--;
 585                        printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
 586                        timeout, retry count down = %d \n", acb->host->host_no, retry_count);
 587                }
 588        } while (retry_count != 0);
 589}
 590
 591static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
 592{
 593        struct MessageUnit_B *reg = acb->pmuB;
 594        int retry_count = 30;
 595
 596        writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell_reg);
 597        do {
 598                if (!arcmsr_hbb_wait_msgint_ready(acb))
 599                        break;
 600                else {
 601                        retry_count--;
 602                        printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
 603                        timeout,retry count down = %d \n", acb->host->host_no, retry_count);
 604                }
 605        } while (retry_count != 0);
 606}
 607
 608static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
 609{
 610        switch (acb->adapter_type) {
 611
 612        case ACB_ADAPTER_TYPE_A: {
 613                arcmsr_flush_hba_cache(acb);
 614                }
 615                break;
 616
 617        case ACB_ADAPTER_TYPE_B: {
 618                arcmsr_flush_hbb_cache(acb);
 619                }
 620        }
 621}
 622
 623static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
 624{
 625
 626        struct scsi_cmnd *pcmd = ccb->pcmd;
 627        struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
 628
 629        pcmd->result = DID_OK << 16;
 630        if (sensebuffer) {
 631                int sense_data_length =
 632                        sizeof(struct SENSE_DATA) < SCSI_SENSE_BUFFERSIZE
 633                        ? sizeof(struct SENSE_DATA) : SCSI_SENSE_BUFFERSIZE;
 634                memset(sensebuffer, 0, SCSI_SENSE_BUFFERSIZE);
 635                memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
 636                sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
 637                sensebuffer->Valid = 1;
 638        }
 639}
 640
 641static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
 642{
 643        u32 orig_mask = 0;
 644        switch (acb->adapter_type) {
 645
 646        case ACB_ADAPTER_TYPE_A : {
 647                struct MessageUnit_A __iomem *reg = acb->pmuA;
 648                orig_mask = readl(&reg->outbound_intmask)|\
 649                                ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE;
 650                writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \
 651                                                &reg->outbound_intmask);
 652                }
 653                break;
 654
 655        case ACB_ADAPTER_TYPE_B : {
 656                struct MessageUnit_B *reg = acb->pmuB;
 657                orig_mask = readl(reg->iop2drv_doorbell_mask_reg) & \
 658                                        (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
 659                writel(0, reg->iop2drv_doorbell_mask_reg);
 660                }
 661                break;
 662        }
 663        return orig_mask;
 664}
 665
 666static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb, \
 667                        struct CommandControlBlock *ccb, uint32_t flag_ccb)
 668{
 669
 670        uint8_t id, lun;
 671        id = ccb->pcmd->device->id;
 672        lun = ccb->pcmd->device->lun;
 673        if (!(flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR)) {
 674                if (acb->devstate[id][lun] == ARECA_RAID_GONE)
 675                        acb->devstate[id][lun] = ARECA_RAID_GOOD;
 676                        ccb->pcmd->result = DID_OK << 16;
 677                        arcmsr_ccb_complete(ccb, 1);
 678        } else {
 679                switch (ccb->arcmsr_cdb.DeviceStatus) {
 680                case ARCMSR_DEV_SELECT_TIMEOUT: {
 681                        acb->devstate[id][lun] = ARECA_RAID_GONE;
 682                        ccb->pcmd->result = DID_NO_CONNECT << 16;
 683                        arcmsr_ccb_complete(ccb, 1);
 684                        }
 685                        break;
 686
 687                case ARCMSR_DEV_ABORTED:
 688
 689                case ARCMSR_DEV_INIT_FAIL: {
 690                        acb->devstate[id][lun] = ARECA_RAID_GONE;
 691                        ccb->pcmd->result = DID_BAD_TARGET << 16;
 692                        arcmsr_ccb_complete(ccb, 1);
 693                        }
 694                        break;
 695
 696                case ARCMSR_DEV_CHECK_CONDITION: {
 697                        acb->devstate[id][lun] = ARECA_RAID_GOOD;
 698                        arcmsr_report_sense_info(ccb);
 699                        arcmsr_ccb_complete(ccb, 1);
 700                        }
 701                        break;
 702
 703                default:
 704                                printk(KERN_NOTICE
 705                                        "arcmsr%d: scsi id = %d lun = %d"
 706                                        " isr get command error done, "
 707                                        "but got unknown DeviceStatus = 0x%x \n"
 708                                        , acb->host->host_no
 709                                        , id
 710                                        , lun
 711                                        , ccb->arcmsr_cdb.DeviceStatus);
 712                                        acb->devstate[id][lun] = ARECA_RAID_GONE;
 713                                        ccb->pcmd->result = DID_NO_CONNECT << 16;
 714                                        arcmsr_ccb_complete(ccb, 1);
 715                        break;
 716                }
 717        }
 718}
 719
 720static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, uint32_t flag_ccb)
 721
 722{
 723        struct CommandControlBlock *ccb;
 724
 725        ccb = (struct CommandControlBlock *)(acb->vir2phy_offset + (flag_ccb << 5));
 726        if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
 727                if (ccb->startdone == ARCMSR_CCB_ABORTED) {
 728                        struct scsi_cmnd *abortcmd = ccb->pcmd;
 729                        if (abortcmd) {
 730                                abortcmd->result |= DID_ABORT << 16;
 731                                arcmsr_ccb_complete(ccb, 1);
 732                                printk(KERN_NOTICE "arcmsr%d: ccb ='0x%p' \
 733                                isr got aborted command \n", acb->host->host_no, ccb);
 734                        }
 735                }
 736                printk(KERN_NOTICE "arcmsr%d: isr get an illegal ccb command \
 737                                done acb = '0x%p'"
 738                                "ccb = '0x%p' ccbacb = '0x%p' startdone = 0x%x"
 739                                " ccboutstandingcount = %d \n"
 740                                , acb->host->host_no
 741                                , acb
 742                                , ccb
 743                                , ccb->acb
 744                                , ccb->startdone
 745                                , atomic_read(&acb->ccboutstandingcount));
 746                }
 747        else
 748        arcmsr_report_ccb_state(acb, ccb, flag_ccb);
 749}
 750
 751static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
 752{
 753        int i = 0;
 754        uint32_t flag_ccb;
 755
 756        switch (acb->adapter_type) {
 757
 758        case ACB_ADAPTER_TYPE_A: {
 759                struct MessageUnit_A __iomem *reg = acb->pmuA;
 760                uint32_t outbound_intstatus;
 761                outbound_intstatus = readl(&reg->outbound_intstatus) &
 762                                        acb->outbound_int_enable;
 763                /*clear and abort all outbound posted Q*/
 764                writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
 765                while (((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF)
 766                                && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
 767                        arcmsr_drain_donequeue(acb, flag_ccb);
 768                }
 769                }
 770                break;
 771
 772        case ACB_ADAPTER_TYPE_B: {
 773                struct MessageUnit_B *reg = acb->pmuB;
 774                /*clear all outbound posted Q*/
 775                for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
 776                        if ((flag_ccb = readl(&reg->done_qbuffer[i])) != 0) {
 777                                writel(0, &reg->done_qbuffer[i]);
 778                                arcmsr_drain_donequeue(acb, flag_ccb);
 779                        }
 780                        writel(0, &reg->post_qbuffer[i]);
 781                }
 782                reg->doneq_index = 0;
 783                reg->postq_index = 0;
 784                }
 785                break;
 786        }
 787}
 788static void arcmsr_remove(struct pci_dev *pdev)
 789{
 790        struct Scsi_Host *host = pci_get_drvdata(pdev);
 791        struct AdapterControlBlock *acb =
 792                (struct AdapterControlBlock *) host->hostdata;
 793        int poll_count = 0;
 794
 795        arcmsr_free_sysfs_attr(acb);
 796        scsi_remove_host(host);
 797        arcmsr_stop_adapter_bgrb(acb);
 798        arcmsr_flush_adapter_cache(acb);
 799        arcmsr_disable_outbound_ints(acb);
 800        acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
 801        acb->acb_flags &= ~ACB_F_IOP_INITED;
 802
 803        for (poll_count = 0; poll_count < ARCMSR_MAX_OUTSTANDING_CMD; poll_count++) {
 804                if (!atomic_read(&acb->ccboutstandingcount))
 805                        break;
 806                arcmsr_interrupt(acb);/* FIXME: need spinlock */
 807                msleep(25);
 808        }
 809
 810        if (atomic_read(&acb->ccboutstandingcount)) {
 811                int i;
 812
 813                arcmsr_abort_allcmd(acb);
 814                arcmsr_done4abort_postqueue(acb);
 815                for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
 816                        struct CommandControlBlock *ccb = acb->pccb_pool[i];
 817                        if (ccb->startdone == ARCMSR_CCB_START) {
 818                                ccb->startdone = ARCMSR_CCB_ABORTED;
 819                                ccb->pcmd->result = DID_ABORT << 16;
 820                                arcmsr_ccb_complete(ccb, 1);
 821                        }
 822                }
 823        }
 824
 825        free_irq(pdev->irq, acb);
 826        arcmsr_free_ccb_pool(acb);
 827        pci_release_regions(pdev);
 828
 829        scsi_host_put(host);
 830
 831        pci_disable_device(pdev);
 832        pci_set_drvdata(pdev, NULL);
 833}
 834
 835static void arcmsr_shutdown(struct pci_dev *pdev)
 836{
 837        struct Scsi_Host *host = pci_get_drvdata(pdev);
 838        struct AdapterControlBlock *acb =
 839                (struct AdapterControlBlock *)host->hostdata;
 840
 841        arcmsr_stop_adapter_bgrb(acb);
 842        arcmsr_flush_adapter_cache(acb);
 843}
 844
 845static int arcmsr_module_init(void)
 846{
 847        int error = 0;
 848
 849        error = pci_register_driver(&arcmsr_pci_driver);
 850        return error;
 851}
 852
 853static void arcmsr_module_exit(void)
 854{
 855        pci_unregister_driver(&arcmsr_pci_driver);
 856}
 857module_init(arcmsr_module_init);
 858module_exit(arcmsr_module_exit);
 859
 860static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, \
 861                                                u32 intmask_org)
 862{
 863        u32 mask;
 864
 865        switch (acb->adapter_type) {
 866
 867        case ACB_ADAPTER_TYPE_A : {
 868                struct MessageUnit_A __iomem *reg = acb->pmuA;
 869                mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
 870                             ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE);
 871                writel(mask, &reg->outbound_intmask);
 872                acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
 873                }
 874                break;
 875
 876        case ACB_ADAPTER_TYPE_B : {
 877                struct MessageUnit_B *reg = acb->pmuB;
 878                mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK | \
 879                        ARCMSR_IOP2DRV_DATA_READ_OK | ARCMSR_IOP2DRV_CDB_DONE);
 880                writel(mask, reg->iop2drv_doorbell_mask_reg);
 881                acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
 882                }
 883        }
 884}
 885
 886static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
 887        struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
 888{
 889        struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
 890        int8_t *psge = (int8_t *)&arcmsr_cdb->u;
 891        __le32 address_lo, address_hi;
 892        int arccdbsize = 0x30;
 893        int nseg;
 894
 895        ccb->pcmd = pcmd;
 896        memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
 897        arcmsr_cdb->Bus = 0;
 898        arcmsr_cdb->TargetID = pcmd->device->id;
 899        arcmsr_cdb->LUN = pcmd->device->lun;
 900        arcmsr_cdb->Function = 1;
 901        arcmsr_cdb->CdbLength = (uint8_t)pcmd->cmd_len;
 902        arcmsr_cdb->Context = (unsigned long)arcmsr_cdb;
 903        memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);
 904
 905        nseg = scsi_dma_map(pcmd);
 906        if (nseg > ARCMSR_MAX_SG_ENTRIES)
 907                return FAILED;
 908        BUG_ON(nseg < 0);
 909
 910        if (nseg) {
 911                __le32 length;
 912                int i, cdb_sgcount = 0;
 913                struct scatterlist *sg;
 914
 915                /* map stor port SG list to our iop SG List. */
 916                scsi_for_each_sg(pcmd, sg, nseg, i) {
 917                        /* Get the physical address of the current data pointer */
 918                        length = cpu_to_le32(sg_dma_len(sg));
 919                        address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg)));
 920                        address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg)));
 921                        if (address_hi == 0) {
 922                                struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
 923
 924                                pdma_sg->address = address_lo;
 925                                pdma_sg->length = length;
 926                                psge += sizeof (struct SG32ENTRY);
 927                                arccdbsize += sizeof (struct SG32ENTRY);
 928                        } else {
 929                                struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;
 930
 931                                pdma_sg->addresshigh = address_hi;
 932                                pdma_sg->address = address_lo;
 933                                pdma_sg->length = length|cpu_to_le32(IS_SG64_ADDR);
 934                                psge += sizeof (struct SG64ENTRY);
 935                                arccdbsize += sizeof (struct SG64ENTRY);
 936                        }
 937                        cdb_sgcount++;
 938                }
 939                arcmsr_cdb->sgcount = (uint8_t)cdb_sgcount;
 940                arcmsr_cdb->DataLength = scsi_bufflen(pcmd);
 941                if ( arccdbsize > 256)
 942                        arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
 943        }
 944        if (pcmd->sc_data_direction == DMA_TO_DEVICE ) {
 945                arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
 946                ccb->ccb_flags |= CCB_FLAG_WRITE;
 947        }
 948        return SUCCESS;
 949}
 950
 951static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
 952{
 953        uint32_t cdb_shifted_phyaddr = ccb->cdb_shifted_phyaddr;
 954        struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
 955        atomic_inc(&acb->ccboutstandingcount);
 956        ccb->startdone = ARCMSR_CCB_START;
 957
 958        switch (acb->adapter_type) {
 959        case ACB_ADAPTER_TYPE_A: {
 960                struct MessageUnit_A __iomem *reg = acb->pmuA;
 961
 962                if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
 963                        writel(cdb_shifted_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
 964                        &reg->inbound_queueport);
 965                else {
 966                                writel(cdb_shifted_phyaddr, &reg->inbound_queueport);
 967                }
 968                }
 969                break;
 970
 971        case ACB_ADAPTER_TYPE_B: {
 972                struct MessageUnit_B *reg = acb->pmuB;
 973                uint32_t ending_index, index = reg->postq_index;
 974
 975                ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
 976                writel(0, &reg->post_qbuffer[ending_index]);
 977                if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
 978                        writel(cdb_shifted_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,\
 979                                                 &reg->post_qbuffer[index]);
 980                }
 981                else {
 982                        writel(cdb_shifted_phyaddr, &reg->post_qbuffer[index]);
 983                }
 984                index++;
 985                index %= ARCMSR_MAX_HBB_POSTQUEUE;/*if last index number set it to 0 */
 986                reg->postq_index = index;
 987                writel(ARCMSR_DRV2IOP_CDB_POSTED, reg->drv2iop_doorbell_reg);
 988                }
 989                break;
 990        }
 991}
 992
 993static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
 994{
 995        struct MessageUnit_A __iomem *reg = acb->pmuA;
 996        acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
 997        writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
 998
 999        if (arcmsr_hba_wait_msgint_ready(acb)) {
1000                printk(KERN_NOTICE
1001                        "arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
1002                        , acb->host->host_no);
1003        }
1004}
1005
1006static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
1007{
1008        struct MessageUnit_B *reg = acb->pmuB;
1009        acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1010        writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell_reg);
1011
1012        if (arcmsr_hbb_wait_msgint_ready(acb)) {
1013                printk(KERN_NOTICE
1014                        "arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
1015                        , acb->host->host_no);
1016        }
1017}
1018
1019static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
1020{
1021        switch (acb->adapter_type) {
1022        case ACB_ADAPTER_TYPE_A: {
1023                arcmsr_stop_hba_bgrb(acb);
1024                }
1025                break;
1026
1027        case ACB_ADAPTER_TYPE_B: {
1028                arcmsr_stop_hbb_bgrb(acb);
1029                }
1030                break;
1031        }
1032}
1033
1034static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
1035{
1036        switch (acb->adapter_type) {
1037        case ACB_ADAPTER_TYPE_A: {
1038                iounmap(acb->pmuA);
1039                dma_free_coherent(&acb->pdev->dev,
1040                ARCMSR_MAX_FREECCB_NUM * sizeof (struct CommandControlBlock) + 0x20,
1041                acb->dma_coherent,
1042                acb->dma_coherent_handle);
1043                break;
1044        }
1045        case ACB_ADAPTER_TYPE_B: {
1046                struct MessageUnit_B *reg = acb->pmuB;
1047                iounmap(reg->drv2iop_doorbell_reg - ARCMSR_DRV2IOP_DOORBELL);
1048                iounmap(reg->ioctl_wbuffer_reg - ARCMSR_IOCTL_WBUFFER);
1049                dma_free_coherent(&acb->pdev->dev,
1050                (ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 +
1051                sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle);
1052        }
1053        }
1054
1055}
1056
1057void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
1058{
1059        switch (acb->adapter_type) {
1060        case ACB_ADAPTER_TYPE_A: {
1061                struct MessageUnit_A __iomem *reg = acb->pmuA;
1062                writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
1063                }
1064                break;
1065
1066        case ACB_ADAPTER_TYPE_B: {
1067                struct MessageUnit_B *reg = acb->pmuB;
1068                writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell_reg);
1069                }
1070                break;
1071        }
1072}
1073
1074static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
1075{
1076        switch (acb->adapter_type) {
1077        case ACB_ADAPTER_TYPE_A: {
1078                struct MessageUnit_A __iomem *reg = acb->pmuA;
1079                /*
1080                ** push inbound doorbell tell iop, driver data write ok
1081                ** and wait reply on next hwinterrupt for next Qbuffer post
1082                */
1083                writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK, &reg->inbound_doorbell);
1084                }
1085                break;
1086
1087        case ACB_ADAPTER_TYPE_B: {
1088                struct MessageUnit_B *reg = acb->pmuB;
1089                /*
1090                ** push inbound doorbell tell iop, driver data write ok
1091                ** and wait reply on next hwinterrupt for next Qbuffer post
1092                */
1093                writel(ARCMSR_DRV2IOP_DATA_WRITE_OK, reg->drv2iop_doorbell_reg);
1094                }
1095                break;
1096        }
1097}
1098
1099struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
1100{
1101        struct QBUFFER __iomem *qbuffer = NULL;
1102
1103        switch (acb->adapter_type) {
1104
1105        case ACB_ADAPTER_TYPE_A: {
1106                struct MessageUnit_A __iomem *reg = acb->pmuA;
1107                qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer;
1108                }
1109                break;
1110
1111        case ACB_ADAPTER_TYPE_B: {
1112                struct MessageUnit_B *reg = acb->pmuB;
1113                qbuffer = (struct QBUFFER __iomem *)reg->ioctl_rbuffer_reg;
1114                }
1115                break;
1116        }
1117        return qbuffer;
1118}
1119
1120static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb)
1121{
1122        struct QBUFFER __iomem *pqbuffer = NULL;
1123
1124        switch (acb->adapter_type) {
1125
1126        case ACB_ADAPTER_TYPE_A: {
1127                struct MessageUnit_A __iomem *reg = acb->pmuA;
1128                pqbuffer = (struct QBUFFER __iomem *) &reg->message_wbuffer;
1129                }
1130                break;
1131
1132        case ACB_ADAPTER_TYPE_B: {
1133                struct MessageUnit_B  *reg = acb->pmuB;
1134                pqbuffer = (struct QBUFFER __iomem *)reg->ioctl_wbuffer_reg;
1135                }
1136                break;
1137        }
1138        return pqbuffer;
1139}
1140
1141static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
1142{
1143        struct QBUFFER __iomem *prbuffer;
1144        struct QBUFFER *pQbuffer;
1145        uint8_t __iomem *iop_data;
1146        int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;
1147
1148        rqbuf_lastindex = acb->rqbuf_lastindex;
1149        rqbuf_firstindex = acb->rqbuf_firstindex;
1150        prbuffer = arcmsr_get_iop_rqbuffer(acb);
1151        iop_data = (uint8_t __iomem *)prbuffer->data;
1152        iop_len = prbuffer->data_len;
1153        my_empty_len = (rqbuf_firstindex - rqbuf_lastindex -1)&(ARCMSR_MAX_QBUFFER -1);
1154
1155        if (my_empty_len >= iop_len)
1156        {
1157                while (iop_len > 0) {
1158                        pQbuffer = (struct QBUFFER *)&acb->rqbuffer[rqbuf_lastindex];
1159                        memcpy(pQbuffer, iop_data,1);
1160                        rqbuf_lastindex++;
1161                        rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1162                        iop_data++;
1163                        iop_len--;
1164                }
1165                acb->rqbuf_lastindex = rqbuf_lastindex;
1166                arcmsr_iop_message_read(acb);
1167        }
1168
1169        else {
1170                acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
1171        }
1172}
1173
1174static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
1175{
1176        acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
1177        if (acb->wqbuf_firstindex != acb->wqbuf_lastindex) {
1178                uint8_t *pQbuffer;
1179                struct QBUFFER __iomem *pwbuffer;
1180                uint8_t __iomem *iop_data;
1181                int32_t allxfer_len = 0;
1182
1183                acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
1184                pwbuffer = arcmsr_get_iop_wqbuffer(acb);
1185                iop_data = (uint8_t __iomem *)pwbuffer->data;
1186
1187                while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex) && \
1188                                                        (allxfer_len < 124)) {
1189                        pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex];
1190                        memcpy(iop_data, pQbuffer, 1);
1191                        acb->wqbuf_firstindex++;
1192                        acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
1193                        iop_data++;
1194                        allxfer_len++;
1195                }
1196                pwbuffer->data_len = allxfer_len;
1197
1198                arcmsr_iop_message_wrote(acb);
1199        }
1200
1201        if (acb->wqbuf_firstindex == acb->wqbuf_lastindex) {
1202                acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
1203        }
1204}
1205
1206static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb)
1207{
1208        uint32_t outbound_doorbell;
1209        struct MessageUnit_A __iomem *reg = acb->pmuA;
1210
1211        outbound_doorbell = readl(&reg->outbound_doorbell);
1212        writel(outbound_doorbell, &reg->outbound_doorbell);
1213        if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
1214                arcmsr_iop2drv_data_wrote_handle(acb);
1215        }
1216
1217        if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK)    {
1218                arcmsr_iop2drv_data_read_handle(acb);
1219        }
1220}
1221
1222static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
1223{
1224        uint32_t flag_ccb;
1225        struct MessageUnit_A __iomem *reg = acb->pmuA;
1226
1227        while ((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) {
1228                arcmsr_drain_donequeue(acb, flag_ccb);
1229        }
1230}
1231
1232static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
1233{
1234        uint32_t index;
1235        uint32_t flag_ccb;
1236        struct MessageUnit_B *reg = acb->pmuB;
1237
1238        index = reg->doneq_index;
1239
1240        while ((flag_ccb = readl(&reg->done_qbuffer[index])) != 0) {
1241                writel(0, &reg->done_qbuffer[index]);
1242                arcmsr_drain_donequeue(acb, flag_ccb);
1243                index++;
1244                index %= ARCMSR_MAX_HBB_POSTQUEUE;
1245                reg->doneq_index = index;
1246        }
1247}
1248
1249static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb)
1250{
1251        uint32_t outbound_intstatus;
1252        struct MessageUnit_A __iomem *reg = acb->pmuA;
1253
1254        outbound_intstatus = readl(&reg->outbound_intstatus) & \
1255                                                        acb->outbound_int_enable;
1256        if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT))      {
1257                return 1;
1258        }
1259        writel(outbound_intstatus, &reg->outbound_intstatus);
1260        if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT)       {
1261                arcmsr_hba_doorbell_isr(acb);
1262        }
1263        if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
1264                arcmsr_hba_postqueue_isr(acb);
1265        }
1266        return 0;
1267}
1268
1269static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb)
1270{
1271        uint32_t outbound_doorbell;
1272        struct MessageUnit_B *reg = acb->pmuB;
1273
1274        outbound_doorbell = readl(reg->iop2drv_doorbell_reg) & \
1275                                                        acb->outbound_int_enable;
1276        if (!outbound_doorbell)
1277                return 1;
1278
1279        writel(~outbound_doorbell, reg->iop2drv_doorbell_reg);
1280        /*in case the last action of doorbell interrupt clearance is cached, this action can push HW to write down the clear bit*/
1281        readl(reg->iop2drv_doorbell_reg);
1282        writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg);
1283        if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK)   {
1284                arcmsr_iop2drv_data_wrote_handle(acb);
1285        }
1286        if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) {
1287                arcmsr_iop2drv_data_read_handle(acb);
1288        }
1289        if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) {
1290                arcmsr_hbb_postqueue_isr(acb);
1291        }
1292
1293        return 0;
1294}
1295
1296static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
1297{
1298        switch (acb->adapter_type) {
1299        case ACB_ADAPTER_TYPE_A: {
1300                if (arcmsr_handle_hba_isr(acb)) {
1301                        return IRQ_NONE;
1302                }
1303                }
1304                break;
1305
1306        case ACB_ADAPTER_TYPE_B: {
1307                if (arcmsr_handle_hbb_isr(acb)) {
1308                        return IRQ_NONE;
1309                }
1310                }
1311                break;
1312        }
1313        return IRQ_HANDLED;
1314}
1315
1316static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
1317{
1318        if (acb) {
1319                /* stop adapter background rebuild */
1320                if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
1321                        uint32_t intmask_org;
1322                        acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1323                        intmask_org = arcmsr_disable_outbound_ints(acb);
1324                        arcmsr_stop_adapter_bgrb(acb);
1325                        arcmsr_flush_adapter_cache(acb);
1326                        arcmsr_enable_outbound_ints(acb, intmask_org);
1327                }
1328        }
1329}
1330
1331void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb)
1332{
1333        int32_t wqbuf_firstindex, wqbuf_lastindex;
1334        uint8_t *pQbuffer;
1335        struct QBUFFER __iomem *pwbuffer;
1336        uint8_t __iomem *iop_data;
1337        int32_t allxfer_len = 0;
1338
1339        pwbuffer = arcmsr_get_iop_wqbuffer(acb);
1340        iop_data = (uint8_t __iomem *)pwbuffer->data;
1341        if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
1342                acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
1343                wqbuf_firstindex = acb->wqbuf_firstindex;
1344                wqbuf_lastindex = acb->wqbuf_lastindex;
1345                while ((wqbuf_firstindex != wqbuf_lastindex) && (allxfer_len < 124)) {
1346                        pQbuffer = &acb->wqbuffer[wqbuf_firstindex];
1347                        memcpy(iop_data, pQbuffer, 1);
1348                        wqbuf_firstindex++;
1349                        wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
1350                        iop_data++;
1351                        allxfer_len++;
1352                }
1353                acb->wqbuf_firstindex = wqbuf_firstindex;
1354                pwbuffer->data_len = allxfer_len;
1355                arcmsr_iop_message_wrote(acb);
1356        }
1357}
1358
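/*
 * arcmsr_iop_message_xfer - handle the Areca ioctl pass-through commands.
 * The Areca control code is taken from bytes 5..8 of the CDB and the single
 * scatter-gather segment is interpreted as a struct CMD_MESSAGE_FIELD.
 */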
1359static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
1360                                        struct scsi_cmnd *cmd)
1361{
1362        struct CMD_MESSAGE_FIELD *pcmdmessagefld;
1363        int retvalue = 0, transfer_len = 0;
1364        char *buffer;
1365        struct scatterlist *sg;
1366        uint32_t controlcode = (uint32_t ) cmd->cmnd[5] << 24 |
1367                                                (uint32_t ) cmd->cmnd[6] << 16 |
1368                                                (uint32_t ) cmd->cmnd[7] << 8  |
1369                                                (uint32_t ) cmd->cmnd[8];
1370                                                /* 4 bytes: Areca io control code */
1371
1372        sg = scsi_sglist(cmd);
1373        buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
1374        if (scsi_sg_count(cmd) > 1) {
1375                retvalue = ARCMSR_MESSAGE_FAIL;
1376                goto message_out;
1377        }
1378        transfer_len += sg->length;
1379
1380        if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
1381                retvalue = ARCMSR_MESSAGE_FAIL;
1382                goto message_out;
1383        }
1384        pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
1385        switch(controlcode) {
1386
1387        case ARCMSR_MESSAGE_READ_RQBUFFER: {
1388                unsigned char *ver_addr;
1389                uint8_t *pQbuffer, *ptmpQbuffer;
1390                int32_t allxfer_len = 0;
1391
1392                ver_addr = kmalloc(1032, GFP_ATOMIC);
1393                if (!ver_addr) {
1394                        retvalue = ARCMSR_MESSAGE_FAIL;
1395                        goto message_out;
1396                }
1397                ptmpQbuffer = ver_addr;
1398                while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
1399                        && (allxfer_len < 1031)) {
1400                        pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
1401                        memcpy(ptmpQbuffer, pQbuffer, 1);
1402                        acb->rqbuf_firstindex++;
1403                        acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
1404                        ptmpQbuffer++;
1405                        allxfer_len++;
1406                }
1407                if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1408
1409                        struct QBUFFER __iomem *prbuffer;
1410                        uint8_t __iomem *iop_data;
1411                        int32_t iop_len;
1412
1413                        acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1414                        prbuffer = arcmsr_get_iop_rqbuffer(acb);
1415                        iop_data = prbuffer->data;
1416                        iop_len = readl(&prbuffer->data_len);
1417                        while (iop_len > 0) {
1418                                acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
1419                                acb->rqbuf_lastindex++;
1420                                acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1421                                iop_data++;
1422                                iop_len--;
1423                        }
1424                        arcmsr_iop_message_read(acb);
1425                }
1426                memcpy(pcmdmessagefld->messagedatabuffer, ver_addr, allxfer_len);
1427                pcmdmessagefld->cmdmessage.Length = allxfer_len;
1428                pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
1429                kfree(ver_addr);
1430                }
1431                break;
1432
1433        case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
1434                unsigned char *ver_addr;
1435                int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
1436                uint8_t *pQbuffer, *ptmpuserbuffer;
1437
1438                ver_addr = kmalloc(1032, GFP_ATOMIC);
1439                if (!ver_addr) {
1440                        retvalue = ARCMSR_MESSAGE_FAIL;
1441                        goto message_out;
1442                }
1443                ptmpuserbuffer = ver_addr;
1444                user_len = pcmdmessagefld->cmdmessage.Length;
1445                memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len);
1446                wqbuf_lastindex = acb->wqbuf_lastindex;
1447                wqbuf_firstindex = acb->wqbuf_firstindex;
1448                if (wqbuf_lastindex != wqbuf_firstindex) {
1449                        struct SENSE_DATA *sensebuffer =
1450                                (struct SENSE_DATA *)cmd->sense_buffer;
1451                        arcmsr_post_ioctldata2iop(acb);
1452                        /* has error report sensedata */
1453                        sensebuffer->ErrorCode = 0x70;
1454                        sensebuffer->SenseKey = ILLEGAL_REQUEST;
1455                        sensebuffer->AdditionalSenseLength = 0x0A;
1456                        sensebuffer->AdditionalSenseCode = 0x20;
1457                        sensebuffer->Valid = 1;
1458                        retvalue = ARCMSR_MESSAGE_FAIL;
1459                } else {
1460                        my_empty_len = (wqbuf_firstindex - wqbuf_lastindex - 1)
1461                                & (ARCMSR_MAX_QBUFFER - 1);
1462                        if (my_empty_len >= user_len) {
1463                                while (user_len > 0) {
1464                                        pQbuffer =
1465                                        &acb->wqbuffer[acb->wqbuf_lastindex];
1466                                        memcpy(pQbuffer, ptmpuserbuffer, 1);
1467                                        acb->wqbuf_lastindex++;
1468                                        acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1469                                        ptmpuserbuffer++;
1470                                        user_len--;
1471                                }
1472                                if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
1473                                        acb->acb_flags &=
1474                                                ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
1475                                        arcmsr_post_ioctldata2iop(acb);
1476                                }
1477                        } else {
1478                                /* has error report sensedata */
1479                                struct SENSE_DATA *sensebuffer =
1480                                        (struct SENSE_DATA *)cmd->sense_buffer;
1481                                sensebuffer->ErrorCode = 0x70;
1482                                sensebuffer->SenseKey = ILLEGAL_REQUEST;
1483                                sensebuffer->AdditionalSenseLength = 0x0A;
1484                                sensebuffer->AdditionalSenseCode = 0x20;
1485                                sensebuffer->Valid = 1;
1486                                retvalue = ARCMSR_MESSAGE_FAIL;
1487                        }
1488                }
1489                kfree(ver_addr);
1490                }
1491                break;
1492
1493        case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
1494                uint8_t *pQbuffer = acb->rqbuffer;
1495
1496                if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1497                        acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1498                        arcmsr_iop_message_read(acb);
1499                }
1500                acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
1501                acb->rqbuf_firstindex = 0;
1502                acb->rqbuf_lastindex = 0;
1503                memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
1504                pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
1505                }
1506                break;
1507
1508        case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
1509                uint8_t *pQbuffer = acb->wqbuffer;
1510
1511                if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1512                        acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1513                        arcmsr_iop_message_read(acb);
1514                }
1515                acb->acb_flags |=
1516                        (ACB_F_MESSAGE_WQBUFFER_CLEARED |
1517                                ACB_F_MESSAGE_WQBUFFER_READED);
1518                acb->wqbuf_firstindex = 0;
1519                acb->wqbuf_lastindex = 0;
1520                memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
1521                pcmdmessagefld->cmdmessage.ReturnCode =
1522                        ARCMSR_MESSAGE_RETURNCODE_OK;
1523                }
1524                break;
1525
1526        case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
1527                uint8_t *pQbuffer;
1528
1529                if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1530                        acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1531                        arcmsr_iop_message_read(acb);
1532                }
1533                acb->acb_flags |=
1534                        (ACB_F_MESSAGE_WQBUFFER_CLEARED
1535                        | ACB_F_MESSAGE_RQBUFFER_CLEARED
1536                        | ACB_F_MESSAGE_WQBUFFER_READED);
1537                acb->rqbuf_firstindex = 0;
1538                acb->rqbuf_lastindex = 0;
1539                acb->wqbuf_firstindex = 0;
1540                acb->wqbuf_lastindex = 0;
1541                pQbuffer = acb->rqbuffer;
1542                memset(pQbuffer, 0, sizeof(struct QBUFFER));
1543                pQbuffer = acb->wqbuffer;
1544                memset(pQbuffer, 0, sizeof(struct QBUFFER));
1545                pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
1546                }
1547                break;
1548
1549        case ARCMSR_MESSAGE_RETURN_CODE_3F: {
1550                pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
1551                }
1552                break;
1553
1554        case ARCMSR_MESSAGE_SAY_HELLO: {
1555                char *hello_string = "Hello! I am ARCMSR";
1556
1557                memcpy(pcmdmessagefld->messagedatabuffer, hello_string,
1558                        (int16_t)strlen(hello_string));
1559                pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
1560                }
1561                break;
1562
1563        case ARCMSR_MESSAGE_SAY_GOODBYE:
1564                arcmsr_iop_parking(acb);
1565                break;
1566
1567        case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
1568                arcmsr_flush_adapter_cache(acb);
1569                break;
1570
1571        default:
1572                retvalue = ARCMSR_MESSAGE_FAIL;
1573        }
1574        message_out:
1575        sg = scsi_sglist(cmd);
1576        kunmap_atomic(buffer - sg->offset, KM_IRQ0);
1577        return retvalue;
1578}
1579
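/*
 * arcmsr_get_freeccb - detach and return the first command control block
 * on the free list, or NULL when none is available.
 */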
1580static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb)
1581{
1582        struct list_head *head = &acb->ccb_free_list;
1583        struct CommandControlBlock *ccb = NULL;
1584
1585        if (!list_empty(head)) {
1586                ccb = list_entry(head->next, struct CommandControlBlock, list);
1587                list_del(head->next);
1588        }
1589        return ccb;
1590}
1591
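/*
 * arcmsr_handle_virtual_command - emulate the virtual device used for IOP
 * message transfer: answer INQUIRY locally and route READ_BUFFER and
 * WRITE_BUFFER through arcmsr_iop_message_xfer().
 */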
1592static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
1593                struct scsi_cmnd *cmd)
1594{
1595        switch (cmd->cmnd[0]) {
1596        case INQUIRY: {
1597                unsigned char inqdata[36];
1598                char *buffer;
1599                struct scatterlist *sg;
1600
1601                if (cmd->device->lun) {
1602                        cmd->result = (DID_TIME_OUT << 16);
1603                        cmd->scsi_done(cmd);
1604                        return;
1605                }
1606                inqdata[0] = TYPE_PROCESSOR;
1607                /* Periph Qualifier & Periph Dev Type */
1608                inqdata[1] = 0;
1609                /* rem media bit & Dev Type Modifier */
1610                inqdata[2] = 0;
1611                /* ISO, ECMA, & ANSI versions */
1612                inqdata[4] = 31;
1613                /* length of additional data */
1614                strncpy(&inqdata[8], "Areca   ", 8);
1615                /* Vendor Identification */
1616                strncpy(&inqdata[16], "RAID controller ", 16);
1617                /* Product Identification */
1618                strncpy(&inqdata[32], "R001", 4); /* Product Revision */
1619
1620                sg = scsi_sglist(cmd);
1621                buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
1622
1623                memcpy(buffer, inqdata, sizeof(inqdata));
1624                sg = scsi_sglist(cmd);
1625                kunmap_atomic(buffer - sg->offset, KM_IRQ0);
1626
1627                cmd->scsi_done(cmd);
1628        }
1629        break;
1630        case WRITE_BUFFER:
1631        case READ_BUFFER: {
1632                if (arcmsr_iop_message_xfer(acb, cmd))
1633                        cmd->result = (DID_ERROR << 16);
1634                cmd->scsi_done(cmd);
1635        }
1636        break;
1637        default:
1638                cmd->scsi_done(cmd);
1639        }
1640}
1641
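/*
 * arcmsr_queue_command - SCSI mid-layer queuecommand entry.  Returns busy
 * during a bus reset, routes target 16 to the virtual message device,
 * fails reads/writes to a gone raid volume, and otherwise builds a CCB and
 * posts it to the adapter.
 */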
1642static int arcmsr_queue_command(struct scsi_cmnd *cmd,
1643        void (* done)(struct scsi_cmnd *))
1644{
1645        struct Scsi_Host *host = cmd->device->host;
1646        struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
1647        struct CommandControlBlock *ccb;
1648        int target = cmd->device->id;
1649        int lun = cmd->device->lun;
1650
1651        cmd->scsi_done = done;
1652        cmd->host_scribble = NULL;
1653        cmd->result = 0;
1654        if (acb->acb_flags & ACB_F_BUS_RESET) {
1655                printk(KERN_NOTICE "arcmsr%d: bus reset"
1656                        " and return busy \n"
1657                        , acb->host->host_no);
1658                return SCSI_MLQUEUE_HOST_BUSY;
1659        }
1660        if (target == 16) {
1661                /* virtual device for iop message transfer */
1662                arcmsr_handle_virtual_command(acb, cmd);
1663                return 0;
1664        }
1665        if (acb->devstate[target][lun] == ARECA_RAID_GONE) {
1666                uint8_t block_cmd;
1667
1668                block_cmd = cmd->cmnd[0] & 0x0f;
1669                if (block_cmd == 0x08 || block_cmd == 0x0a) {
1670                        printk(KERN_NOTICE
1671                                "arcmsr%d: block 'read/write'"
1672                                " command with gone raid volume"
1673                                " Cmd = %2x, TargetId = %d, Lun = %d\n"
1674                                , acb->host->host_no
1675                                , cmd->cmnd[0]
1676                                , target, lun);
1677                        cmd->result = (DID_NO_CONNECT << 16);
1678                        cmd->scsi_done(cmd);
1679                        return 0;
1680                }
1681        }
1682        if (atomic_read(&acb->ccboutstandingcount) >=
1683                        ARCMSR_MAX_OUTSTANDING_CMD)
1684                return SCSI_MLQUEUE_HOST_BUSY;
1685
1686        ccb = arcmsr_get_freeccb(acb);
1687        if (!ccb)
1688                return SCSI_MLQUEUE_HOST_BUSY;
1689        if (arcmsr_build_ccb(acb, ccb, cmd) == FAILED) {
1690                cmd->result = (DID_ERROR << 16) | (RESERVATION_CONFLICT << 1);
1691                cmd->scsi_done(cmd);
1692                return 0;
1693        }
1694        arcmsr_post_ccb(acb, ccb);
1695        return 0;
1696}
1697
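/*
 * arcmsr_get_hba_config - issue 'get config' to a type A adapter and copy
 * the firmware model, version and queue/SDRAM parameters out of the
 * message_rwbuffer.
 */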
1698static void arcmsr_get_hba_config(struct AdapterControlBlock *acb)
1699{
1700        struct MessageUnit_A __iomem *reg = acb->pmuA;
1701        char *acb_firm_model = acb->firm_model;
1702        char *acb_firm_version = acb->firm_version;
1703        char __iomem *iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]);
1704        char __iomem *iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]);
1705        int count;
1706
1707        writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
1708        if (arcmsr_hba_wait_msgint_ready(acb)) {
1709                printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware "
1710                        "miscellaneous data' timeout\n", acb->host->host_no);
1711        }
1712
1713        count = 8;
1714        while (count) {
1715                *acb_firm_model = readb(iop_firm_model);
1716                acb_firm_model++;
1717                iop_firm_model++;
1718                count--;
1719        }
1720
1721        count = 16;
1722        while (count) {
1723                *acb_firm_version = readb(iop_firm_version);
1724                acb_firm_version++;
1725                iop_firm_version++;
1726                count--;
1727        }
1728
1729        printk(KERN_INFO        "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n"
1730                , acb->host->host_no
1731                , acb->firm_version);
1732
1733        acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
1734        acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
1735        acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
1736        acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
1737}
1738
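/*
 * arcmsr_get_hbb_config - type B counterpart of arcmsr_get_hba_config(),
 * using the msgcode_rwbuffer and the drv2iop doorbell instead.
 */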
1739static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
1740{
1741        struct MessageUnit_B *reg = acb->pmuB;
1742        uint32_t __iomem *lrwbuffer = reg->msgcode_rwbuffer_reg;
1743        char *acb_firm_model = acb->firm_model;
1744        char *acb_firm_version = acb->firm_version;
1745        char __iomem *iop_firm_model = (char __iomem *)(&lrwbuffer[15]);
1746        /*firm_model,15,60-67*/
1747        char __iomem *iop_firm_version = (char __iomem *)(&lrwbuffer[17]);
1748        /*firm_version,17,68-83*/
1749        int count;
1750
1751        writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell_reg);
1752        if (arcmsr_hbb_wait_msgint_ready(acb)) {
1753                printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware "
1754                        "miscellaneous data' timeout\n", acb->host->host_no);
1755        }
1756
1757        count = 8;
1758        while (count) {
1760                *acb_firm_model = readb(iop_firm_model);
1761                acb_firm_model++;
1762                iop_firm_model++;
1763                count--;
1764        }
1765
1766        count = 16;
1767        while (count) {
1769                *acb_firm_version = readb(iop_firm_version);
1770                acb_firm_version++;
1771                iop_firm_version++;
1772                count--;
1773        }
1774
1775        printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n",
1776                        acb->host->host_no,
1777                        acb->firm_version);
1778
1779        lrwbuffer++;
1780        acb->firm_request_len = readl(lrwbuffer++);
1781        /*firm_request_len,1,04-07*/
1782        acb->firm_numbers_queue = readl(lrwbuffer++);
1783        /*firm_numbers_queue,2,08-11*/
1784        acb->firm_sdram_size = readl(lrwbuffer++);
1785        /*firm_sdram_size,3,12-15*/
1786        acb->firm_hd_channels = readl(lrwbuffer);
1787        /*firm_ide_channels,4,16-19*/
1788}
1789
1790static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
1791{
1792        switch (acb->adapter_type) {
1793        case ACB_ADAPTER_TYPE_A: {
1794                arcmsr_get_hba_config(acb);
1795                }
1796                break;
1797
1798        case ACB_ADAPTER_TYPE_B: {
1799                arcmsr_get_hbb_config(acb);
1800                }
1801                break;
1802        }
1803}
1804
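/*
 * arcmsr_polling_hba_ccbdone - poll the type A outbound queue port until
 * the given CCB (typically one being aborted) is seen, or until roughly
 * 100 retries at 25 ms each have been made.
 */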
1805static void arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb,
1806        struct CommandControlBlock *poll_ccb)
1807{
1808        struct MessageUnit_A __iomem *reg = acb->pmuA;
1809        struct CommandControlBlock *ccb;
1810        uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
1811
1812        polling_hba_ccb_retry:
1813        poll_count++;
1814        outbound_intstatus = readl(&reg->outbound_intstatus) & acb->outbound_int_enable;
1815        writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
1816        while (1) {
1817                if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF) {
1818                        if (poll_ccb_done)
1819                                break;
1820                        else {
1821                                msleep(25);
1822                                if (poll_count > 100)
1823                                        break;
1824                                goto polling_hba_ccb_retry;
1825                        }
1826                }
1827                ccb = (struct CommandControlBlock *)(acb->vir2phy_offset + (flag_ccb << 5));
1828                poll_ccb_done = (ccb == poll_ccb) ? 1:0;
1829                if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
1830                        if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
1831                                printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
1832                                        " poll command abort successfully \n"
1833                                        , acb->host->host_no
1834                                        , ccb->pcmd->device->id
1835                                        , ccb->pcmd->device->lun
1836                                        , ccb);
1837                                ccb->pcmd->result = DID_ABORT << 16;
1838                                arcmsr_ccb_complete(ccb, 1);
1839                                poll_ccb_done = 1;
1840                                continue;
1841                        }
1842                        printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
1843                                " command done ccb = '0x%p'"
1844                                " ccboutstandingcount = %d\n"
1845                                , acb->host->host_no
1846                                , ccb
1847                                , atomic_read(&acb->ccboutstandingcount));
1848                        continue;
1849                }
1850                arcmsr_report_ccb_state(acb, ccb, flag_ccb);
1851        }
1852}
1853
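/*
 * arcmsr_polling_hbb_ccbdone - type B counterpart of the routine above:
 * drain the done_qbuffer reply FIFO while waiting for the given CCB.
 */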
1854static void arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb,
1855                                        struct CommandControlBlock *poll_ccb)
1856{
1857        struct MessageUnit_B *reg = acb->pmuB;
1858        struct CommandControlBlock *ccb;
1859        uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0;
1860        int index;
1861
1862        polling_hbb_ccb_retry:
1863        poll_count++;
1864        /* clear doorbell interrupt */
1865        writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell_reg);
1866        while (1) {
1867                index = reg->doneq_index;
1868                if ((flag_ccb = readl(&reg->done_qbuffer[index])) == 0) {
1869                        if (poll_ccb_done)
1870                                break;
1871                        else {
1872                                msleep(25);
1873                                if (poll_count > 100)
1874                                        break;
1875                                goto polling_hbb_ccb_retry;
1876                        }
1877                }
1878                writel(0, &reg->done_qbuffer[index]);
1879                index++;
1880                /* wrap the index to 0 after the last entry */
1881                index %= ARCMSR_MAX_HBB_POSTQUEUE;
1882                reg->doneq_index = index;
1883                /* check if command done with no error */
1884                ccb = (struct CommandControlBlock *)(acb->vir2phy_offset +
1885                        (flag_ccb << 5));/* frame must be 32 bytes aligned */
1886                poll_ccb_done = (ccb == poll_ccb) ? 1 : 0;
1887                if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
1888                        if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
1889                                printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
1890                                        " poll command abort successfully\n"
1891                                        , acb->host->host_no
1892                                        , ccb->pcmd->device->id
1893                                        , ccb->pcmd->device->lun
1894                                        , ccb);
1895                                ccb->pcmd->result = DID_ABORT << 16;
1896                                arcmsr_ccb_complete(ccb, 1);
1897                                continue;
1898                        }
1899                        printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
1900                                " command done ccb = '0x%p'"
1901                                " ccboutstandingcount = %d\n"
1902                                , acb->host->host_no
1903                                , ccb
1904                                , atomic_read(&acb->ccboutstandingcount));
1905                        continue;
1906                }
1907                arcmsr_report_ccb_state(acb, ccb, flag_ccb);
1908        }       /* drain reply FIFO */
1909}
1910
1911static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
1912                                        struct CommandControlBlock *poll_ccb)
1913{
1914        switch (acb->adapter_type) {
1915
1916        case ACB_ADAPTER_TYPE_A: {
1917                arcmsr_polling_hba_ccbdone(acb,poll_ccb);
1918                }
1919                break;
1920
1921        case ACB_ADAPTER_TYPE_B: {
1922                arcmsr_polling_hbb_ccbdone(acb,poll_ccb);
1923                }
1924        }
1925}
1926
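/*
 * arcmsr_iop_confirm - report the CCB pool location to the IOP 331.  For
 * type A only a non-zero high part of the CCB physical address is sent;
 * for type B the post/done queue window is programmed and the firmware is
 * switched into driver mode.  Returns 1 on a message timeout, 0 on success.
 */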
1927static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
1928{
1929        uint32_t cdb_phyaddr, ccb_phyaddr_hi32;
1930        dma_addr_t dma_coherent_handle;
1931        /*
1932        ********************************************************************
1933        ** here we need to tell iop 331 our freeccb.HighPart
1934        ** if freeccb.HighPart is not zero
1935        ********************************************************************
1936        */
1937        dma_coherent_handle = acb->dma_coherent_handle;
1938        cdb_phyaddr = (uint32_t)(dma_coherent_handle);
1939        ccb_phyaddr_hi32 = (uint32_t)((cdb_phyaddr >> 16) >> 16);
1940        /*
1941        ***********************************************************************
1942        **    if adapter type B, set window of "post command Q"
1943        ***********************************************************************
1944        */
1945        switch (acb->adapter_type) {
1946
1947        case ACB_ADAPTER_TYPE_A: {
1948                if (ccb_phyaddr_hi32 != 0) {
1949                        struct MessageUnit_A __iomem *reg = acb->pmuA;
1950                        uint32_t intmask_org;
1951                        intmask_org = arcmsr_disable_outbound_ints(acb);
1952                        writel(ARCMSR_SIGNATURE_SET_CONFIG,
1953                                                &reg->message_rwbuffer[0]);
1954                        writel(ccb_phyaddr_hi32, &reg->message_rwbuffer[1]);
1955                        writel(ARCMSR_INBOUND_MESG0_SET_CONFIG,
1956                                                        &reg->inbound_msgaddr0);
1957                        if (arcmsr_hba_wait_msgint_ready(acb)) {
1958                                printk(KERN_NOTICE "arcmsr%d: 'set ccb high "
1959                                        "part physical address' timeout\n",
1960                                        acb->host->host_no);
1961                                return 1;
1962                        }
1963                        arcmsr_enable_outbound_ints(acb, intmask_org);
1964                }
1965                }
1966                break;
1967
1968        case ACB_ADAPTER_TYPE_B: {
1969                unsigned long post_queue_phyaddr;
1970                uint32_t __iomem *rwbuffer;
1971
1972                struct MessageUnit_B *reg = acb->pmuB;
1973                uint32_t intmask_org;
1974                intmask_org = arcmsr_disable_outbound_ints(acb);
1975                reg->postq_index = 0;
1976                reg->doneq_index = 0;
1977                writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell_reg);
1978                if (arcmsr_hbb_wait_msgint_ready(acb)) {
1979                        printk(KERN_NOTICE "arcmsr%d: cannot set driver mode\n",
1980                                acb->host->host_no);
1981                        return 1;
1982                }
1983                post_queue_phyaddr = cdb_phyaddr + ARCMSR_MAX_FREECCB_NUM *
1984                        sizeof(struct CommandControlBlock) + offsetof(struct MessageUnit_B, post_qbuffer);
1985                rwbuffer = reg->msgcode_rwbuffer_reg;
1986                /* driver "set config" signature */
1987                writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
1988                /* normal should be zero */
1989                writel(ccb_phyaddr_hi32, rwbuffer++);
1990                /* post Q base address */
1991                writel(post_queue_phyaddr, rwbuffer++);
1992                /* done Q base address = post Q base + (256 + 8) * 4 */
1993                writel(post_queue_phyaddr + 1056, rwbuffer++);
1994                /* ccb maxQ size must be --> [(256 + 8)*4]*/
1995                writel(1056, rwbuffer);
1996
1997                writel(ARCMSR_MESSAGE_SET_CONFIG, reg->drv2iop_doorbell_reg);
1998                if (arcmsr_hbb_wait_msgint_ready(acb)) {
1999                        printk(KERN_NOTICE "arcmsr%d: 'set command Q window' "
2000                                "timeout\n", acb->host->host_no);
2001                        return 1;
2002                }
2003
2004                writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell_reg);
2005                if (arcmsr_hbb_wait_msgint_ready(acb)) {
2006                        printk(KERN_NOTICE "arcmsr%d: cannot start driver mode\n",
2007                                acb->host->host_no);
2008                        return 1;
2009                }
2010                arcmsr_enable_outbound_ints(acb, intmask_org);
2011                }
2012                break;
2013        }
2014        return 0;
2015}
2016
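/*
 * arcmsr_wait_firmware_ready - busy-wait until the firmware reports OK via
 * outbound_msgaddr1 (type A) or the iop2drv doorbell (type B).
 */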
2017static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
2018{
2019        uint32_t firmware_state = 0;
2020
2021        switch (acb->adapter_type) {
2022
2023        case ACB_ADAPTER_TYPE_A: {
2024                struct MessageUnit_A __iomem *reg = acb->pmuA;
2025                do {
2026                        firmware_state = readl(&reg->outbound_msgaddr1);
2027                } while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0);
2028                }
2029                break;
2030
2031        case ACB_ADAPTER_TYPE_B: {
2032                struct MessageUnit_B *reg = acb->pmuB;
2033                do {
2034                        firmware_state = readl(reg->iop2drv_doorbell_reg);
2035                } while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
2036                writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg);
2037                }
2038                break;
2039        }
2040}
2041
2042static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
2043{
2044        struct MessageUnit_A __iomem *reg = acb->pmuA;
2045        acb->acb_flags |= ACB_F_MSG_START_BGRB;
2046        writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
2047        if (arcmsr_hba_wait_msgint_ready(acb)) {
2048                printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background "
2049                        "rebuild' timeout\n", acb->host->host_no);
2050        }
2051}
2052
2053static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb)
2054{
2055        struct MessageUnit_B *reg = acb->pmuB;
2056        acb->acb_flags |= ACB_F_MSG_START_BGRB;
2057        writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell_reg);
2058        if (arcmsr_hbb_wait_msgint_ready(acb)) {
2059                printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background "
2060                        "rebuild' timeout\n", acb->host->host_no);
2061        }
2062}
2063
2064static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
2065{
2066        switch (acb->adapter_type) {
2067        case ACB_ADAPTER_TYPE_A:
2068                arcmsr_start_hba_bgrb(acb);
2069                break;
2070        case ACB_ADAPTER_TYPE_B:
2071                arcmsr_start_hbb_bgrb(acb);
2072                break;
2073        }
2074}
2075
2076static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
2077{
2078        switch (acb->adapter_type) {
2079        case ACB_ADAPTER_TYPE_A: {
2080                struct MessageUnit_A __iomem *reg = acb->pmuA;
2081                uint32_t outbound_doorbell;
2082                /* empty doorbell Qbuffer if door bell ringed */
2083                outbound_doorbell = readl(&reg->outbound_doorbell);
2084                /*clear doorbell interrupt */
2085                writel(outbound_doorbell, &reg->outbound_doorbell);
2086                writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
2087                }
2088                break;
2089
2090        case ACB_ADAPTER_TYPE_B: {
2091                struct MessageUnit_B *reg = acb->pmuB;
2092                /*clear interrupt and message state*/
2093                writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell_reg);
2094                writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell_reg);
2095                /* let IOP know data has been read */
2096                }
2097                break;
2098        }
2099}
2100
2101static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
2102{
2103        switch (acb->adapter_type) {
2104        case ACB_ADAPTER_TYPE_A:
2105                return;
2106        case ACB_ADAPTER_TYPE_B:
2107                {
2108                        struct MessageUnit_B *reg = acb->pmuB;
2109                        writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell_reg);
2110                        if (arcmsr_hbb_wait_msgint_ready(acb)) {
2111                                printk(KERN_NOTICE "ARCMSR IOP enable EOI_MODE timeout\n");
2112                                return;
2113                        }
2114                }
2115                break;
2116        }
2117        return;
2118}
2119
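/*
 * arcmsr_iop_init - bring the adapter online: wait for the firmware,
 * confirm the CCB window, read the firmware specification, start the
 * background rebuild, drain the doorbell queue buffer, enable EOI mode and
 * finally re-enable outbound interrupts.
 */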
2120static void arcmsr_iop_init(struct AdapterControlBlock *acb)
2121{
2122        uint32_t intmask_org;
2123
2124        /* disable all outbound interrupt */
2125        intmask_org = arcmsr_disable_outbound_ints(acb);
2126        arcmsr_wait_firmware_ready(acb);
2127        arcmsr_iop_confirm(acb);
2128        arcmsr_get_firmware_spec(acb);
2129        /*start background rebuild*/
2130        arcmsr_start_adapter_bgrb(acb);
2131        /* empty doorbell Qbuffer if door bell ringed */
2132        arcmsr_clear_doorbell_queue_buffer(acb);
2133        arcmsr_enable_eoi_mode(acb);
2134        /* enable outbound Post Queue,outbound doorbell Interrupt */
2135        arcmsr_enable_outbound_ints(acb, intmask_org);
2136        acb->acb_flags |= ACB_F_IOP_INITED;
2137}
2138
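/*
 * arcmsr_iop_reset - abort all outstanding commands on the IOP, drain the
 * outbound posted queue and complete every CCB still marked as started
 * after flagging it as aborted.
 */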
2139static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
2140{
2141        struct CommandControlBlock *ccb;
2142        uint32_t intmask_org;
2143        int i = 0;
2144
2145        if (atomic_read(&acb->ccboutstandingcount) != 0) {
2146                /* talk to iop 331 outstanding command aborted */
2147                arcmsr_abort_allcmd(acb);
2148
2149                /* wait for 3 sec for all command aborted*/
2150                ssleep(3);
2151
2152                /* disable all outbound interrupt */
2153                intmask_org = arcmsr_disable_outbound_ints(acb);
2154                /* clear all outbound posted Q */
2155                arcmsr_done4abort_postqueue(acb);
2156                for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
2157                        ccb = acb->pccb_pool[i];
2158                        if (ccb->startdone == ARCMSR_CCB_START) {
2159                                ccb->startdone = ARCMSR_CCB_ABORTED;
2160                                arcmsr_ccb_complete(ccb, 1);
2161                        }
2162                }
2163                /* enable all outbound interrupt */
2164                arcmsr_enable_outbound_ints(acb, intmask_org);
2165        }
2166}
2167
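/*
 * arcmsr_bus_reset - SCSI error-handler bus reset: poll the interrupt
 * handler for up to about 10 seconds while outstanding commands drain,
 * then reset the IOP state.
 */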
2168static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
2169{
2170        struct AdapterControlBlock *acb =
2171                (struct AdapterControlBlock *)cmd->device->host->hostdata;
2172        int i;
2173
2174        acb->num_resets++;
2175        acb->acb_flags |= ACB_F_BUS_RESET;
2176        for (i = 0; i < 400; i++) {
2177                if (!atomic_read(&acb->ccboutstandingcount))
2178                        break;
2179                arcmsr_interrupt(acb);/* FIXME: need spinlock */
2180                msleep(25);
2181        }
2182        arcmsr_iop_reset(acb);
2183        acb->acb_flags &= ~ACB_F_BUS_RESET;
2184        return SUCCESS;
2185}
2186
2187static void arcmsr_abort_one_cmd(struct AdapterControlBlock *acb,
2188                struct CommandControlBlock *ccb)
2189{
2190        u32 intmask;
2191
2192        ccb->startdone = ARCMSR_CCB_ABORTED;
2193
2194        /*
2195        ** Wait for 3 sec for all command done.
2196        */
2197        ssleep(3);
2198
2199        intmask = arcmsr_disable_outbound_ints(acb);
2200        arcmsr_polling_ccbdone(acb, ccb);
2201        arcmsr_enable_outbound_ints(acb, intmask);
2202}
2203
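/*
 * arcmsr_abort - SCSI error-handler abort: find the CCB carrying the given
 * command and, if it is still active, mark it aborted and poll the adapter
 * until it is reported back.
 */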
2204static int arcmsr_abort(struct scsi_cmnd *cmd)
2205{
2206        struct AdapterControlBlock *acb =
2207                (struct AdapterControlBlock *)cmd->device->host->hostdata;
2208        int i = 0;
2209
2210        printk(KERN_NOTICE
2211                "arcmsr%d: abort device command of scsi id = %d lun = %d \n",
2212                acb->host->host_no, cmd->device->id, cmd->device->lun);
2213        acb->num_aborts++;
2214        /*
2215        ************************************************
2216        ** the all interrupt service routine is locked
2217        ** we need to handle it as soon as possible and exit
2218        ************************************************
2219        */
2220        if (!atomic_read(&acb->ccboutstandingcount))
2221                return SUCCESS;
2222
2223        for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
2224                struct CommandControlBlock *ccb = acb->pccb_pool[i];
2225                if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
2226                        arcmsr_abort_one_cmd(acb, ccb);
2227                        break;
2228                }
2229        }
2230
2231        return SUCCESS;
2232}
2233
2234static const char *arcmsr_info(struct Scsi_Host *host)
2235{
2236        struct AdapterControlBlock *acb =
2237                (struct AdapterControlBlock *) host->hostdata;
2238        static char buf[256];
2239        char *type;
2240        int raid6 = 1;
2241
2242        switch (acb->pdev->device) {
2243        case PCI_DEVICE_ID_ARECA_1110:
2244        case PCI_DEVICE_ID_ARECA_1200:
2245        case PCI_DEVICE_ID_ARECA_1202:
2246        case PCI_DEVICE_ID_ARECA_1210:
2247                raid6 = 0;
2248                /*FALLTHRU*/
2249        case PCI_DEVICE_ID_ARECA_1120:
2250        case PCI_DEVICE_ID_ARECA_1130:
2251        case PCI_DEVICE_ID_ARECA_1160:
2252        case PCI_DEVICE_ID_ARECA_1170:
2253        case PCI_DEVICE_ID_ARECA_1201:
2254        case PCI_DEVICE_ID_ARECA_1220:
2255        case PCI_DEVICE_ID_ARECA_1230:
2256        case PCI_DEVICE_ID_ARECA_1260:
2257        case PCI_DEVICE_ID_ARECA_1270:
2258        case PCI_DEVICE_ID_ARECA_1280:
2259                type = "SATA";
2260                break;
2261        case PCI_DEVICE_ID_ARECA_1380:
2262        case PCI_DEVICE_ID_ARECA_1381:
2263        case PCI_DEVICE_ID_ARECA_1680:
2264        case PCI_DEVICE_ID_ARECA_1681:
2265                type = "SAS";
2266                break;
2267        default:
2268                type = "X-TYPE";
2269                break;
2270        }
2271        sprintf(buf, "Areca %s Host Adapter RAID Controller%s\n %s",
2272                        type, raid6 ? "( RAID6 capable)" : "",
2273                        ARCMSR_DRIVER_VERSION);
2274        return buf;
2275}
2276#ifdef CONFIG_SCSI_ARCMSR_AER
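/*
 * arcmsr_pci_slot_reset - PCI error recovery slot reset: re-enable the
 * device, mark every target/lun state as ARECA_RAID_GONE and rerun the
 * same initialisation sequence as arcmsr_iop_init().
 */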
2277static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev)
2278{
2279        struct Scsi_Host *host = pci_get_drvdata(pdev);
2280        struct AdapterControlBlock *acb =
2281                (struct AdapterControlBlock *) host->hostdata;
2282        uint32_t intmask_org;
2283        int i, j;
2284
2285        if (pci_enable_device(pdev)) {
2286                return PCI_ERS_RESULT_DISCONNECT;
2287        }
2288        pci_set_master(pdev);
2289        intmask_org = arcmsr_disable_outbound_ints(acb);
2290        acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
2291                           ACB_F_MESSAGE_RQBUFFER_CLEARED |
2292                           ACB_F_MESSAGE_WQBUFFER_READED);
2293        acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
2294        for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
2295                for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
2296                        acb->devstate[i][j] = ARECA_RAID_GONE;
2297
2298        arcmsr_wait_firmware_ready(acb);
2299        arcmsr_iop_confirm(acb);
2301        arcmsr_get_firmware_spec(acb);
2302        /*start background rebuild*/
2303        arcmsr_start_adapter_bgrb(acb);
2304        /* empty doorbell Qbuffer if door bell ringed */
2305        arcmsr_clear_doorbell_queue_buffer(acb);
2306        arcmsr_enable_eoi_mode(acb);
2307        /* enable outbound Post Queue,outbound doorbell Interrupt */
2308        arcmsr_enable_outbound_ints(acb, intmask_org);
2309        acb->acb_flags |= ACB_F_IOP_INITED;
2310
2311        pci_enable_pcie_error_reporting(pdev);
2312        return PCI_ERS_RESULT_RECOVERED;
2313}
2314
2315static void arcmsr_pci_ers_need_reset_forepart(struct pci_dev *pdev)
2316{
2317        struct Scsi_Host *host = pci_get_drvdata(pdev);
2318        struct AdapterControlBlock *acb = (struct AdapterControlBlock *)host->hostdata;
2319        struct CommandControlBlock *ccb;
2320        uint32_t intmask_org;
2321        int i = 0;
2322
2323        if (atomic_read(&acb->ccboutstandingcount) != 0) {
2324                /* talk to iop 331 outstanding command aborted */
2325                arcmsr_abort_allcmd(acb);
2326                /* wait for 3 sec for all command aborted*/
2327                ssleep(3);
2328                /* disable all outbound interrupt */
2329                intmask_org = arcmsr_disable_outbound_ints(acb);
2330                /* clear all outbound posted Q */
2331                arcmsr_done4abort_postqueue(acb);
2332                for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
2333                        ccb = acb->pccb_pool[i];
2334                        if (ccb->startdone == ARCMSR_CCB_START) {
2335                                ccb->startdone = ARCMSR_CCB_ABORTED;
2336                                arcmsr_ccb_complete(ccb, 1);
2337                        }
2338                }
2339                /* enable all outbound interrupt */
2340                arcmsr_enable_outbound_ints(acb, intmask_org);
2341        }
2342        pci_disable_device(pdev);
2343}
2344
2345static void arcmsr_pci_ers_disconnect_forepart(struct pci_dev *pdev)
2346{
2347        struct Scsi_Host *host = pci_get_drvdata(pdev);
2348        struct AdapterControlBlock *acb =
2349                (struct AdapterControlBlock *)host->hostdata;
2350
2351        arcmsr_stop_adapter_bgrb(acb);
2352        arcmsr_flush_adapter_cache(acb);
2353}
2354
2355static pci_ers_result_t arcmsr_pci_error_detected(struct pci_dev *pdev,
2356                                                pci_channel_state_t state)
2357{
2358        switch (state) {
2359        case pci_channel_io_frozen:
2360                arcmsr_pci_ers_need_reset_forepart(pdev);
2361                return PCI_ERS_RESULT_NEED_RESET;
2362        case pci_channel_io_perm_failure:
2363                arcmsr_pci_ers_disconnect_forepart(pdev);
2364                return PCI_ERS_RESULT_DISCONNECT;
2366        default:
2367                return PCI_ERS_RESULT_NEED_RESET;
2368        }
2369}
2370#endif
2371