/* linux/drivers/scsi/mvsas/mv_init.c */
   1/*
   2 * Marvell 88SE64xx/88SE94xx pci init
   3 *
   4 * Copyright 2007 Red Hat, Inc.
   5 * Copyright 2008 Marvell. <kewei@marvell.com>
   6 *
   7 * This file is licensed under GPLv2.
   8 *
   9 * This program is free software; you can redistribute it and/or
  10 * modify it under the terms of the GNU General Public License as
  11 * published by the Free Software Foundation; version 2 of the
  12 * License.
  13 *
  14 * This program is distributed in the hope that it will be useful,
  15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  17 * General Public License for more details.
  18 *
  19 * You should have received a copy of the GNU General Public License
  20 * along with this program; if not, write to the Free Software
  21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  22 * USA
  23*/
  24
  25
  26#include "mv_sas.h"
  27
  28static struct scsi_transport_template *mvs_stt;
/*
 * Per-chip parameter table, indexed by the chip_xxxx enum carried in
 * pci_device_id.driver_data.  Each entry supplies the core count, phy
 * count, three chip-geometry constants and the dispatch vtable
 * (64xx vs 94xx register access).
 * NOTE(review): exact meaning of the three numeric middle columns is
 * defined by struct mvs_chip_info in mv_sas.h — confirm there before
 * relying on any interpretation of them.
 */
static const struct mvs_chip_info mvs_chips[] = {
        [chip_6320] =   { 1, 2, 0x400, 17, 16,  9, &mvs_64xx_dispatch, },
        [chip_6440] =   { 1, 4, 0x400, 17, 16,  9, &mvs_64xx_dispatch, },
        [chip_6485] =   { 1, 8, 0x800, 33, 32, 10, &mvs_64xx_dispatch, },
        [chip_9180] =   { 2, 4, 0x800, 17, 64,  9, &mvs_94xx_dispatch, },
        [chip_9480] =   { 2, 4, 0x800, 17, 64,  9, &mvs_94xx_dispatch, },
        [chip_1300] =   { 1, 4, 0x400, 17, 16,  9, &mvs_64xx_dispatch, },
        [chip_1320] =   { 2, 4, 0x800, 17, 64,  9, &mvs_94xx_dispatch, },
};
  38
  39#define SOC_SAS_NUM 2
  40
/*
 * SCSI midlayer host template.  Most callbacks are the generic libsas
 * handlers; only slave_configure/slave_alloc and the scan hooks are
 * mvsas-specific.  can_queue/cmd_per_lun start at 1 and are raised in
 * mvs_post_sas_ha_init() once the chip's real queue depth is known.
 */
static struct scsi_host_template mvs_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .queuecommand           = sas_queuecommand,
        .target_alloc           = sas_target_alloc,
        .slave_configure        = mvs_slave_configure,
        .slave_destroy          = sas_slave_destroy,
        .scan_finished          = mvs_scan_finished,
        .scan_start             = mvs_scan_start,
        .change_queue_depth     = sas_change_queue_depth,
        .change_queue_type      = sas_change_queue_type,
        .bios_param             = sas_bios_param,
        .can_queue              = 1,
        .cmd_per_lun            = 1,
        .this_id                = -1,
        .sg_tablesize           = SG_ALL,
        .max_sectors            = SCSI_DEFAULT_MAX_SECTORS,
        .use_clustering         = ENABLE_CLUSTERING,
        .eh_device_reset_handler        = sas_eh_device_reset_handler,
        .eh_bus_reset_handler   = sas_eh_bus_reset_handler,
        .slave_alloc            = mvs_slave_alloc,
        .target_destroy         = sas_target_destroy,
        .ioctl                  = sas_ioctl,
};
  65
  66static struct sas_domain_function_template mvs_transport_ops = {
  67        .lldd_dev_found         = mvs_dev_found,
  68        .lldd_dev_gone  = mvs_dev_gone,
  69
  70        .lldd_execute_task      = mvs_queue_command,
  71        .lldd_control_phy       = mvs_phy_control,
  72
  73        .lldd_abort_task        = mvs_abort_task,
  74        .lldd_abort_task_set    = mvs_abort_task_set,
  75        .lldd_clear_aca         = mvs_clear_aca,
  76       .lldd_clear_task_set    = mvs_clear_task_set,
  77        .lldd_I_T_nexus_reset   = mvs_I_T_nexus_reset,
  78        .lldd_lu_reset          = mvs_lu_reset,
  79        .lldd_query_task        = mvs_query_task,
  80
  81        .lldd_port_formed       = mvs_port_formed,
  82        .lldd_port_deformed     = mvs_port_deformed,
  83
  84};
  85
  86static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
  87{
  88        struct mvs_phy *phy = &mvi->phy[phy_id];
  89        struct asd_sas_phy *sas_phy = &phy->sas_phy;
  90
  91        phy->mvi = mvi;
  92        init_timer(&phy->timer);
  93        sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
  94        sas_phy->class = SAS;
  95        sas_phy->iproto = SAS_PROTOCOL_ALL;
  96        sas_phy->tproto = 0;
  97        sas_phy->type = PHY_TYPE_PHYSICAL;
  98        sas_phy->role = PHY_ROLE_INITIATOR;
  99        sas_phy->oob_mode = OOB_NOT_CONNECTED;
 100        sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
 101
 102        sas_phy->id = phy_id;
 103        sas_phy->sas_addr = &mvi->sas_addr[0];
 104        sas_phy->frame_rcvd = &phy->frame_rcvd[0];
 105        sas_phy->ha = (struct sas_ha_struct *)mvi->shost->hostdata;
 106        sas_phy->lldd_phy = phy;
 107}
 108
/*
 * Release everything one core (mvs_info) owns: the per-slot command
 * buffers, the TX/RX rings, the slot table, the hotplug trash bucket,
 * the register mappings and finally the mvs_info itself.  Safe to call
 * on a partially-initialized core: each resource is freed only if its
 * pointer is set, and only tags_num slot buffers were ever allocated.
 */
static void mvs_free(struct mvs_info *mvi)
{
        int i;
        struct mvs_wq *mwq;
        int slot_nr;

        if (!mvi)
                return;

        /* SoC variants use a different (smaller) slot table */
        if (mvi->flags & MVF_FLAG_SOC)
                slot_nr = MVS_SOC_SLOTS;
        else
                slot_nr = MVS_SLOTS;

        /* tags_num counts how many slot buffers mvs_alloc() got through */
        for (i = 0; i < mvi->tags_num; i++) {
                struct mvs_slot_info *slot = &mvi->slot_info[i];
                if (slot->buf)
                        dma_free_coherent(mvi->dev, MVS_SLOT_BUF_SZ,
                                          slot->buf, slot->buf_dma);
        }

        if (mvi->tx)
                dma_free_coherent(mvi->dev,
                                  sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
                                  mvi->tx, mvi->tx_dma);
        if (mvi->rx_fis)
                dma_free_coherent(mvi->dev, MVS_RX_FISL_SZ,
                                  mvi->rx_fis, mvi->rx_fis_dma);
        /* RX ring carries one extra entry (the producer index slot) */
        if (mvi->rx)
                dma_free_coherent(mvi->dev,
                                  sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
                                  mvi->rx, mvi->rx_dma);
        if (mvi->slot)
                dma_free_coherent(mvi->dev,
                                  sizeof(*mvi->slot) * slot_nr,
                                  mvi->slot, mvi->slot_dma);
#ifndef DISABLE_HOTPLUG_DMA_FIX
        if (mvi->bulk_buffer)
                dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
                                  mvi->bulk_buffer, mvi->bulk_buffer_dma);
#endif

        MVS_CHIP_DISP->chip_iounmap(mvi);
        if (mvi->shost)
                scsi_host_put(mvi->shost);
        /* stop any deferred work still queued against this core */
        list_for_each_entry(mwq, &mvi->wq_list, entry)
                cancel_delayed_work(&mwq->work_q);
        kfree(mvi);
}
 158
 159#ifdef MVS_USE_TASKLET
 160struct tasklet_struct   mv_tasklet;
 161static void mvs_tasklet(unsigned long opaque)
 162{
 163        unsigned long flags;
 164        u32 stat;
 165        u16 core_nr, i = 0;
 166
 167        struct mvs_info *mvi;
 168        struct sas_ha_struct *sha = (struct sas_ha_struct *)opaque;
 169
 170        core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
 171        mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
 172
 173        if (unlikely(!mvi))
 174                BUG_ON(1);
 175
 176        for (i = 0; i < core_nr; i++) {
 177                mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
 178                stat = MVS_CHIP_DISP->isr_status(mvi, mvi->irq);
 179                if (stat)
 180                        MVS_CHIP_DISP->isr(mvi, mvi->irq, stat);
 181        }
 182
 183}
 184#endif
 185
/*
 * Shared interrupt handler.  Core 0's status register is used to
 * decide whether the interrupt is ours; if so, either schedule the
 * tasklet (MVS_USE_TASKLET builds) or service every core inline.
 * @opaque is the sas_ha_struct registered with request_irq().
 */
static irqreturn_t mvs_interrupt(int irq, void *opaque)
{
        u32 core_nr, i = 0;
        u32 stat;
        struct mvs_info *mvi;
        struct sas_ha_struct *sha = opaque;

        core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
        mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];

        if (unlikely(!mvi))
                return IRQ_NONE;

        /* not our interrupt: let the other sharers have it */
        stat = MVS_CHIP_DISP->isr_status(mvi, irq);
        if (!stat)
                return IRQ_NONE;

#ifdef MVS_USE_TASKLET
        tasklet_schedule(&mv_tasklet);
#else
        for (i = 0; i < core_nr; i++) {
                mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
                MVS_CHIP_DISP->isr(mvi, irq, stat);
        }
#endif
        return IRQ_HANDLED;
}
 213
 214static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
 215{
 216        int i, slot_nr;
 217
 218        if (mvi->flags & MVF_FLAG_SOC)
 219                slot_nr = MVS_SOC_SLOTS;
 220        else
 221                slot_nr = MVS_SLOTS;
 222
 223        spin_lock_init(&mvi->lock);
 224        for (i = 0; i < mvi->chip->n_phy; i++) {
 225                mvs_phy_init(mvi, i);
 226                mvi->port[i].wide_port_phymap = 0;
 227                mvi->port[i].port_attached = 0;
 228                INIT_LIST_HEAD(&mvi->port[i].list);
 229        }
 230        for (i = 0; i < MVS_MAX_DEVICES; i++) {
 231                mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED;
 232                mvi->devices[i].dev_type = NO_DEVICE;
 233                mvi->devices[i].device_id = i;
 234                mvi->devices[i].dev_status = MVS_DEV_NORMAL;
 235        }
 236
 237        /*
 238         * alloc and init our DMA areas
 239         */
 240        mvi->tx = dma_alloc_coherent(mvi->dev,
 241                                     sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
 242                                     &mvi->tx_dma, GFP_KERNEL);
 243        if (!mvi->tx)
 244                goto err_out;
 245        memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);
 246        mvi->rx_fis = dma_alloc_coherent(mvi->dev, MVS_RX_FISL_SZ,
 247                                         &mvi->rx_fis_dma, GFP_KERNEL);
 248        if (!mvi->rx_fis)
 249                goto err_out;
 250        memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);
 251
 252        mvi->rx = dma_alloc_coherent(mvi->dev,
 253                                     sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
 254                                     &mvi->rx_dma, GFP_KERNEL);
 255        if (!mvi->rx)
 256                goto err_out;
 257        memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1));
 258        mvi->rx[0] = cpu_to_le32(0xfff);
 259        mvi->rx_cons = 0xfff;
 260
 261        mvi->slot = dma_alloc_coherent(mvi->dev,
 262                                       sizeof(*mvi->slot) * slot_nr,
 263                                       &mvi->slot_dma, GFP_KERNEL);
 264        if (!mvi->slot)
 265                goto err_out;
 266        memset(mvi->slot, 0, sizeof(*mvi->slot) * slot_nr);
 267
 268#ifndef DISABLE_HOTPLUG_DMA_FIX
 269        mvi->bulk_buffer = dma_alloc_coherent(mvi->dev,
 270                                       TRASH_BUCKET_SIZE,
 271                                       &mvi->bulk_buffer_dma, GFP_KERNEL);
 272        if (!mvi->bulk_buffer)
 273                goto err_out;
 274#endif
 275        for (i = 0; i < slot_nr; i++) {
 276                struct mvs_slot_info *slot = &mvi->slot_info[i];
 277
 278                slot->buf = dma_alloc_coherent(mvi->dev, MVS_SLOT_BUF_SZ,
 279                                               &slot->buf_dma, GFP_KERNEL);
 280                if (!slot->buf) {
 281                        printk(KERN_DEBUG"failed to allocate slot->buf.\n");
 282                        goto err_out;
 283                }
 284                memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
 285                ++mvi->tags_num;
 286        }
 287        /* Initialize tags */
 288        mvs_tag_init(mvi);
 289        return 0;
 290err_out:
 291        return 1;
 292}
 293
 294
 295int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex)
 296{
 297        unsigned long res_start, res_len, res_flag, res_flag_ex = 0;
 298        struct pci_dev *pdev = mvi->pdev;
 299        if (bar_ex != -1) {
 300                /*
 301                 * ioremap main and peripheral registers
 302                 */
 303                res_start = pci_resource_start(pdev, bar_ex);
 304                res_len = pci_resource_len(pdev, bar_ex);
 305                if (!res_start || !res_len)
 306                        goto err_out;
 307
 308                res_flag_ex = pci_resource_flags(pdev, bar_ex);
 309                if (res_flag_ex & IORESOURCE_MEM) {
 310                        if (res_flag_ex & IORESOURCE_CACHEABLE)
 311                                mvi->regs_ex = ioremap(res_start, res_len);
 312                        else
 313                                mvi->regs_ex = ioremap_nocache(res_start,
 314                                                res_len);
 315                } else
 316                        mvi->regs_ex = (void *)res_start;
 317                if (!mvi->regs_ex)
 318                        goto err_out;
 319        }
 320
 321        res_start = pci_resource_start(pdev, bar);
 322        res_len = pci_resource_len(pdev, bar);
 323        if (!res_start || !res_len)
 324                goto err_out;
 325
 326        res_flag = pci_resource_flags(pdev, bar);
 327        if (res_flag & IORESOURCE_CACHEABLE)
 328                mvi->regs = ioremap(res_start, res_len);
 329        else
 330                mvi->regs = ioremap_nocache(res_start, res_len);
 331
 332        if (!mvi->regs) {
 333                if (mvi->regs_ex && (res_flag_ex & IORESOURCE_MEM))
 334                        iounmap(mvi->regs_ex);
 335                mvi->regs_ex = NULL;
 336                goto err_out;
 337        }
 338
 339        return 0;
 340err_out:
 341        return -1;
 342}
 343
/* Unmap a register window previously mapped by mvs_ioremap(). */
void mvs_iounmap(void __iomem *regs)
{
        iounmap(regs);
}
 348
/*
 * Allocate and initialize the mvs_info for core @id of a PCI adapter,
 * register it in the shared mvs_prv_info, map its registers and run
 * mvs_alloc().  Returns the new mvs_info, or NULL on failure (all
 * partial state is released via mvs_free()).
 */
static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev,
                                const struct pci_device_id *ent,
                                struct Scsi_Host *shost, unsigned int id)
{
        struct mvs_info *mvi;
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);

        /* slot_info[] is a flexible tail sized for the PCI slot count */
        mvi = kzalloc(sizeof(*mvi) + MVS_SLOTS * sizeof(struct mvs_slot_info),
                        GFP_KERNEL);
        if (!mvi)
                return NULL;

        mvi->pdev = pdev;
        mvi->dev = &pdev->dev;
        mvi->chip_id = ent->driver_data;
        mvi->chip = &mvs_chips[mvi->chip_id];
        INIT_LIST_HEAD(&mvi->wq_list);
        mvi->irq = pdev->irq;

        ((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi;
        ((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy;

        mvi->id = id;
        mvi->sas = sha;
        mvi->shost = shost;
#ifdef MVS_USE_TASKLET
        /* one global tasklet services all cores; re-init per core is harmless */
        tasklet_init(&mv_tasklet, mvs_tasklet, (unsigned long)sha);
#endif

        if (MVS_CHIP_DISP->chip_ioremap(mvi))
                goto err_out;
        /* mvs_alloc() returns 0 on success */
        if (!mvs_alloc(mvi, shost))
                return mvi;
err_out:
        mvs_free(mvi);
        return NULL;
}
 386
 387/* move to PCI layer or libata core? */
 388static int pci_go_64(struct pci_dev *pdev)
 389{
 390        int rc;
 391
 392        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
 393                rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 394                if (rc) {
 395                        rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
 396                        if (rc) {
 397                                dev_printk(KERN_ERR, &pdev->dev,
 398                                           "64-bit DMA enable failed\n");
 399                                return rc;
 400                        }
 401                }
 402        } else {
 403                rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 404                if (rc) {
 405                        dev_printk(KERN_ERR, &pdev->dev,
 406                                   "32-bit DMA enable failed\n");
 407                        return rc;
 408                }
 409                rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
 410                if (rc) {
 411                        dev_printk(KERN_ERR, &pdev->dev,
 412                                   "32-bit consistent DMA enable failed\n");
 413                        return rc;
 414                }
 415        }
 416
 417        return rc;
 418}
 419
 420static int __devinit mvs_prep_sas_ha_init(struct Scsi_Host *shost,
 421                                const struct mvs_chip_info *chip_info)
 422{
 423        int phy_nr, port_nr; unsigned short core_nr;
 424        struct asd_sas_phy **arr_phy;
 425        struct asd_sas_port **arr_port;
 426        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
 427
 428        core_nr = chip_info->n_host;
 429        phy_nr  = core_nr * chip_info->n_phy;
 430        port_nr = phy_nr;
 431
 432        memset(sha, 0x00, sizeof(struct sas_ha_struct));
 433        arr_phy  = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL);
 434        arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL);
 435        if (!arr_phy || !arr_port)
 436                goto exit_free;
 437
 438        sha->sas_phy = arr_phy;
 439        sha->sas_port = arr_port;
 440
 441        sha->lldd_ha = kzalloc(sizeof(struct mvs_prv_info), GFP_KERNEL);
 442        if (!sha->lldd_ha)
 443                goto exit_free;
 444
 445        ((struct mvs_prv_info *)sha->lldd_ha)->n_host = core_nr;
 446
 447        shost->transportt = mvs_stt;
 448        shost->max_id = 128;
 449        shost->max_lun = ~0;
 450        shost->max_channel = 1;
 451        shost->max_cmd_len = 16;
 452
 453        return 0;
 454exit_free:
 455        kfree(arr_phy);
 456        kfree(arr_port);
 457        return -1;
 458
 459}
 460
/*
 * Second-stage sas_ha_struct setup, run after every core has been
 * allocated: wire each core's phys/ports into the flat sha arrays and
 * fill in the remaining host-wide fields and queue depths.
 * Note: after the loop @mvi deliberately points at the LAST core; the
 * host-wide fields below are taken from it (sas_addr etc. are shared
 * across cores).
 */
static void  __devinit mvs_post_sas_ha_init(struct Scsi_Host *shost,
                        const struct mvs_chip_info *chip_info)
{
        int can_queue, i = 0, j = 0;
        struct mvs_info *mvi = NULL;
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
        unsigned short nr_core = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;

        for (j = 0; j < nr_core; j++) {
                mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
                for (i = 0; i < chip_info->n_phy; i++) {
                        sha->sas_phy[j * chip_info->n_phy  + i] =
                                &mvi->phy[i].sas_phy;
                        sha->sas_port[j * chip_info->n_phy + i] =
                                &mvi->port[i].sas_port;
                }
        }

        sha->sas_ha_name = DRV_NAME;
        sha->dev = mvi->dev;
        sha->lldd_module = THIS_MODULE;
        sha->sas_addr = &mvi->sas_addr[0];

        sha->num_phys = nr_core * chip_info->n_phy;

        sha->lldd_max_execute_num = 1;

        if (mvi->flags & MVF_FLAG_SOC)
                can_queue = MVS_SOC_CAN_QUEUE;
        else
                can_queue = MVS_CAN_QUEUE;

        sha->lldd_queue_size = can_queue;
        shost->can_queue = can_queue;
        /* spread the slot budget evenly across all phys */
        mvi->shost->cmd_per_lun = MVS_SLOTS/sha->num_phys;
        sha->core.shost = mvi->shost;
}
 498
 499static void mvs_init_sas_add(struct mvs_info *mvi)
 500{
 501        u8 i;
 502        for (i = 0; i < mvi->chip->n_phy; i++) {
 503                mvi->phy[i].dev_sas_addr = 0x5005043011ab0000ULL;
 504                mvi->phy[i].dev_sas_addr =
 505                        cpu_to_be64((u64)(*(u64 *)&mvi->phy[i].dev_sas_addr));
 506        }
 507
 508        memcpy(mvi->sas_addr, &mvi->phy[0].dev_sas_addr, SAS_ADDR_SIZE);
 509}
 510
 511static int __devinit mvs_pci_init(struct pci_dev *pdev,
 512                                  const struct pci_device_id *ent)
 513{
 514        unsigned int rc, nhost = 0;
 515        struct mvs_info *mvi;
 516        irq_handler_t irq_handler = mvs_interrupt;
 517        struct Scsi_Host *shost = NULL;
 518        const struct mvs_chip_info *chip;
 519
 520        dev_printk(KERN_INFO, &pdev->dev,
 521                "mvsas: driver version %s\n", DRV_VERSION);
 522        rc = pci_enable_device(pdev);
 523        if (rc)
 524                goto err_out_enable;
 525
 526        pci_set_master(pdev);
 527
 528        rc = pci_request_regions(pdev, DRV_NAME);
 529        if (rc)
 530                goto err_out_disable;
 531
 532        rc = pci_go_64(pdev);
 533        if (rc)
 534                goto err_out_regions;
 535
 536        shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
 537        if (!shost) {
 538                rc = -ENOMEM;
 539                goto err_out_regions;
 540        }
 541
 542        chip = &mvs_chips[ent->driver_data];
 543        SHOST_TO_SAS_HA(shost) =
 544                kcalloc(1, sizeof(struct sas_ha_struct), GFP_KERNEL);
 545        if (!SHOST_TO_SAS_HA(shost)) {
 546                kfree(shost);
 547                rc = -ENOMEM;
 548                goto err_out_regions;
 549        }
 550
 551        rc = mvs_prep_sas_ha_init(shost, chip);
 552        if (rc) {
 553                kfree(shost);
 554                rc = -ENOMEM;
 555                goto err_out_regions;
 556        }
 557
 558        pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost));
 559
 560        do {
 561                mvi = mvs_pci_alloc(pdev, ent, shost, nhost);
 562                if (!mvi) {
 563                        rc = -ENOMEM;
 564                        goto err_out_regions;
 565                }
 566
 567                mvs_init_sas_add(mvi);
 568
 569                mvi->instance = nhost;
 570                rc = MVS_CHIP_DISP->chip_init(mvi);
 571                if (rc) {
 572                        mvs_free(mvi);
 573                        goto err_out_regions;
 574                }
 575                nhost++;
 576        } while (nhost < chip->n_host);
 577
 578        mvs_post_sas_ha_init(shost, chip);
 579
 580        rc = scsi_add_host(shost, &pdev->dev);
 581        if (rc)
 582                goto err_out_shost;
 583
 584        rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
 585        if (rc)
 586                goto err_out_shost;
 587        rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED,
 588                DRV_NAME, SHOST_TO_SAS_HA(shost));
 589        if (rc)
 590                goto err_not_sas;
 591
 592        MVS_CHIP_DISP->interrupt_enable(mvi);
 593
 594        scsi_scan_host(mvi->shost);
 595
 596        return 0;
 597
 598err_not_sas:
 599        sas_unregister_ha(SHOST_TO_SAS_HA(shost));
 600err_out_shost:
 601        scsi_remove_host(mvi->shost);
 602err_out_regions:
 603        pci_release_regions(pdev);
 604err_out_disable:
 605        pci_disable_device(pdev);
 606err_out_enable:
 607        return rc;
 608}
 609
/*
 * PCI remove: tear down in the reverse order of probe — kill the
 * tasklet, detach from libsas and the SCSI midlayer, quiesce and
 * release the hardware, then free every core and the shared
 * sas_ha_struct.
 */
static void __devexit mvs_pci_remove(struct pci_dev *pdev)
{
        unsigned short core_nr, i = 0;
        struct sas_ha_struct *sha = pci_get_drvdata(pdev);
        struct mvs_info *mvi = NULL;

        core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
        mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];

#ifdef MVS_USE_TASKLET
        tasklet_kill(&mv_tasklet);
#endif

        pci_set_drvdata(pdev, NULL);
        sas_unregister_ha(sha);
        sas_remove_host(mvi->shost);
        scsi_remove_host(mvi->shost);

        /* stop the hardware before releasing the (shared) irq */
        MVS_CHIP_DISP->interrupt_disable(mvi);
        free_irq(mvi->irq, sha);
        for (i = 0; i < core_nr; i++) {
                mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
                mvs_free(mvi);
        }
        kfree(sha->sas_phy);
        kfree(sha->sas_port);
        kfree(sha);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        return;
}
 641
/*
 * Supported PCI IDs.  driver_data selects the mvs_chips[] entry.
 * NOTE(review): device 0x6340 is mapped to chip_6440 data — presumably
 * intentional (same register layout); confirm against Marvell docs.
 * The long-form 0x6440 entry distinguishes the 6485-subdevice variant
 * and must stay ahead of the generic 0x6440 line (first match wins).
 */
static struct pci_device_id __devinitdata mvs_pci_table[] = {
        { PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
        { PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
        {
                .vendor         = PCI_VENDOR_ID_MARVELL,
                .device         = 0x6440,
                .subvendor      = PCI_ANY_ID,
                .subdevice      = 0x6480,
                .class          = 0,
                .class_mask     = 0,
                .driver_data    = chip_6485,
        },
        { PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
        { PCI_VDEVICE(MARVELL, 0x6485), chip_6485 },
        { PCI_VDEVICE(MARVELL, 0x9480), chip_9480 },
        { PCI_VDEVICE(MARVELL, 0x9180), chip_9180 },
        { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 },
        { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 },

        { }     /* terminate list */
};
 663
/* PCI driver glue: probe/remove entry points and the ID table above. */
static struct pci_driver mvs_pci_driver = {
        .name           = DRV_NAME,
        .id_table       = mvs_pci_table,
        .probe          = mvs_pci_init,
        .remove         = __devexit_p(mvs_pci_remove),
};
 670
 671/* task handler */
 672struct task_struct *mvs_th;
 673static int __init mvs_init(void)
 674{
 675        int rc;
 676        mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
 677        if (!mvs_stt)
 678                return -ENOMEM;
 679
 680        rc = pci_register_driver(&mvs_pci_driver);
 681
 682        if (rc)
 683                goto err_out;
 684
 685        return 0;
 686
 687err_out:
 688        sas_release_transport(mvs_stt);
 689        return rc;
 690}
 691
/* Module exit: unregister the PCI driver, then drop the transport. */
static void __exit mvs_exit(void)
{
        pci_unregister_driver(&mvs_pci_driver);
        sas_release_transport(mvs_stt);
}
 697
 698module_init(mvs_init);
 699module_exit(mvs_exit);
 700
 701MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
 702MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver");
 703MODULE_VERSION(DRV_VERSION);
 704MODULE_LICENSE("GPL");
 705#ifdef CONFIG_PCI
 706MODULE_DEVICE_TABLE(pci, mvs_pci_table);
 707#endif
 708