linux/drivers/ata/sata_sil.c
   1/*
   2 *  sata_sil.c - Silicon Image SATA
   3 *
   4 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
   5 *                  Please ALWAYS copy linux-ide@vger.kernel.org
   6 *                  on emails.
   7 *
   8 *  Copyright 2003-2005 Red Hat, Inc.
   9 *  Copyright 2003 Benjamin Herrenschmidt
  10 *
  11 *
  12 *  This program is free software; you can redistribute it and/or modify
  13 *  it under the terms of the GNU General Public License as published by
  14 *  the Free Software Foundation; either version 2, or (at your option)
  15 *  any later version.
  16 *
  17 *  This program is distributed in the hope that it will be useful,
  18 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  19 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  20 *  GNU General Public License for more details.
  21 *
  22 *  You should have received a copy of the GNU General Public License
  23 *  along with this program; see the file COPYING.  If not, write to
  24 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
  25 *
  26 *
  27 *  libata documentation is available via 'make {ps|pdf}docs',
  28 *  as Documentation/DocBook/libata.*
  29 *
  30 *  Documentation for SiI 3112:
  31 *  http://gkernel.sourceforge.net/specs/sii/3112A_SiI-DS-0095-B2.pdf.bz2
  32 *
  33 *  Other errata and documentation available under NDA.
  34 *
  35 */
  36
  37#include <linux/kernel.h>
  38#include <linux/module.h>
  39#include <linux/pci.h>
  40#include <linux/init.h>
  41#include <linux/blkdev.h>
  42#include <linux/delay.h>
  43#include <linux/interrupt.h>
  44#include <linux/device.h>
  45#include <scsi/scsi_host.h>
  46#include <linux/libata.h>
  47#include <linux/dmi.h>
  48
  49#define DRV_NAME        "sata_sil"
  50#define DRV_VERSION     "2.4"
  51
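     /*
      * 2 GB DMA segment boundary: these parts implement Large Block Transfer
      * (see the comment in sil_sht below), so DMA segments are not limited to
      * the usual 64 KB SFF boundary.
      */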
  52#define SIL_DMA_BOUNDARY        0x7fffffffUL
  53
  54enum {
  55        SIL_MMIO_BAR            = 5,
  56
  57        /*
  58         * host flags
  59         */
  60        SIL_FLAG_NO_SATA_IRQ    = (1 << 28),
  61        SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
  62        SIL_FLAG_MOD15WRITE     = (1 << 30),
  63
  64        SIL_DFL_PORT_FLAGS      = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
  65                                  ATA_FLAG_MMIO,
  66
  67        /*
  68         * Controller IDs
  69         */
  70        sil_3112                = 0,
  71        sil_3112_no_sata_irq    = 1,
  72        sil_3512                = 2,
  73        sil_3114                = 3,
  74
  75        /*
  76         * Register offsets
  77         */
  78        SIL_SYSCFG              = 0x48,
  79
  80        /*
  81         * Register bits
  82         */
  83        /* SYSCFG */
  84        SIL_MASK_IDE0_INT       = (1 << 22),
  85        SIL_MASK_IDE1_INT       = (1 << 23),
  86        SIL_MASK_IDE2_INT       = (1 << 24),
  87        SIL_MASK_IDE3_INT       = (1 << 25),
  88        SIL_MASK_2PORT          = SIL_MASK_IDE0_INT | SIL_MASK_IDE1_INT,
  89        SIL_MASK_4PORT          = SIL_MASK_2PORT |
  90                                  SIL_MASK_IDE2_INT | SIL_MASK_IDE3_INT,
  91
  92        /* BMDMA/BMDMA2 */
  93        SIL_INTR_STEERING       = (1 << 1),
  94
  95        SIL_DMA_ENABLE          = (1 << 0),  /* DMA run switch */
  96        SIL_DMA_RDWR            = (1 << 3),  /* DMA Rd-Wr */
  97        SIL_DMA_SATA_IRQ        = (1 << 4),  /* OR of all SATA IRQs */
  98        SIL_DMA_ACTIVE          = (1 << 16), /* DMA running */
  99        SIL_DMA_ERROR           = (1 << 17), /* PCI bus error */
 100        SIL_DMA_COMPLETE        = (1 << 18), /* cmd complete / IRQ pending */
 101        SIL_DMA_N_SATA_IRQ      = (1 << 6),  /* SATA_IRQ for the next channel */
 102        SIL_DMA_N_ACTIVE        = (1 << 24), /* ACTIVE for the next channel */
 103        SIL_DMA_N_ERROR         = (1 << 25), /* ERROR for the next channel */
 104        SIL_DMA_N_COMPLETE      = (1 << 26), /* COMPLETE for the next channel */
 105
 106        /* SIEN */
 107        SIL_SIEN_N              = (1 << 16), /* triggered by SError.N */
 108
 109        /*
 110         * Others
 111         */
 112        SIL_QUIRK_MOD15WRITE    = (1 << 0),
 113        SIL_QUIRK_UDMA5MAX      = (1 << 1),
 114};
 115
 116static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
 117#ifdef CONFIG_PM
 118static int sil_pci_device_resume(struct pci_dev *pdev);
 119#endif
 120static void sil_dev_config(struct ata_device *dev);
 121static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
 122static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
 123static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed);
 124static void sil_qc_prep(struct ata_queued_cmd *qc);
 125static void sil_bmdma_setup(struct ata_queued_cmd *qc);
 126static void sil_bmdma_start(struct ata_queued_cmd *qc);
 127static void sil_bmdma_stop(struct ata_queued_cmd *qc);
 128static void sil_freeze(struct ata_port *ap);
 129static void sil_thaw(struct ata_port *ap);
 130
 131
 132static const struct pci_device_id sil_pci_tbl[] = {
 133        { PCI_VDEVICE(CMD, 0x3112), sil_3112 },
 134        { PCI_VDEVICE(CMD, 0x0240), sil_3112 },
 135        { PCI_VDEVICE(CMD, 0x3512), sil_3512 },
 136        { PCI_VDEVICE(CMD, 0x3114), sil_3114 },
 137        { PCI_VDEVICE(ATI, 0x436e), sil_3112 },
 138        { PCI_VDEVICE(ATI, 0x4379), sil_3112_no_sata_irq },
 139        { PCI_VDEVICE(ATI, 0x437a), sil_3112_no_sata_irq },
 140
 141        { }     /* terminate list */
 142};
 143
 144
 145/* TODO firmware versions should be added - eric */
 146static const struct sil_drivelist {
 147        const char *product;
 148        unsigned int quirk;
 149} sil_blacklist [] = {
 150        { "ST320012AS",         SIL_QUIRK_MOD15WRITE },
 151        { "ST330013AS",         SIL_QUIRK_MOD15WRITE },
 152        { "ST340017AS",         SIL_QUIRK_MOD15WRITE },
 153        { "ST360015AS",         SIL_QUIRK_MOD15WRITE },
 154        { "ST380023AS",         SIL_QUIRK_MOD15WRITE },
 155        { "ST3120023AS",        SIL_QUIRK_MOD15WRITE },
 156        { "ST340014ASL",        SIL_QUIRK_MOD15WRITE },
 157        { "ST360014ASL",        SIL_QUIRK_MOD15WRITE },
 158        { "ST380011ASL",        SIL_QUIRK_MOD15WRITE },
 159        { "ST3120022ASL",       SIL_QUIRK_MOD15WRITE },
 160        { "ST3160021ASL",       SIL_QUIRK_MOD15WRITE },
 161        { "Maxtor 4D060H3",     SIL_QUIRK_UDMA5MAX },
 162        { }
 163};
 164
 165static struct pci_driver sil_pci_driver = {
 166        .name                   = DRV_NAME,
 167        .id_table               = sil_pci_tbl,
 168        .probe                  = sil_init_one,
 169        .remove                 = ata_pci_remove_one,
 170#ifdef CONFIG_PM
 171        .suspend                = ata_pci_device_suspend,
 172        .resume                 = sil_pci_device_resume,
 173#endif
 174};
 175
 176static struct scsi_host_template sil_sht = {
 177        ATA_BASE_SHT(DRV_NAME),
  178        /* These controllers support Large Block Transfer, which allows
  179         * transfer chunks of up to 2GB that may cross 64KB boundaries;
  180         * the DMA limits are therefore more relaxed than standard ATA SFF. */
 181        .dma_boundary           = SIL_DMA_BOUNDARY,
 182        .sg_tablesize           = ATA_MAX_PRD
 183};
 184
 185static struct ata_port_operations sil_ops = {
 186        .inherits               = &ata_bmdma32_port_ops,
 187        .dev_config             = sil_dev_config,
 188        .set_mode               = sil_set_mode,
 189        .bmdma_setup            = sil_bmdma_setup,
 190        .bmdma_start            = sil_bmdma_start,
 191        .bmdma_stop             = sil_bmdma_stop,
 192        .qc_prep                = sil_qc_prep,
 193        .freeze                 = sil_freeze,
 194        .thaw                   = sil_thaw,
 195        .scr_read               = sil_scr_read,
 196        .scr_write              = sil_scr_write,
 197};
 198
 199static const struct ata_port_info sil_port_info[] = {
 200        /* sil_3112 */
 201        {
 202                .flags          = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE,
 203                .pio_mask       = ATA_PIO4,
 204                .mwdma_mask     = ATA_MWDMA2,
 205                .udma_mask      = ATA_UDMA5,
 206                .port_ops       = &sil_ops,
 207        },
 208        /* sil_3112_no_sata_irq */
 209        {
 210                .flags          = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE |
 211                                  SIL_FLAG_NO_SATA_IRQ,
 212                .pio_mask       = ATA_PIO4,
 213                .mwdma_mask     = ATA_MWDMA2,
 214                .udma_mask      = ATA_UDMA5,
 215                .port_ops       = &sil_ops,
 216        },
 217        /* sil_3512 */
 218        {
 219                .flags          = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
 220                .pio_mask       = ATA_PIO4,
 221                .mwdma_mask     = ATA_MWDMA2,
 222                .udma_mask      = ATA_UDMA5,
 223                .port_ops       = &sil_ops,
 224        },
 225        /* sil_3114 */
 226        {
 227                .flags          = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
 228                .pio_mask       = ATA_PIO4,
 229                .mwdma_mask     = ATA_MWDMA2,
 230                .udma_mask      = ATA_UDMA5,
 231                .port_ops       = &sil_ops,
 232        },
 233};
 234
 235/* per-port register offsets */
 236/* TODO: we can probably calculate rather than use a table */
 237static const struct {
 238        unsigned long tf;       /* ATA taskfile register block */
 239        unsigned long ctl;      /* ATA control/altstatus register block */
 240        unsigned long bmdma;    /* DMA register block */
 241        unsigned long bmdma2;   /* DMA register block #2 */
 242        unsigned long fifo_cfg; /* FIFO Valid Byte Count and Control */
 243        unsigned long scr;      /* SATA control register block */
 244        unsigned long sien;     /* SATA Interrupt Enable register */
 245        unsigned long xfer_mode;/* data transfer mode register */
 246        unsigned long sfis_cfg; /* SATA FIS reception config register */
 247} sil_port[] = {
 248        /* port 0 ... */
 249        /*   tf    ctl  bmdma  bmdma2  fifo    scr   sien   mode   sfis */
 250        {  0x80,  0x8A,   0x0,  0x10,  0x40, 0x100, 0x148,  0xb4, 0x14c },
 251        {  0xC0,  0xCA,   0x8,  0x18,  0x44, 0x180, 0x1c8,  0xf4, 0x1cc },
 252        { 0x280, 0x28A, 0x200, 0x210, 0x240, 0x300, 0x348, 0x2b4, 0x34c },
 253        { 0x2C0, 0x2CA, 0x208, 0x218, 0x244, 0x380, 0x3c8, 0x2f4, 0x3cc },
 254        /* ... port 3 */
 255};
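     /*
      * Regarding the TODO above: the offsets do follow a regular pattern.
      * With ch = port & 1 and pair = port >> 1, each block sits at
      * base + per-channel stride + 0x200 * pair, e.g.
      *   tf  = 0x80  + 0x40 * ch + 0x200 * pair
      *   scr = 0x100 + 0x80 * ch + 0x200 * pair
      * The table is presumably kept because it is cheap and easy to check
      * against the data sheet.
      */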
 256
 257MODULE_AUTHOR("Jeff Garzik");
 258MODULE_DESCRIPTION("low-level driver for Silicon Image SATA controller");
 259MODULE_LICENSE("GPL");
 260MODULE_DEVICE_TABLE(pci, sil_pci_tbl);
 261MODULE_VERSION(DRV_VERSION);
 262
 263static int slow_down;
 264module_param(slow_down, int, 0444);
  265MODULE_PARM_DESC(slow_down, "Sledgehammer used to work around random problems by limiting commands to 15 sectors (0=off, 1=on)");
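     /*
      * Example usage (assuming the driver is built as a module):
      *   modprobe sata_sil slow_down=1
      * or, when built in, pass "sata_sil.slow_down=1" on the kernel command line.
      */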
 266
 267
 268static void sil_bmdma_stop(struct ata_queued_cmd *qc)
 269{
 270        struct ata_port *ap = qc->ap;
 271        void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
 272        void __iomem *bmdma2 = mmio_base + sil_port[ap->port_no].bmdma2;
 273
 274        /* clear start/stop bit - can safely always write 0 */
 275        iowrite8(0, bmdma2);
 276
 277        /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
 278        ata_sff_dma_pause(ap);
 279}
 280
 281static void sil_bmdma_setup(struct ata_queued_cmd *qc)
 282{
 283        struct ata_port *ap = qc->ap;
 284        void __iomem *bmdma = ap->ioaddr.bmdma_addr;
 285
 286        /* load PRD table addr. */
 287        iowrite32(ap->prd_dma, bmdma + ATA_DMA_TABLE_OFS);
 288
 289        /* issue r/w command */
 290        ap->ops->sff_exec_command(ap, &qc->tf);
 291}
 292
 293static void sil_bmdma_start(struct ata_queued_cmd *qc)
 294{
 295        unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
 296        struct ata_port *ap = qc->ap;
 297        void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
 298        void __iomem *bmdma2 = mmio_base + sil_port[ap->port_no].bmdma2;
 299        u8 dmactl = ATA_DMA_START;
 300
  301        /* Set the transfer direction and start the host DMA transaction.
  302         * Note: for Large Block Transfer to work, the DMA must be started
  303         * using the bmdma2 register. */
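             /* ATA_DMA_WR makes the bus master write to system memory, so it
              * is set for device-to-host (read) commands; hence the inverted
              * test below. */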
 304        if (!rw)
 305                dmactl |= ATA_DMA_WR;
 306        iowrite8(dmactl, bmdma2);
 307}
 308
 309/* The way God intended PCI IDE scatter/gather lists to look and behave... */
 310static void sil_fill_sg(struct ata_queued_cmd *qc)
 311{
 312        struct scatterlist *sg;
 313        struct ata_port *ap = qc->ap;
 314        struct ata_prd *prd, *last_prd = NULL;
 315        unsigned int si;
 316
 317        prd = &ap->prd[0];
 318        for_each_sg(qc->sg, sg, qc->n_elem, si) {
 319                /* Note h/w doesn't support 64-bit, so we unconditionally
 320                 * truncate dma_addr_t to u32.
 321                 */
 322                u32 addr = (u32) sg_dma_address(sg);
 323                u32 sg_len = sg_dma_len(sg);
 324
 325                prd->addr = cpu_to_le32(addr);
 326                prd->flags_len = cpu_to_le32(sg_len);
 327                VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", si, addr, sg_len);
 328
 329                last_prd = prd;
 330                prd++;
 331        }
 332
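             /* mark the final PRD entry so the controller knows where the table ends */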
 333        if (likely(last_prd))
 334                last_prd->flags_len |= cpu_to_le32(ATA_PRD_EOT);
 335}
 336
 337static void sil_qc_prep(struct ata_queued_cmd *qc)
 338{
 339        if (!(qc->flags & ATA_QCFLAG_DMAMAP))
 340                return;
 341
 342        sil_fill_sg(qc);
 343}
 344
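     /* PCI_CACHE_LINE_SIZE holds the cache line size in units of 32-bit words;
      * sil_init_controller() derives the FIFO arbitration setup from it. */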
 345static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
 346{
 347        u8 cache_line = 0;
 348        pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line);
 349        return cache_line;
 350}
 351
 352/**
 353 *      sil_set_mode            -       wrap set_mode functions
 354 *      @link: link to set up
 355 *      @r_failed: returned device when we fail
 356 *
  357 *      Wrap the libata set_mode method: after the generic setup we need to
  358 *      inspect the results and program the chip's per-device transfer mode.
 359 */
 360
 361static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed)
 362{
 363        struct ata_port *ap = link->ap;
 364        void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
 365        void __iomem *addr = mmio_base + sil_port[ap->port_no].xfer_mode;
 366        struct ata_device *dev;
 367        u32 tmp, dev_mode[2] = { };
 368        int rc;
 369
 370        rc = ata_do_set_mode(link, r_failed);
 371        if (rc)
 372                return rc;
 373
 374        ata_for_each_dev(dev, link, ALL) {
 375                if (!ata_dev_enabled(dev))
 376                        dev_mode[dev->devno] = 0;       /* PIO0/1/2 */
 377                else if (dev->flags & ATA_DFLAG_PIO)
 378                        dev_mode[dev->devno] = 1;       /* PIO3/4 */
 379                else
 380                        dev_mode[dev->devno] = 3;       /* UDMA */
 381                /* value 2 indicates MDMA */
 382        }
 383
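             /* bits 1:0 hold device 0's mode class and bits 5:4 device 1's,
              * matching the dev_mode[] values computed above */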
 384        tmp = readl(addr);
 385        tmp &= ~((1<<5) | (1<<4) | (1<<1) | (1<<0));
 386        tmp |= dev_mode[0];
 387        tmp |= (dev_mode[1] << 4);
 388        writel(tmp, addr);
 389        readl(addr);    /* flush */
 390        return 0;
 391}
 392
 393static inline void __iomem *sil_scr_addr(struct ata_port *ap,
 394                                         unsigned int sc_reg)
 395{
 396        void __iomem *offset = ap->ioaddr.scr_addr;
 397
 398        switch (sc_reg) {
 399        case SCR_STATUS:
 400                return offset + 4;
 401        case SCR_ERROR:
 402                return offset + 8;
 403        case SCR_CONTROL:
 404                return offset;
 405        default:
 406                /* do nothing */
 407                break;
 408        }
 409
 410        return NULL;
 411}
 412
 413static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
 414{
 415        void __iomem *mmio = sil_scr_addr(link->ap, sc_reg);
 416
 417        if (mmio) {
 418                *val = readl(mmio);
 419                return 0;
 420        }
 421        return -EINVAL;
 422}
 423
 424static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
 425{
 426        void __iomem *mmio = sil_scr_addr(link->ap, sc_reg);
 427
 428        if (mmio) {
 429                writel(val, mmio);
 430                return 0;
 431        }
 432        return -EINVAL;
 433}
 434
 435static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
 436{
 437        struct ata_eh_info *ehi = &ap->link.eh_info;
 438        struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
 439        u8 status;
 440
 441        if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) {
 442                u32 serror;
 443
 444                /* SIEN doesn't mask SATA IRQs on some 3112s.  Those
 445                 * controllers continue to assert IRQ as long as
 446                 * SError bits are pending.  Clear SError immediately.
 447                 */
 448                sil_scr_read(&ap->link, SCR_ERROR, &serror);
 449                sil_scr_write(&ap->link, SCR_ERROR, serror);
 450
  451                /* Spurious interrupts sometimes occur; double-check
  452                 * that it is really PHYRDY CHG.
  453                 */
 454                if (serror & SERR_PHYRDY_CHG) {
 455                        ap->link.eh_info.serror |= serror;
 456                        goto freeze;
 457                }
 458
 459                if (!(bmdma2 & SIL_DMA_COMPLETE))
 460                        return;
 461        }
 462
 463        if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
 464                /* this sometimes happens, just clear IRQ */
 465                ap->ops->sff_check_status(ap);
 466                return;
 467        }
 468
  469        /* Check whether we are expecting an interrupt in this state */
 470        switch (ap->hsm_task_state) {
 471        case HSM_ST_FIRST:
  472                /* Some pre-ATAPI-4 devices assert INTRQ
  473                 * in this state when ready to receive the CDB.
  474                 */
 475
  476                /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
  477                 * The flag is set only for ATAPI devices, so there is no
  478                 * need to check ata_is_atapi(qc->tf.protocol) again.
  479                 */
 480                if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
 481                        goto err_hsm;
 482                break;
 483        case HSM_ST_LAST:
 484                if (ata_is_dma(qc->tf.protocol)) {
 485                        /* clear DMA-Start bit */
 486                        ap->ops->bmdma_stop(qc);
 487
 488                        if (bmdma2 & SIL_DMA_ERROR) {
 489                                qc->err_mask |= AC_ERR_HOST_BUS;
 490                                ap->hsm_task_state = HSM_ST_ERR;
 491                        }
 492                }
 493                break;
 494        case HSM_ST:
 495                break;
 496        default:
 497                goto err_hsm;
 498        }
 499
 500        /* check main status, clearing INTRQ */
 501        status = ap->ops->sff_check_status(ap);
 502        if (unlikely(status & ATA_BUSY))
 503                goto err_hsm;
 504
 505        /* ack bmdma irq events */
 506        ata_sff_irq_clear(ap);
 507
 508        /* kick HSM in the ass */
 509        ata_sff_hsm_move(ap, qc, status, 0);
 510
 511        if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
 512                ata_ehi_push_desc(ehi, "BMDMA2 stat 0x%x", bmdma2);
 513
 514        return;
 515
 516 err_hsm:
 517        qc->err_mask |= AC_ERR_HSM;
 518 freeze:
 519        ata_port_freeze(ap);
 520}
 521
 522static irqreturn_t sil_interrupt(int irq, void *dev_instance)
 523{
 524        struct ata_host *host = dev_instance;
 525        void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
 526        int handled = 0;
 527        int i;
 528
 529        spin_lock(&host->lock);
 530
 531        for (i = 0; i < host->n_ports; i++) {
 532                struct ata_port *ap = host->ports[i];
 533                u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);
 534
 535                if (unlikely(ap->flags & ATA_FLAG_DISABLED))
 536                        continue;
 537
 538                /* turn off SATA_IRQ if not supported */
 539                if (ap->flags & SIL_FLAG_NO_SATA_IRQ)
 540                        bmdma2 &= ~SIL_DMA_SATA_IRQ;
 541
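                     /* an all-ones readback usually means the read itself failed
                      * (e.g. the device has dropped off the bus), so treat it as
                      * "no interrupt pending" */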
 542                if (bmdma2 == 0xffffffff ||
 543                    !(bmdma2 & (SIL_DMA_COMPLETE | SIL_DMA_SATA_IRQ)))
 544                        continue;
 545
 546                sil_host_intr(ap, bmdma2);
 547                handled = 1;
 548        }
 549
 550        spin_unlock(&host->lock);
 551
 552        return IRQ_RETVAL(handled);
 553}
 554
 555static void sil_freeze(struct ata_port *ap)
 556{
 557        void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
 558        u32 tmp;
 559
  560        /* the global IRQ mask doesn't block SATA IRQs, so turn them off explicitly */
 561        writel(0, mmio_base + sil_port[ap->port_no].sien);
 562
 563        /* plug IRQ */
 564        tmp = readl(mmio_base + SIL_SYSCFG);
 565        tmp |= SIL_MASK_IDE0_INT << ap->port_no;
 566        writel(tmp, mmio_base + SIL_SYSCFG);
 567        readl(mmio_base + SIL_SYSCFG);  /* flush */
 568
 569        /* Ensure DMA_ENABLE is off.
 570         *
 571         * This is because the controller will not give us access to the
 572         * taskfile registers while a DMA is in progress
 573         */
 574        iowrite8(ioread8(ap->ioaddr.bmdma_addr) & ~SIL_DMA_ENABLE,
 575                 ap->ioaddr.bmdma_addr);
 576
  577        /* According to ata_bmdma_stop, an HDMA transition requires
  578         * one PIO cycle. But we can't read a taskfile register.
  579         */
 580        ioread8(ap->ioaddr.bmdma_addr);
 581}
 582
 583static void sil_thaw(struct ata_port *ap)
 584{
 585        void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
 586        u32 tmp;
 587
 588        /* clear IRQ */
 589        ap->ops->sff_check_status(ap);
 590        ata_sff_irq_clear(ap);
 591
 592        /* turn on SATA IRQ if supported */
 593        if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ))
 594                writel(SIL_SIEN_N, mmio_base + sil_port[ap->port_no].sien);
 595
 596        /* turn on IRQ */
 597        tmp = readl(mmio_base + SIL_SYSCFG);
 598        tmp &= ~(SIL_MASK_IDE0_INT << ap->port_no);
 599        writel(tmp, mmio_base + SIL_SYSCFG);
 600}
 601
 602/**
 603 *      sil_dev_config - Apply device/host-specific errata fixups
 604 *      @dev: Device to be examined
 605 *
 606 *      After the IDENTIFY [PACKET] DEVICE step is complete, and a
 607 *      device is known to be present, this function is called.
  608 *      We apply two errata fixups which are specific to Silicon Image:
  609 *      a Seagate fixup and a Maxtor fixup.
 610 *
  611 *      For certain Seagate devices, we must limit requests to 15 sectors
  612 *      (i.e. under 8K bytes).
 613 *
 614 *      For certain Maxtor devices, we must not program the drive
 615 *      beyond udma5.
 616 *
 617 *      Both fixups are unfairly pessimistic.  As soon as I get more
 618 *      information on these errata, I will create a more exhaustive
 619 *      list, and apply the fixups to only the specific
 620 *      devices/hosts/firmwares that need it.
 621 *
  622 *      20040111 - Seagate drives affected by the Mod15Write bug are blacklisted.
  623 *      The Maxtor quirk is also in the blacklist, but I'm keeping the original
  624 *      pessimistic fix for the following reasons...
  625 *      - There seems to be less info on it; only one device was gleaned from the
  626 *      Windows driver, so maybe only one is affected.  More info would be greatly
  627 *      appreciated.
  628 *      - But then again, UDMA5 is hardly anything to complain about.
 629 */
 630static void sil_dev_config(struct ata_device *dev)
 631{
 632        struct ata_port *ap = dev->link->ap;
 633        int print_info = ap->link.eh_context.i.flags & ATA_EHI_PRINTINFO;
 634        unsigned int n, quirks = 0;
 635        unsigned char model_num[ATA_ID_PROD_LEN + 1];
 636
 637        ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
 638
 639        for (n = 0; sil_blacklist[n].product; n++)
 640                if (!strcmp(sil_blacklist[n].product, model_num)) {
 641                        quirks = sil_blacklist[n].quirk;
 642                        break;
 643                }
 644
 645        /* limit requests to 15 sectors */
 646        if (slow_down ||
 647            ((ap->flags & SIL_FLAG_MOD15WRITE) &&
 648             (quirks & SIL_QUIRK_MOD15WRITE))) {
 649                if (print_info)
 650                        ata_dev_printk(dev, KERN_INFO, "applying Seagate "
 651                                       "errata fix (mod15write workaround)\n");
 652                dev->max_sectors = 15;
 653                return;
 654        }
 655
 656        /* limit to udma5 */
 657        if (quirks & SIL_QUIRK_UDMA5MAX) {
 658                if (print_info)
 659                        ata_dev_printk(dev, KERN_INFO, "applying Maxtor "
 660                                       "errata fix %s\n", model_num);
 661                dev->udma_mask &= ATA_UDMA5;
 662                return;
 663        }
 664}
 665
 666static void sil_init_controller(struct ata_host *host)
 667{
 668        struct pci_dev *pdev = to_pci_dev(host->dev);
 669        void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
 670        u8 cls;
 671        u32 tmp;
 672        int i;
 673
 674        /* Initialize FIFO PCI bus arbitration */
 675        cls = sil_get_device_cache_line(pdev);
 676        if (cls) {
 677                cls >>= 3;
 678                cls++;  /* cls = (line_size/8)+1 */
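                     /* the same value is written to both bytes of each port's
                      * "FIFO Valid Byte Count and Control" register below */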
 679                for (i = 0; i < host->n_ports; i++)
 680                        writew(cls << 8 | cls,
 681                               mmio_base + sil_port[i].fifo_cfg);
 682        } else
 683                dev_printk(KERN_WARNING, &pdev->dev,
 684                           "cache line size not set.  Driver may not function\n");
 685
 686        /* Apply R_ERR on DMA activate FIS errata workaround */
 687        if (host->ports[0]->flags & SIL_FLAG_RERR_ON_DMA_ACT) {
 688                int cnt;
 689
 690                for (i = 0, cnt = 0; i < host->n_ports; i++) {
 691                        tmp = readl(mmio_base + sil_port[i].sfis_cfg);
 692                        if ((tmp & 0x3) != 0x01)
 693                                continue;
 694                        if (!cnt)
 695                                dev_printk(KERN_INFO, &pdev->dev,
 696                                           "Applying R_ERR on DMA activate "
 697                                           "FIS errata fix\n");
 698                        writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg);
 699                        cnt++;
 700                }
 701        }
 702
 703        if (host->n_ports == 4) {
 704                /* flip the magic "make 4 ports work" bit */
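                     /* (SIL_INTR_STEERING presumably routes the second channel
                      * pair's interrupts onto the shared interrupt line; this is
                      * inferred from the flag name, not from public docs.) */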
 705                tmp = readl(mmio_base + sil_port[2].bmdma);
 706                if ((tmp & SIL_INTR_STEERING) == 0)
 707                        writel(tmp | SIL_INTR_STEERING,
 708                               mmio_base + sil_port[2].bmdma);
 709        }
 710}
 711
 712static bool sil_broken_system_poweroff(struct pci_dev *pdev)
 713{
 714        static const struct dmi_system_id broken_systems[] = {
 715                {
 716                        .ident = "HP Compaq nx6325",
 717                        .matches = {
 718                                DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
 719                                DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6325"),
 720                        },
 721                        /* PCI slot number of the controller */
 722                        .driver_data = (void *)0x12UL,
 723                },
 724
 725                { }     /* terminate list */
 726        };
 727        const struct dmi_system_id *dmi = dmi_first_match(broken_systems);
 728
 729        if (dmi) {
 730                unsigned long slot = (unsigned long)dmi->driver_data;
 731                /* apply the quirk only to on-board controllers */
 732                return slot == PCI_SLOT(pdev->devfn);
 733        }
 734
 735        return false;
 736}
 737
 738static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 739{
 740        static int printed_version;
 741        int board_id = ent->driver_data;
 742        struct ata_port_info pi = sil_port_info[board_id];
 743        const struct ata_port_info *ppi[] = { &pi, NULL };
 744        struct ata_host *host;
 745        void __iomem *mmio_base;
 746        int n_ports, rc;
 747        unsigned int i;
 748
 749        if (!printed_version++)
 750                dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
 751
 752        /* allocate host */
 753        n_ports = 2;
 754        if (board_id == sil_3114)
 755                n_ports = 4;
 756
 757        if (sil_broken_system_poweroff(pdev)) {
 758                pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN |
 759                                        ATA_FLAG_NO_HIBERNATE_SPINDOWN;
 760                dev_info(&pdev->dev, "quirky BIOS, skipping spindown "
 761                                "on poweroff and hibernation\n");
 762        }
 763
 764        host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
 765        if (!host)
 766                return -ENOMEM;
 767
 768        /* acquire resources and fill host */
 769        rc = pcim_enable_device(pdev);
 770        if (rc)
 771                return rc;
 772
 773        rc = pcim_iomap_regions(pdev, 1 << SIL_MMIO_BAR, DRV_NAME);
 774        if (rc == -EBUSY)
 775                pcim_pin_device(pdev);
 776        if (rc)
 777                return rc;
 778        host->iomap = pcim_iomap_table(pdev);
 779
 780        rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
 781        if (rc)
 782                return rc;
 783        rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
 784        if (rc)
 785                return rc;
 786
 787        mmio_base = host->iomap[SIL_MMIO_BAR];
 788
 789        for (i = 0; i < host->n_ports; i++) {
 790                struct ata_port *ap = host->ports[i];
 791                struct ata_ioports *ioaddr = &ap->ioaddr;
 792
 793                ioaddr->cmd_addr = mmio_base + sil_port[i].tf;
 794                ioaddr->altstatus_addr =
 795                ioaddr->ctl_addr = mmio_base + sil_port[i].ctl;
 796                ioaddr->bmdma_addr = mmio_base + sil_port[i].bmdma;
 797                ioaddr->scr_addr = mmio_base + sil_port[i].scr;
 798                ata_sff_std_ports(ioaddr);
 799
 800                ata_port_pbar_desc(ap, SIL_MMIO_BAR, -1, "mmio");
 801                ata_port_pbar_desc(ap, SIL_MMIO_BAR, sil_port[i].tf, "tf");
 802        }
 803
 804        /* initialize and activate */
 805        sil_init_controller(host);
 806
 807        pci_set_master(pdev);
 808        return ata_host_activate(host, pdev->irq, sil_interrupt, IRQF_SHARED,
 809                                 &sil_sht);
 810}
 811
 812#ifdef CONFIG_PM
 813static int sil_pci_device_resume(struct pci_dev *pdev)
 814{
 815        struct ata_host *host = dev_get_drvdata(&pdev->dev);
 816        int rc;
 817
 818        rc = ata_pci_device_do_resume(pdev);
 819        if (rc)
 820                return rc;
 821
 822        sil_init_controller(host);
 823        ata_host_resume(host);
 824
 825        return 0;
 826}
 827#endif
 828
 829static int __init sil_init(void)
 830{
 831        return pci_register_driver(&sil_pci_driver);
 832}
 833
 834static void __exit sil_exit(void)
 835{
 836        pci_unregister_driver(&sil_pci_driver);
 837}
 838
 839
 840module_init(sil_init);
 841module_exit(sil_exit);
 842