linux/drivers/ata/sata_sil.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  sata_sil.c - Silicon Image SATA
 *
 *  Maintained by:  Tejun Heo <tj@kernel.org>
 *                  Please ALWAYS copy linux-ide@vger.kernel.org
 *                  on emails.
 *
 *  Copyright 2003-2005 Red Hat, Inc.
 *  Copyright 2003 Benjamin Herrenschmidt
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/driver-api/libata.rst
 *
 *  Documentation for SiI 3112:
 *  http://gkernel.sourceforge.net/specs/sii/3112A_SiI-DS-0095-B2.pdf.bz2
 *
 *  Other errata and documentation available under NDA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/dmi.h>

#define DRV_NAME        "sata_sil"
#define DRV_VERSION     "2.4"

#define SIL_DMA_BOUNDARY        0x7fffffffUL

enum {
        SIL_MMIO_BAR            = 5,

        /*
         * host flags
         */
        SIL_FLAG_NO_SATA_IRQ    = (1 << 28),
        SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
        SIL_FLAG_MOD15WRITE     = (1 << 30),

        SIL_DFL_PORT_FLAGS      = ATA_FLAG_SATA,

        /*
         * Controller IDs
         */
        sil_3112                = 0,
        sil_3112_no_sata_irq    = 1,
        sil_3512                = 2,
        sil_3114                = 3,

        /*
         * Register offsets
         */
        SIL_SYSCFG              = 0x48,

        /*
         * Register bits
         */
        /* SYSCFG */
        SIL_MASK_IDE0_INT       = (1 << 22),
        SIL_MASK_IDE1_INT       = (1 << 23),
        SIL_MASK_IDE2_INT       = (1 << 24),
        SIL_MASK_IDE3_INT       = (1 << 25),
        SIL_MASK_2PORT          = SIL_MASK_IDE0_INT | SIL_MASK_IDE1_INT,
        SIL_MASK_4PORT          = SIL_MASK_2PORT |
                                  SIL_MASK_IDE2_INT | SIL_MASK_IDE3_INT,

        /* BMDMA/BMDMA2 */
        SIL_INTR_STEERING       = (1 << 1),

        SIL_DMA_ENABLE          = (1 << 0),  /* DMA run switch */
        SIL_DMA_RDWR            = (1 << 3),  /* DMA Rd-Wr */
        SIL_DMA_SATA_IRQ        = (1 << 4),  /* OR of all SATA IRQs */
        SIL_DMA_ACTIVE          = (1 << 16), /* DMA running */
        SIL_DMA_ERROR           = (1 << 17), /* PCI bus error */
        SIL_DMA_COMPLETE        = (1 << 18), /* cmd complete / IRQ pending */
        SIL_DMA_N_SATA_IRQ      = (1 << 6),  /* SATA_IRQ for the next channel */
        SIL_DMA_N_ACTIVE        = (1 << 24), /* ACTIVE for the next channel */
        SIL_DMA_N_ERROR         = (1 << 25), /* ERROR for the next channel */
        SIL_DMA_N_COMPLETE      = (1 << 26), /* COMPLETE for the next channel */

        /* SIEN */
        SIL_SIEN_N              = (1 << 16), /* triggered by SError.N */

        /*
         * Others
         */
        SIL_QUIRK_MOD15WRITE    = (1 << 0),
        SIL_QUIRK_UDMA5MAX      = (1 << 1),
};

static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int sil_pci_device_resume(struct pci_dev *pdev);
#endif
static void sil_dev_config(struct ata_device *dev);
static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed);
static void sil_qc_prep(struct ata_queued_cmd *qc);
static void sil_bmdma_setup(struct ata_queued_cmd *qc);
static void sil_bmdma_start(struct ata_queued_cmd *qc);
static void sil_bmdma_stop(struct ata_queued_cmd *qc);
static void sil_freeze(struct ata_port *ap);
static void sil_thaw(struct ata_port *ap);


static const struct pci_device_id sil_pci_tbl[] = {
        { PCI_VDEVICE(CMD, 0x3112), sil_3112 },
        { PCI_VDEVICE(CMD, 0x0240), sil_3112 },
        { PCI_VDEVICE(CMD, 0x3512), sil_3512 },
        { PCI_VDEVICE(CMD, 0x3114), sil_3114 },
        { PCI_VDEVICE(ATI, 0x436e), sil_3112 },
        { PCI_VDEVICE(ATI, 0x4379), sil_3112_no_sata_irq },
        { PCI_VDEVICE(ATI, 0x437a), sil_3112_no_sata_irq },

        { }     /* terminate list */
};


/* TODO firmware versions should be added - eric */
static const struct sil_drivelist {
        const char *product;
        unsigned int quirk;
} sil_blacklist [] = {
        { "ST320012AS",         SIL_QUIRK_MOD15WRITE },
        { "ST330013AS",         SIL_QUIRK_MOD15WRITE },
        { "ST340017AS",         SIL_QUIRK_MOD15WRITE },
        { "ST360015AS",         SIL_QUIRK_MOD15WRITE },
        { "ST380023AS",         SIL_QUIRK_MOD15WRITE },
        { "ST3120023AS",        SIL_QUIRK_MOD15WRITE },
        { "ST340014ASL",        SIL_QUIRK_MOD15WRITE },
        { "ST360014ASL",        SIL_QUIRK_MOD15WRITE },
        { "ST380011ASL",        SIL_QUIRK_MOD15WRITE },
        { "ST3120022ASL",       SIL_QUIRK_MOD15WRITE },
        { "ST3160021ASL",       SIL_QUIRK_MOD15WRITE },
        { "TOSHIBA MK2561GSYN", SIL_QUIRK_MOD15WRITE },
        { "Maxtor 4D060H3",     SIL_QUIRK_UDMA5MAX },
        { }
};

static struct pci_driver sil_pci_driver = {
        .name                   = DRV_NAME,
        .id_table               = sil_pci_tbl,
        .probe                  = sil_init_one,
        .remove                 = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
        .suspend                = ata_pci_device_suspend,
        .resume                 = sil_pci_device_resume,
#endif
};

static struct scsi_host_template sil_sht = {
        ATA_BASE_SHT(DRV_NAME),
        /*
         * These controllers support Large Block Transfer which allows
         * transfer chunks up to 2GB and which cross 64KB boundaries,
         * therefore the DMA limits are more relaxed than standard ATA SFF.
         */
        .dma_boundary           = SIL_DMA_BOUNDARY,
        .sg_tablesize           = ATA_MAX_PRD
};

static struct ata_port_operations sil_ops = {
        .inherits               = &ata_bmdma32_port_ops,
        .dev_config             = sil_dev_config,
        .set_mode               = sil_set_mode,
        .bmdma_setup            = sil_bmdma_setup,
        .bmdma_start            = sil_bmdma_start,
        .bmdma_stop             = sil_bmdma_stop,
        .qc_prep                = sil_qc_prep,
        .freeze                 = sil_freeze,
        .thaw                   = sil_thaw,
        .scr_read               = sil_scr_read,
        .scr_write              = sil_scr_write,
};

static const struct ata_port_info sil_port_info[] = {
        /* sil_3112 */
        {
                .flags          = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE,
                .pio_mask       = ATA_PIO4,
                .mwdma_mask     = ATA_MWDMA2,
                .udma_mask      = ATA_UDMA5,
                .port_ops       = &sil_ops,
        },
        /* sil_3112_no_sata_irq */
        {
                .flags          = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE |
                                  SIL_FLAG_NO_SATA_IRQ,
                .pio_mask       = ATA_PIO4,
                .mwdma_mask     = ATA_MWDMA2,
                .udma_mask      = ATA_UDMA5,
                .port_ops       = &sil_ops,
        },
        /* sil_3512 */
        {
                .flags          = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
                .pio_mask       = ATA_PIO4,
                .mwdma_mask     = ATA_MWDMA2,
                .udma_mask      = ATA_UDMA5,
                .port_ops       = &sil_ops,
        },
        /* sil_3114 */
        {
                .flags          = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
                .pio_mask       = ATA_PIO4,
                .mwdma_mask     = ATA_MWDMA2,
                .udma_mask      = ATA_UDMA5,
                .port_ops       = &sil_ops,
        },
};

/* per-port register offsets */
/* TODO: we can probably calculate rather than use a table */
static const struct {
        unsigned long tf;       /* ATA taskfile register block */
        unsigned long ctl;      /* ATA control/altstatus register block */
        unsigned long bmdma;    /* DMA register block */
        unsigned long bmdma2;   /* DMA register block #2 */
        unsigned long fifo_cfg; /* FIFO Valid Byte Count and Control */
        unsigned long scr;      /* SATA control register block */
        unsigned long sien;     /* SATA Interrupt Enable register */
        unsigned long xfer_mode;/* data transfer mode register */
        unsigned long sfis_cfg; /* SATA FIS reception config register */
} sil_port[] = {
        /* port 0 ... */
        /*   tf    ctl  bmdma  bmdma2  fifo    scr   sien   mode   sfis */
        {  0x80,  0x8A,   0x0,  0x10,  0x40, 0x100, 0x148,  0xb4, 0x14c },
        {  0xC0,  0xCA,   0x8,  0x18,  0x44, 0x180, 0x1c8,  0xf4, 0x1cc },
        { 0x280, 0x28A, 0x200, 0x210, 0x240, 0x300, 0x348, 0x2b4, 0x34c },
        { 0x2C0, 0x2CA, 0x208, 0x218, 0x244, 0x380, 0x3c8, 0x2f4, 0x3cc },
        /* ... port 3 */
};

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("low-level driver for Silicon Image SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sil_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int slow_down;
module_param(slow_down, int, 0444);
MODULE_PARM_DESC(slow_down, "Sledgehammer used to work around random problems, by limiting commands to 15 sectors (0=off, 1=on)");


static void sil_bmdma_stop(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
        void __iomem *bmdma2 = mmio_base + sil_port[ap->port_no].bmdma2;

        /* clear start/stop bit - can safely always write 0 */
        iowrite8(0, bmdma2);

        /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
        ata_sff_dma_pause(ap);
}

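/**
 *      sil_bmdma_setup - prepare a BMDMA transaction
 *      @qc: command to set up DMA for
 *
 *      Load the PRD table address and issue the taskfile command.  The
 *      transfer direction and DMA start bit are programmed later, in
 *      sil_bmdma_start(), through the BMDMA2 register so that Large
 *      Block Transfer works (see the note there).
 */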
static void sil_bmdma_setup(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        void __iomem *bmdma = ap->ioaddr.bmdma_addr;

        /* load PRD table addr. */
        iowrite32(ap->bmdma_prd_dma, bmdma + ATA_DMA_TABLE_OFS);

        /* issue r/w command */
        ap->ops->sff_exec_command(ap, &qc->tf);
}

static void sil_bmdma_start(struct ata_queued_cmd *qc)
{
        unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
        struct ata_port *ap = qc->ap;
        void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
        void __iomem *bmdma2 = mmio_base + sil_port[ap->port_no].bmdma2;
        u8 dmactl = ATA_DMA_START;

        /* set transfer direction, start host DMA transaction
           Note: For Large Block Transfer to work, the DMA must be started
           using the bmdma2 register. */
        if (!rw)
                dmactl |= ATA_DMA_WR;
        iowrite8(dmactl, bmdma2);
}

/* The way God intended PCI IDE scatter/gather lists to look and behave... */
static void sil_fill_sg(struct ata_queued_cmd *qc)
{
        struct scatterlist *sg;
        struct ata_port *ap = qc->ap;
        struct ata_bmdma_prd *prd, *last_prd = NULL;
        unsigned int si;

        prd = &ap->bmdma_prd[0];
        for_each_sg(qc->sg, sg, qc->n_elem, si) {
                /* Note h/w doesn't support 64-bit, so we unconditionally
                 * truncate dma_addr_t to u32.
                 */
                u32 addr = (u32) sg_dma_address(sg);
                u32 sg_len = sg_dma_len(sg);

                prd->addr = cpu_to_le32(addr);
                prd->flags_len = cpu_to_le32(sg_len);
                VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", si, addr, sg_len);

                last_prd = prd;
                prd++;
        }

        if (likely(last_prd))
                last_prd->flags_len |= cpu_to_le32(ATA_PRD_EOT);
}

static void sil_qc_prep(struct ata_queued_cmd *qc)
{
        if (!(qc->flags & ATA_QCFLAG_DMAMAP))
                return;

        sil_fill_sg(qc);
}

static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
{
        u8 cache_line = 0;
        pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line);
        return cache_line;
}

/**
 *      sil_set_mode            -       wrap set_mode functions
 *      @link: link to set up
 *      @r_failed: returned device when we fail
 *
 *      Wrap the libata method for device setup; after the setup we need
 *      to inspect the results and program the controller's per-device
 *      transfer mode register.
 */
static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed)
{
        struct ata_port *ap = link->ap;
        void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
        void __iomem *addr = mmio_base + sil_port[ap->port_no].xfer_mode;
        struct ata_device *dev;
        u32 tmp, dev_mode[2] = { };
        int rc;

        rc = ata_do_set_mode(link, r_failed);
        if (rc)
                return rc;

        ata_for_each_dev(dev, link, ALL) {
                if (!ata_dev_enabled(dev))
                        dev_mode[dev->devno] = 0;       /* PIO0/1/2 */
                else if (dev->flags & ATA_DFLAG_PIO)
                        dev_mode[dev->devno] = 1;       /* PIO3/4 */
                else
                        dev_mode[dev->devno] = 3;       /* UDMA */
                /* value 2 indicates MDMA */
        }

        tmp = readl(addr);
        tmp &= ~((1<<5) | (1<<4) | (1<<1) | (1<<0));
        tmp |= dev_mode[0];
        tmp |= (dev_mode[1] << 4);
        writel(tmp, addr);
        readl(addr);    /* flush */
        return 0;
}

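/**
 *      sil_scr_addr - map an SCR register index to its MMIO address
 *      @ap: ATA port the register belongs to
 *      @sc_reg: SCR register index (SCR_STATUS, SCR_ERROR or SCR_CONTROL)
 *
 *      SControl sits at the base of the per-port SCR block, SStatus at
 *      offset 4 and SError at offset 8.  Returns NULL for any other
 *      register, which makes sil_scr_read()/sil_scr_write() fail with
 *      -EINVAL.
 */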
static inline void __iomem *sil_scr_addr(struct ata_port *ap,
                                         unsigned int sc_reg)
{
        void __iomem *offset = ap->ioaddr.scr_addr;

        switch (sc_reg) {
        case SCR_STATUS:
                return offset + 4;
        case SCR_ERROR:
                return offset + 8;
        case SCR_CONTROL:
                return offset;
        default:
                /* do nothing */
                break;
        }

        return NULL;
}

static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
        void __iomem *mmio = sil_scr_addr(link->ap, sc_reg);

        if (mmio) {
                *val = readl(mmio);
                return 0;
        }
        return -EINVAL;
}

static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
        void __iomem *mmio = sil_scr_addr(link->ap, sc_reg);

        if (mmio) {
                writel(val, mmio);
                return 0;
        }
        return -EINVAL;
}

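/**
 *      sil_host_intr - handle one port's interrupt
 *      @ap: ATA port
 *      @bmdma2: BMDMA2 status read for this port
 *
 *      SATA (SError.N) notifications are handled first: SError is
 *      cleared unconditionally and a PHYRDY change freezes the port for
 *      error handling.  For command interrupts, verify that the HSM
 *      state expects an interrupt, stop BMDMA when a DMA command
 *      completes and feed the taskfile status to ata_sff_hsm_move().
 *      Unexpected interrupts are cleared; HSM violations freeze the
 *      port.
 */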
static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
{
        struct ata_eh_info *ehi = &ap->link.eh_info;
        struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
        u8 status;

        if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) {
                u32 serror = 0xffffffff;

                /* SIEN doesn't mask SATA IRQs on some 3112s.  Those
                 * controllers continue to assert IRQ as long as
                 * SError bits are pending.  Clear SError immediately.
                 */
                sil_scr_read(&ap->link, SCR_ERROR, &serror);
                sil_scr_write(&ap->link, SCR_ERROR, serror);

                /* Sometimes spurious interrupts occur, double check
                 * it's PHYRDY CHG.
                 */
                if (serror & SERR_PHYRDY_CHG) {
                        ap->link.eh_info.serror |= serror;
                        goto freeze;
                }

                if (!(bmdma2 & SIL_DMA_COMPLETE))
                        return;
        }

        if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
                /* this sometimes happens, just clear IRQ */
                ap->ops->sff_check_status(ap);
                return;
        }

        /* Check whether we are expecting interrupt in this state */
        switch (ap->hsm_task_state) {
        case HSM_ST_FIRST:
                /* Some pre-ATAPI-4 devices assert INTRQ
                 * at this state when ready to receive CDB.
                 */

                /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
                 * The flag is set only for ATAPI devices, so there is
                 * no need to check ata_is_atapi(qc->tf.protocol) again.
                 */
                if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
                        goto err_hsm;
                break;
        case HSM_ST_LAST:
                if (ata_is_dma(qc->tf.protocol)) {
                        /* clear DMA-Start bit */
                        ap->ops->bmdma_stop(qc);

                        if (bmdma2 & SIL_DMA_ERROR) {
                                qc->err_mask |= AC_ERR_HOST_BUS;
                                ap->hsm_task_state = HSM_ST_ERR;
                        }
                }
                break;
        case HSM_ST:
                break;
        default:
                goto err_hsm;
        }

        /* check main status, clearing INTRQ */
        status = ap->ops->sff_check_status(ap);
        if (unlikely(status & ATA_BUSY))
                goto err_hsm;

        /* ack bmdma irq events */
        ata_bmdma_irq_clear(ap);

        /* kick HSM in the ass */
        ata_sff_hsm_move(ap, qc, status, 0);

        if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
                ata_ehi_push_desc(ehi, "BMDMA2 stat 0x%x", bmdma2);

        return;

 err_hsm:
        qc->err_mask |= AC_ERR_HSM;
 freeze:
        ata_port_freeze(ap);
}

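/**
 *      sil_interrupt - shared interrupt handler
 *      @irq: IRQ number (unused)
 *      @dev_instance: pointer to our ata_host
 *
 *      Read each port's BMDMA2 status and dispatch ports with a pending
 *      command-complete or SATA interrupt to sil_host_intr().  Ports
 *      flagged SIL_FLAG_NO_SATA_IRQ have the SATA IRQ bit ignored.
 */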
static irqreturn_t sil_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
        int handled = 0;
        int i;

        spin_lock(&host->lock);

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];
                u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);

                /* turn off SATA_IRQ if not supported */
                if (ap->flags & SIL_FLAG_NO_SATA_IRQ)
                        bmdma2 &= ~SIL_DMA_SATA_IRQ;

                if (bmdma2 == 0xffffffff ||
                    !(bmdma2 & (SIL_DMA_COMPLETE | SIL_DMA_SATA_IRQ)))
                        continue;

                sil_host_intr(ap, bmdma2);
                handled = 1;
        }

        spin_unlock(&host->lock);

        return IRQ_RETVAL(handled);
}

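/**
 *      sil_freeze - mask a port's interrupts
 *      @ap: port to freeze
 *
 *      Disable SATA interrupts via SIEN, mask the port's IDE interrupt
 *      in SYSCFG and clear DMA_ENABLE so that the taskfile registers
 *      remain accessible while the port is frozen.
 */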
static void sil_freeze(struct ata_port *ap)
{
        void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
        u32 tmp;

        /* global IRQ mask doesn't block SATA IRQ, turn off explicitly */
        writel(0, mmio_base + sil_port[ap->port_no].sien);

        /* plug IRQ */
        tmp = readl(mmio_base + SIL_SYSCFG);
        tmp |= SIL_MASK_IDE0_INT << ap->port_no;
        writel(tmp, mmio_base + SIL_SYSCFG);
        readl(mmio_base + SIL_SYSCFG);  /* flush */

        /* Ensure DMA_ENABLE is off.
         *
         * This is because the controller will not give us access to the
         * taskfile registers while a DMA is in progress
         */
        iowrite8(ioread8(ap->ioaddr.bmdma_addr) & ~SIL_DMA_ENABLE,
                 ap->ioaddr.bmdma_addr);

        /* According to ata_bmdma_stop(), an HDMA transition requires
         * one PIO cycle.  But we can't read a taskfile register.
         */
        ioread8(ap->ioaddr.bmdma_addr);
}

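/**
 *      sil_thaw - re-enable a port's interrupts
 *      @ap: port to thaw
 *
 *      Clear any pending interrupt, then re-enable the SATA interrupt
 *      (unless the port has SIL_FLAG_NO_SATA_IRQ) and unmask the port's
 *      IDE interrupt in SYSCFG.
 */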
static void sil_thaw(struct ata_port *ap)
{
        void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
        u32 tmp;

        /* clear IRQ */
        ap->ops->sff_check_status(ap);
        ata_bmdma_irq_clear(ap);

        /* turn on SATA IRQ if supported */
        if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ))
                writel(SIL_SIEN_N, mmio_base + sil_port[ap->port_no].sien);

        /* turn on IRQ */
        tmp = readl(mmio_base + SIL_SYSCFG);
        tmp &= ~(SIL_MASK_IDE0_INT << ap->port_no);
        writel(tmp, mmio_base + SIL_SYSCFG);
}

/**
 *      sil_dev_config - Apply device/host-specific errata fixups
 *      @dev: Device to be examined
 *
 *      After the IDENTIFY [PACKET] DEVICE step is complete, and a
 *      device is known to be present, this function is called.
 *      We apply two errata fixups which are specific to Silicon Image,
 *      a Seagate and a Maxtor fixup.
 *
 *      For certain Seagate devices, we must limit the maximum sectors
 *      to under 8K.
 *
 *      For certain Maxtor devices, we must not program the drive
 *      beyond udma5.
 *
 *      Both fixups are unfairly pessimistic.  As soon as I get more
 *      information on these errata, I will create a more exhaustive
 *      list, and apply the fixups to only the specific
 *      devices/hosts/firmwares that need it.
 *
 *      20040111 - Seagate drives affected by the Mod15Write bug are blacklisted
 *      The Maxtor quirk is in the blacklist, but I'm keeping the original
 *      pessimistic fix for the following reasons...
 *      - There seems to be less info on it, only one device gleaned off the
 *      Windows driver, maybe only one is affected.  More info would be greatly
 *      appreciated.
 *      - But then again UDMA5 is hardly anything to complain about
 */
static void sil_dev_config(struct ata_device *dev)
{
        struct ata_port *ap = dev->link->ap;
        int print_info = ap->link.eh_context.i.flags & ATA_EHI_PRINTINFO;
        unsigned int n, quirks = 0;
        unsigned char model_num[ATA_ID_PROD_LEN + 1];

        /* This controller doesn't support trim */
        dev->horkage |= ATA_HORKAGE_NOTRIM;

        ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));

        for (n = 0; sil_blacklist[n].product; n++)
                if (!strcmp(sil_blacklist[n].product, model_num)) {
                        quirks = sil_blacklist[n].quirk;
                        break;
                }

        /* limit requests to 15 sectors */
        if (slow_down ||
            ((ap->flags & SIL_FLAG_MOD15WRITE) &&
             (quirks & SIL_QUIRK_MOD15WRITE))) {
                if (print_info)
                        ata_dev_info(dev,
                "applying Seagate errata fix (mod15write workaround)\n");
                dev->max_sectors = 15;
                return;
        }

        /* limit to udma5 */
        if (quirks & SIL_QUIRK_UDMA5MAX) {
                if (print_info)
                        ata_dev_info(dev, "applying Maxtor errata fix %s\n",
                                     model_num);
                dev->udma_mask &= ATA_UDMA5;
                return;
        }
}

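/**
 *      sil_init_controller - initialize the controller
 *      @host: ATA host to initialize
 *
 *      Program FIFO PCI bus arbitration from the PCI cache line size,
 *      apply the R_ERR-on-DMA-activate-FIS errata workaround where the
 *      hardware needs it, and set the interrupt steering bit on 4-port
 *      (3114) parts.  Also called from the PCI resume path.
 */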
static void sil_init_controller(struct ata_host *host)
{
        struct pci_dev *pdev = to_pci_dev(host->dev);
        void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
        u8 cls;
        u32 tmp;
        int i;

        /* Initialize FIFO PCI bus arbitration */
        cls = sil_get_device_cache_line(pdev);
        if (cls) {
                cls >>= 3;
                cls++;  /* cls = (line_size/8)+1 */
                for (i = 0; i < host->n_ports; i++)
                        writew(cls << 8 | cls,
                               mmio_base + sil_port[i].fifo_cfg);
        } else
                dev_warn(&pdev->dev,
                         "cache line size not set.  Driver may not function\n");

        /* Apply R_ERR on DMA activate FIS errata workaround */
        if (host->ports[0]->flags & SIL_FLAG_RERR_ON_DMA_ACT) {
                int cnt;

                for (i = 0, cnt = 0; i < host->n_ports; i++) {
                        tmp = readl(mmio_base + sil_port[i].sfis_cfg);
                        if ((tmp & 0x3) != 0x01)
                                continue;
                        if (!cnt)
                                dev_info(&pdev->dev,
                                         "Applying R_ERR on DMA activate FIS errata fix\n");
                        writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg);
                        cnt++;
                }
        }

        if (host->n_ports == 4) {
                /* flip the magic "make 4 ports work" bit */
                tmp = readl(mmio_base + sil_port[2].bmdma);
                if ((tmp & SIL_INTR_STEERING) == 0)
                        writel(tmp | SIL_INTR_STEERING,
                               mmio_base + sil_port[2].bmdma);
        }
}

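/**
 *      sil_broken_system_poweroff - check for systems with broken poweroff
 *      @pdev: PCI device being probed
 *
 *      Check the DMI table for systems whose quirky BIOS requires that
 *      the disk not be spun down on poweroff or hibernation (currently
 *      only the HP Compaq nx6325).  The quirk is applied only to the
 *      on-board controller in the PCI slot recorded in driver_data.
 */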
static bool sil_broken_system_poweroff(struct pci_dev *pdev)
{
        static const struct dmi_system_id broken_systems[] = {
                {
                        .ident = "HP Compaq nx6325",
                        .matches = {
                                DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
                                DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6325"),
                        },
                        /* PCI slot number of the controller */
                        .driver_data = (void *)0x12UL,
                },

                { }     /* terminate list */
        };
        const struct dmi_system_id *dmi = dmi_first_match(broken_systems);

        if (dmi) {
                unsigned long slot = (unsigned long)dmi->driver_data;
                /* apply the quirk only to on-board controllers */
                return slot == PCI_SLOT(pdev->devfn);
        }

        return false;
}

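/**
 *      sil_init_one - PCI probe callback
 *      @pdev: PCI device to probe
 *      @ent: matching entry in sil_pci_tbl
 *
 *      Allocate an ATA host (two ports, or four for the 3114), map the
 *      MMIO BAR, set the DMA masks, fill in the per-port I/O addresses
 *      from sil_port[], run sil_init_controller() and activate the host
 *      with sil_interrupt() as the shared interrupt handler.
 */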
static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        int board_id = ent->driver_data;
        struct ata_port_info pi = sil_port_info[board_id];
        const struct ata_port_info *ppi[] = { &pi, NULL };
        struct ata_host *host;
        void __iomem *mmio_base;
        int n_ports, rc;
        unsigned int i;

        ata_print_version_once(&pdev->dev, DRV_VERSION);

        /* allocate host */
        n_ports = 2;
        if (board_id == sil_3114)
                n_ports = 4;

        if (sil_broken_system_poweroff(pdev)) {
                pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN |
                                        ATA_FLAG_NO_HIBERNATE_SPINDOWN;
                dev_info(&pdev->dev, "quirky BIOS, skipping spindown "
                                "on poweroff and hibernation\n");
        }

        host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
        if (!host)
                return -ENOMEM;

        /* acquire resources and fill host */
        rc = pcim_enable_device(pdev);
        if (rc)
                return rc;

        rc = pcim_iomap_regions(pdev, 1 << SIL_MMIO_BAR, DRV_NAME);
        if (rc == -EBUSY)
                pcim_pin_device(pdev);
        if (rc)
                return rc;
        host->iomap = pcim_iomap_table(pdev);

        rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
        if (rc)
                return rc;
        rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
        if (rc)
                return rc;

        mmio_base = host->iomap[SIL_MMIO_BAR];

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];
                struct ata_ioports *ioaddr = &ap->ioaddr;

                ioaddr->cmd_addr = mmio_base + sil_port[i].tf;
                ioaddr->altstatus_addr =
                ioaddr->ctl_addr = mmio_base + sil_port[i].ctl;
                ioaddr->bmdma_addr = mmio_base + sil_port[i].bmdma;
                ioaddr->scr_addr = mmio_base + sil_port[i].scr;
                ata_sff_std_ports(ioaddr);

                ata_port_pbar_desc(ap, SIL_MMIO_BAR, -1, "mmio");
                ata_port_pbar_desc(ap, SIL_MMIO_BAR, sil_port[i].tf, "tf");
        }

        /* initialize and activate */
        sil_init_controller(host);

        pci_set_master(pdev);
        return ata_host_activate(host, pdev->irq, sil_interrupt, IRQF_SHARED,
                                 &sil_sht);
}

#ifdef CONFIG_PM_SLEEP
static int sil_pci_device_resume(struct pci_dev *pdev)
{
        struct ata_host *host = pci_get_drvdata(pdev);
        int rc;

        rc = ata_pci_device_do_resume(pdev);
        if (rc)
                return rc;

        sil_init_controller(host);
        ata_host_resume(host);

        return 0;
}
#endif

module_pci_driver(sil_pci_driver);