linux/drivers/ata/sata_sil.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  sata_sil.c - Silicon Image SATA
 *
 *  Maintained by:  Tejun Heo <tj@kernel.org>
 *                  Please ALWAYS copy linux-ide@vger.kernel.org
 *                  on emails.
 *
 *  Copyright 2003-2005 Red Hat, Inc.
 *  Copyright 2003 Benjamin Herrenschmidt
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/driver-api/libata.rst
 *
 *  Documentation for SiI 3112:
 *  http://gkernel.sourceforge.net/specs/sii/3112A_SiI-DS-0095-B2.pdf.bz2
 *
 *  Other errata and documentation available under NDA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/dmi.h>

#define DRV_NAME	"sata_sil"
#define DRV_VERSION	"2.4"

#define SIL_DMA_BOUNDARY	0x7fffffffUL

enum {
	SIL_MMIO_BAR		= 5,

	/*
	 * host flags
	 */
	SIL_FLAG_NO_SATA_IRQ	= (1 << 28),
	SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
	SIL_FLAG_MOD15WRITE	= (1 << 30),

	SIL_DFL_PORT_FLAGS	= ATA_FLAG_SATA,

	/*
	 * Controller IDs
	 */
	sil_3112		= 0,
	sil_3112_no_sata_irq	= 1,
	sil_3512		= 2,
	sil_3114		= 3,

	/*
	 * Register offsets
	 */
	SIL_SYSCFG		= 0x48,

	/*
	 * Register bits
	 */
	/* SYSCFG */
	SIL_MASK_IDE0_INT	= (1 << 22),
	SIL_MASK_IDE1_INT	= (1 << 23),
	SIL_MASK_IDE2_INT	= (1 << 24),
	SIL_MASK_IDE3_INT	= (1 << 25),
	SIL_MASK_2PORT		= SIL_MASK_IDE0_INT | SIL_MASK_IDE1_INT,
	SIL_MASK_4PORT		= SIL_MASK_2PORT |
				  SIL_MASK_IDE2_INT | SIL_MASK_IDE3_INT,

	/* BMDMA/BMDMA2 */
	SIL_INTR_STEERING	= (1 << 1),

	SIL_DMA_ENABLE		= (1 << 0),  /* DMA run switch */
	SIL_DMA_RDWR		= (1 << 3),  /* DMA Rd-Wr */
	SIL_DMA_SATA_IRQ	= (1 << 4),  /* OR of all SATA IRQs */
	SIL_DMA_ACTIVE		= (1 << 16), /* DMA running */
	SIL_DMA_ERROR		= (1 << 17), /* PCI bus error */
	SIL_DMA_COMPLETE	= (1 << 18), /* cmd complete / IRQ pending */
	SIL_DMA_N_SATA_IRQ	= (1 << 6),  /* SATA_IRQ for the next channel */
	SIL_DMA_N_ACTIVE	= (1 << 24), /* ACTIVE for the next channel */
	SIL_DMA_N_ERROR		= (1 << 25), /* ERROR for the next channel */
	SIL_DMA_N_COMPLETE	= (1 << 26), /* COMPLETE for the next channel */

	/* SIEN */
	SIL_SIEN_N		= (1 << 16), /* triggered by SError.N */

	/*
	 * Others
	 */
	SIL_QUIRK_MOD15WRITE	= (1 << 0),
	SIL_QUIRK_UDMA5MAX	= (1 << 1),
};

static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int sil_pci_device_resume(struct pci_dev *pdev);
#endif
static void sil_dev_config(struct ata_device *dev);
static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed);
static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc);
static void sil_bmdma_setup(struct ata_queued_cmd *qc);
static void sil_bmdma_start(struct ata_queued_cmd *qc);
static void sil_bmdma_stop(struct ata_queued_cmd *qc);
static void sil_freeze(struct ata_port *ap);
static void sil_thaw(struct ata_port *ap);


static const struct pci_device_id sil_pci_tbl[] = {
	{ PCI_VDEVICE(CMD, 0x3112), sil_3112 },
	{ PCI_VDEVICE(CMD, 0x0240), sil_3112 },
	{ PCI_VDEVICE(CMD, 0x3512), sil_3512 },
	{ PCI_VDEVICE(CMD, 0x3114), sil_3114 },
	{ PCI_VDEVICE(ATI, 0x436e), sil_3112 },
	{ PCI_VDEVICE(ATI, 0x4379), sil_3112_no_sata_irq },
	{ PCI_VDEVICE(ATI, 0x437a), sil_3112_no_sata_irq },

	{ }	/* terminate list */
};


/* TODO firmware versions should be added - eric */
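/*
 * Entries are matched with an exact strcmp() against the IDENTIFY DEVICE
 * product string (see sil_dev_config() below); there is no wildcarding.
 */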
static const struct sil_drivelist {
	const char *product;
	unsigned int quirk;
} sil_blacklist[] = {
	{ "ST320012AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST330013AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST340017AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST360015AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST380023AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST3120023AS",	SIL_QUIRK_MOD15WRITE },
	{ "ST340014ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST360014ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST380011ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST3120022ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST3160021ASL",	SIL_QUIRK_MOD15WRITE },
	{ "TOSHIBA MK2561GSYN",	SIL_QUIRK_MOD15WRITE },
	{ "Maxtor 4D060H3",	SIL_QUIRK_UDMA5MAX },
	{ }
};

static struct pci_driver sil_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= sil_pci_tbl,
	.probe			= sil_init_one,
	.remove			= ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
	.suspend		= ata_pci_device_suspend,
	.resume			= sil_pci_device_resume,
#endif
};

static struct scsi_host_template sil_sht = {
	ATA_BASE_SHT(DRV_NAME),
	/*
	 * These controllers support Large Block Transfer, which allows
	 * transfer chunks of up to 2GB that may cross 64KB boundaries,
	 * so the DMA limits are more relaxed than for standard ATA SFF.
	 */
	.dma_boundary		= SIL_DMA_BOUNDARY,
	.sg_tablesize		= ATA_MAX_PRD
};

static struct ata_port_operations sil_ops = {
	.inherits		= &ata_bmdma32_port_ops,
	.dev_config		= sil_dev_config,
	.set_mode		= sil_set_mode,
	.bmdma_setup		= sil_bmdma_setup,
	.bmdma_start		= sil_bmdma_start,
	.bmdma_stop		= sil_bmdma_stop,
	.qc_prep		= sil_qc_prep,
	.freeze			= sil_freeze,
	.thaw			= sil_thaw,
	.scr_read		= sil_scr_read,
	.scr_write		= sil_scr_write,
};

static const struct ata_port_info sil_port_info[] = {
	/* sil_3112 */
	{
		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil_ops,
	},
	/* sil_3112_no_sata_irq */
	{
		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE |
				  SIL_FLAG_NO_SATA_IRQ,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil_ops,
	},
	/* sil_3512 */
	{
		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil_ops,
	},
	/* sil_3114 */
	{
		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil_ops,
	},
};

/* per-port register offsets */
/* TODO: we can probably calculate rather than use a table */
static const struct {
	unsigned long tf;	/* ATA taskfile register block */
	unsigned long ctl;	/* ATA control/altstatus register block */
	unsigned long bmdma;	/* DMA register block */
	unsigned long bmdma2;	/* DMA register block #2 */
	unsigned long fifo_cfg;	/* FIFO Valid Byte Count and Control */
	unsigned long scr;	/* SATA control register block */
	unsigned long sien;	/* SATA Interrupt Enable register */
	unsigned long xfer_mode;/* data transfer mode register */
	unsigned long sfis_cfg;	/* SATA FIS reception config register */
} sil_port[] = {
	/* port 0 ... */
	/*   tf    ctl  bmdma  bmdma2  fifo    scr   sien   mode   sfis */
	{  0x80,  0x8A,   0x0,  0x10,  0x40, 0x100, 0x148,  0xb4, 0x14c },
	{  0xC0,  0xCA,   0x8,  0x18,  0x44, 0x180, 0x1c8,  0xf4, 0x1cc },
	{ 0x280, 0x28A, 0x200, 0x210, 0x240, 0x300, 0x348, 0x2b4, 0x34c },
	{ 0x2C0, 0x2CA, 0x208, 0x218, 0x244, 0x380, 0x3c8, 0x2f4, 0x3cc },
	/* ... port 3 */
};
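
/*
 * On the TODO above: the offsets in sil_port[] follow a regular pattern
 * (ports 2/3 mirror ports 0/1 at a +0x200 base, and the second port of each
 * pair adds a small per-register stride).  The helper below is only a
 * minimal sketch of that pattern for the taskfile block, derived from the
 * table values; sil_calc_tf_offset() is illustrative and is not used by the
 * driver, which keeps the table as the authoritative source.
 */
static inline unsigned long sil_calc_tf_offset(unsigned int port_no)
{
	unsigned long base = (port_no & 2) ? 0x200UL : 0x0UL;

	/* taskfile blocks live at 0x80/0xC0 and 0x280/0x2C0 */
	return base + 0x80 + (port_no & 1) * 0x40;
}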

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("low-level driver for Silicon Image SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sil_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int slow_down;
module_param(slow_down, int, 0444);
MODULE_PARM_DESC(slow_down, "Sledgehammer used to work around random problems, by limiting commands to 15 sectors (0=off, 1=on)");
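/*
 * Example usage (assuming the driver is built as a module):
 * "modprobe sata_sil slow_down=1" applies the 15-sector limit to every
 * attached device instead of only the blacklisted models; see
 * sil_dev_config() below.
 */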


static void sil_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
	void __iomem *bmdma2 = mmio_base + sil_port[ap->port_no].bmdma2;

	/* clear start/stop bit - can safely always write 0 */
	iowrite8(0, bmdma2);

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_sff_dma_pause(ap);
}

static void sil_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *bmdma = ap->ioaddr.bmdma_addr;

	/* load PRD table addr. */
	iowrite32(ap->bmdma_prd_dma, bmdma + ATA_DMA_TABLE_OFS);

	/* issue r/w command */
	ap->ops->sff_exec_command(ap, &qc->tf);
}

static void sil_bmdma_start(struct ata_queued_cmd *qc)
{
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct ata_port *ap = qc->ap;
	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
	void __iomem *bmdma2 = mmio_base + sil_port[ap->port_no].bmdma2;
	u8 dmactl = ATA_DMA_START;

	/* set transfer direction, start host DMA transaction
	 * Note: For Large Block Transfer to work, the DMA must be started
	 * using the bmdma2 register.
	 */
	if (!rw)
		dmactl |= ATA_DMA_WR;
	iowrite8(dmactl, bmdma2);
}

/* The way God intended PCI IDE scatter/gather lists to look and behave... */
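/*
 * Unlike a standard SFF PRD fill, each scatterlist element becomes a single
 * PRD entry here: with dma_boundary relaxed to SIL_DMA_BOUNDARY in sil_sht
 * (Large Block Transfer), segments do not have to be split at 64KB
 * boundaries.
 */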
static void sil_fill_sg(struct ata_queued_cmd *qc)
{
	struct scatterlist *sg;
	struct ata_port *ap = qc->ap;
	struct ata_bmdma_prd *prd, *last_prd = NULL;
	unsigned int si;

	prd = &ap->bmdma_prd[0];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		/* Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		u32 addr = (u32) sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		prd->addr = cpu_to_le32(addr);
		prd->flags_len = cpu_to_le32(sg_len);
		VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", si, addr, sg_len);

		last_prd = prd;
		prd++;
	}

	if (likely(last_prd))
		last_prd->flags_len |= cpu_to_le32(ATA_PRD_EOT);
}

static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc)
{
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return AC_ERR_OK;

	sil_fill_sg(qc);

	return AC_ERR_OK;
}

static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
{
	u8 cache_line = 0;

	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line);
	return cache_line;
}

/**
 *	sil_set_mode		-	wrap set_mode functions
 *	@link: link to set up
 *	@r_failed: returned device when we fail
 *
 *	Wrap the libata method for device setup: after the generic setup
 *	completes we need to inspect the results and do some extra
 *	configuration work.
 */

static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed)
{
	struct ata_port *ap = link->ap;
	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
	void __iomem *addr = mmio_base + sil_port[ap->port_no].xfer_mode;
	struct ata_device *dev;
	u32 tmp, dev_mode[2] = { };
	int rc;

	rc = ata_do_set_mode(link, r_failed);
	if (rc)
		return rc;

	ata_for_each_dev(dev, link, ALL) {
		if (!ata_dev_enabled(dev))
			dev_mode[dev->devno] = 0;	/* PIO0/1/2 */
		else if (dev->flags & ATA_DFLAG_PIO)
			dev_mode[dev->devno] = 1;	/* PIO3/4 */
		else
			dev_mode[dev->devno] = 3;	/* UDMA */
		/* value 2 indicates MDMA */
	}

	tmp = readl(addr);
	tmp &= ~((1 << 5) | (1 << 4) | (1 << 1) | (1 << 0));
	tmp |= dev_mode[0];
	tmp |= (dev_mode[1] << 4);
	writel(tmp, addr);
	readl(addr);	/* flush */
	return 0;
}
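
/*
 * Worked example for the register update above: with a UDMA-capable device 0
 * and no device 1, dev_mode[] ends up as { 3, 0 }, so bits 1:0 of the
 * per-port xfer_mode register are set to 3 (UDMA) and bits 5:4 are cleared
 * (PIO0/1/2).  Value 2 (MDMA) is never programmed by this driver.
 */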

static inline void __iomem *sil_scr_addr(struct ata_port *ap,
					 unsigned int sc_reg)
{
	void __iomem *offset = ap->ioaddr.scr_addr;

	switch (sc_reg) {
	case SCR_STATUS:
		return offset + 4;
	case SCR_ERROR:
		return offset + 8;
	case SCR_CONTROL:
		return offset;
	default:
		/* do nothing */
		break;
	}

	return NULL;
}

static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
	void __iomem *mmio = sil_scr_addr(link->ap, sc_reg);

	if (mmio) {
		*val = readl(mmio);
		return 0;
	}
	return -EINVAL;
}

static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
	void __iomem *mmio = sil_scr_addr(link->ap, sc_reg);

	if (mmio) {
		writel(val, mmio);
		return 0;
	}
	return -EINVAL;
}

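/*
 * Per-port interrupt handling: decode the BMDMA2 status word, service any
 * SATA notification interrupt (clearing SError), then drive the SFF host
 * state machine for the active command, freezing the port on HSM errors.
 */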
static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
	u8 status;

	if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) {
		u32 serror = 0xffffffff;

		/* SIEN doesn't mask SATA IRQs on some 3112s.  Those
		 * controllers continue to assert IRQ as long as
		 * SError bits are pending.  Clear SError immediately.
		 */
		sil_scr_read(&ap->link, SCR_ERROR, &serror);
		sil_scr_write(&ap->link, SCR_ERROR, serror);

		/* Sometimes spurious interrupts occur, double check
		 * it's PHYRDY CHG.
		 */
		if (serror & SERR_PHYRDY_CHG) {
			ap->link.eh_info.serror |= serror;
			goto freeze;
		}

		if (!(bmdma2 & SIL_DMA_COMPLETE))
			return;
	}

	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		/* this sometimes happens, just clear IRQ */
		ap->ops->sff_check_status(ap);
		return;
	}

	/* Check whether we are expecting an interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag is set only for ATAPI devices, so there is no
		 * need to check ata_is_atapi(qc->tf.protocol) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto err_hsm;
		break;
	case HSM_ST_LAST:
		if (ata_is_dma(qc->tf.protocol)) {
			/* clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (bmdma2 & SIL_DMA_ERROR) {
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto err_hsm;
	}

	/* check main status, clearing INTRQ */
	status = ap->ops->sff_check_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto err_hsm;

	/* ack bmdma irq events */
	ata_bmdma_irq_clear(ap);

	/* kick HSM in the ass */
	ata_sff_hsm_move(ap, qc, status, 0);

	if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
		ata_ehi_push_desc(ehi, "BMDMA2 stat 0x%x", bmdma2);

	return;

 err_hsm:
	qc->err_mask |= AC_ERR_HSM;
 freeze:
	ata_port_freeze(ap);
}

static irqreturn_t sil_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
	int handled = 0;
	int i;

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);

		/* turn off SATA_IRQ if not supported */
		if (ap->flags & SIL_FLAG_NO_SATA_IRQ)
			bmdma2 &= ~SIL_DMA_SATA_IRQ;

		if (bmdma2 == 0xffffffff ||
		    !(bmdma2 & (SIL_DMA_COMPLETE | SIL_DMA_SATA_IRQ)))
			continue;

		sil_host_intr(ap, bmdma2);
		handled = 1;
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

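/*
 * EH freeze/thaw pair: sil_freeze() masks the port's SATA and IDE interrupts
 * and forces DMA_ENABLE off so the taskfile registers stay accessible;
 * sil_thaw() clears any stale IRQ state and unmasks the interrupts again.
 */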
static void sil_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
	u32 tmp;

	/* global IRQ mask doesn't block SATA IRQ, turn off explicitly */
	writel(0, mmio_base + sil_port[ap->port_no].sien);

	/* plug IRQ */
	tmp = readl(mmio_base + SIL_SYSCFG);
	tmp |= SIL_MASK_IDE0_INT << ap->port_no;
	writel(tmp, mmio_base + SIL_SYSCFG);
	readl(mmio_base + SIL_SYSCFG);	/* flush */

	/* Ensure DMA_ENABLE is off.
	 *
	 * This is because the controller will not give us access to the
	 * taskfile registers while a DMA is in progress
	 */
	iowrite8(ioread8(ap->ioaddr.bmdma_addr) & ~SIL_DMA_ENABLE,
		 ap->ioaddr.bmdma_addr);

	/* According to ata_bmdma_stop(), an HDMA transition requires
	 * one PIO cycle.  But we can't read a taskfile register.
	 */
	ioread8(ap->ioaddr.bmdma_addr);
}

static void sil_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
	u32 tmp;

	/* clear IRQ */
	ap->ops->sff_check_status(ap);
	ata_bmdma_irq_clear(ap);

	/* turn on SATA IRQ if supported */
	if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ))
		writel(SIL_SIEN_N, mmio_base + sil_port[ap->port_no].sien);

	/* turn on IRQ */
	tmp = readl(mmio_base + SIL_SYSCFG);
	tmp &= ~(SIL_MASK_IDE0_INT << ap->port_no);
	writel(tmp, mmio_base + SIL_SYSCFG);
}

/**
 *	sil_dev_config - Apply device/host-specific errata fixups
 *	@dev: Device to be examined
 *
 *	After the IDENTIFY [PACKET] DEVICE step is complete, and a
 *	device is known to be present, this function is called.
 *	We apply two errata fixups which are specific to Silicon Image,
 *	a Seagate fixup and a Maxtor fixup.
 *
 *	For certain Seagate devices, we must limit the maximum sectors
 *	to under 8K.
 *
 *	For certain Maxtor devices, we must not program the drive
 *	beyond udma5.
 *
 *	Both fixups are unfairly pessimistic.  As soon as I get more
 *	information on these errata, I will create a more exhaustive
 *	list, and apply the fixups to only the specific
 *	devices/hosts/firmwares that need it.
 *
 *	20040111 - Seagate drives affected by the Mod15Write bug are blacklisted.
 *	The Maxtor quirk is in the blacklist, but I'm keeping the original
 *	pessimistic fix for the following reasons...
 *	- There seems to be less info on it; only one device was gleaned off the
 *	  Windows driver, so maybe only one is affected.  More info would be
 *	  greatly appreciated.
 *	- But then again UDMA5 is hardly anything to complain about.
 */
static void sil_dev_config(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	int print_info = ap->link.eh_context.i.flags & ATA_EHI_PRINTINFO;
	unsigned int n, quirks = 0;
	unsigned char model_num[ATA_ID_PROD_LEN + 1];

	/* This controller doesn't support trim */
	dev->horkage |= ATA_HORKAGE_NOTRIM;

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));

	for (n = 0; sil_blacklist[n].product; n++)
		if (!strcmp(sil_blacklist[n].product, model_num)) {
			quirks = sil_blacklist[n].quirk;
			break;
		}

	/* limit requests to 15 sectors */
	if (slow_down ||
	    ((ap->flags & SIL_FLAG_MOD15WRITE) &&
	     (quirks & SIL_QUIRK_MOD15WRITE))) {
		if (print_info)
			ata_dev_info(dev,
		"applying Seagate errata fix (mod15write workaround)\n");
		dev->max_sectors = 15;
		return;
	}

	/* limit to udma5 */
	if (quirks & SIL_QUIRK_UDMA5MAX) {
		if (print_info)
			ata_dev_info(dev, "applying Maxtor errata fix %s\n",
				     model_num);
		dev->udma_mask &= ATA_UDMA5;
		return;
	}
}

static void sil_init_controller(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
	u8 cls;
	u32 tmp;
	int i;

	/* Initialize FIFO PCI bus arbitration */
	cls = sil_get_device_cache_line(pdev);
	if (cls) {
		cls >>= 3;
		cls++;	/* cls = (line_size/8)+1 */
		for (i = 0; i < host->n_ports; i++)
			writew(cls << 8 | cls,
			       mmio_base + sil_port[i].fifo_cfg);
	} else
		dev_warn(&pdev->dev,
			 "cache line size not set.  Driver may not function\n");
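
	/*
	 * Worked example: a PCI cache line size of 0x10 (16 dwords, i.e.
	 * 64 bytes) gives cls = (16 >> 3) + 1 = 3, so 0x0303 is written to
	 * each port's FIFO config register above.
	 */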

	/* Apply R_ERR on DMA activate FIS errata workaround */
	if (host->ports[0]->flags & SIL_FLAG_RERR_ON_DMA_ACT) {
		int cnt;

		for (i = 0, cnt = 0; i < host->n_ports; i++) {
			tmp = readl(mmio_base + sil_port[i].sfis_cfg);
			if ((tmp & 0x3) != 0x01)
				continue;
			if (!cnt)
				dev_info(&pdev->dev,
					 "Applying R_ERR on DMA activate FIS errata fix\n");
			writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg);
			cnt++;
		}
	}

	if (host->n_ports == 4) {
		/* flip the magic "make 4 ports work" bit */
		tmp = readl(mmio_base + sil_port[2].bmdma);
		if ((tmp & SIL_INTR_STEERING) == 0)
			writel(tmp | SIL_INTR_STEERING,
			       mmio_base + sil_port[2].bmdma);
	}
}

static bool sil_broken_system_poweroff(struct pci_dev *pdev)
{
	static const struct dmi_system_id broken_systems[] = {
		{
			.ident = "HP Compaq nx6325",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
				DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6325"),
			},
			/* PCI slot number of the controller */
			.driver_data = (void *)0x12UL,
		},

		{ }	/* terminate list */
	};
	const struct dmi_system_id *dmi = dmi_first_match(broken_systems);

	if (dmi) {
		unsigned long slot = (unsigned long)dmi->driver_data;
		/* apply the quirk only to on-board controllers */
		return slot == PCI_SLOT(pdev->devfn);
	}

	return false;
}

static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int board_id = ent->driver_data;
	struct ata_port_info pi = sil_port_info[board_id];
	const struct ata_port_info *ppi[] = { &pi, NULL };
	struct ata_host *host;
	void __iomem *mmio_base;
	int n_ports, rc;
	unsigned int i;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/* allocate host */
	n_ports = 2;
	if (board_id == sil_3114)
		n_ports = 4;

	if (sil_broken_system_poweroff(pdev)) {
		pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN |
					ATA_FLAG_NO_HIBERNATE_SPINDOWN;
		dev_info(&pdev->dev, "quirky BIOS, skipping spindown "
				"on poweroff and hibernation\n");
	}

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	if (!host)
		return -ENOMEM;

	/* acquire resources and fill host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << SIL_MMIO_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
	if (rc)
		return rc;

	mmio_base = host->iomap[SIL_MMIO_BAR];

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_ioports *ioaddr = &ap->ioaddr;

		ioaddr->cmd_addr = mmio_base + sil_port[i].tf;
		ioaddr->altstatus_addr =
		ioaddr->ctl_addr = mmio_base + sil_port[i].ctl;
		ioaddr->bmdma_addr = mmio_base + sil_port[i].bmdma;
		ioaddr->scr_addr = mmio_base + sil_port[i].scr;
		ata_sff_std_ports(ioaddr);

		ata_port_pbar_desc(ap, SIL_MMIO_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, SIL_MMIO_BAR, sil_port[i].tf, "tf");
	}

	/* initialize and activate */
	sil_init_controller(host);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, sil_interrupt, IRQF_SHARED,
				 &sil_sht);
}

#ifdef CONFIG_PM_SLEEP
static int sil_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	sil_init_controller(host);
	ata_host_resume(host);

	return 0;
}
#endif

module_pci_driver(sil_pci_driver);