linux/drivers/ata/sata_mv.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * sata_mv.c - Marvell SATA support
   4 *
   5 * Copyright 2008-2009: Marvell Corporation, all rights reserved.
   6 * Copyright 2005: EMC Corporation, all rights reserved.
   7 * Copyright 2005 Red Hat, Inc.  All rights reserved.
   8 *
   9 * Originally written by Brett Russ.
  10 * Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>.
  11 *
  12 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
  13 */
  14
  15/*
  16 * sata_mv TODO list:
  17 *
  18 * --> Develop a low-power-consumption strategy, and implement it.
  19 *
  20 * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds.
  21 *
  22 * --> [Experiment, Marvell value added] Is it possible to use target
  23 *       mode to cross-connect two Linux boxes with Marvell cards?  If so,
  24 *       creating LibATA target mode support would be very interesting.
  25 *
  26 *       Target mode, for those without docs, is the ability to directly
  27 *       connect two SATA ports.
  28 */
  29
  30/*
  31 * 80x1-B2 errata PCI#11:
  32 *
  33 * Users of the 6041/6081 Rev.B2 chips (current is C0)
  34 * should be careful to insert those cards only onto PCI-X bus #0,
  35 * and only in device slots 0..7, not higher.  The chips may not
  36 * work correctly otherwise  (note: this is a pretty rare condition).
  37 */
  38
  39#include <linux/kernel.h>
  40#include <linux/module.h>
  41#include <linux/pci.h>
  42#include <linux/init.h>
  43#include <linux/blkdev.h>
  44#include <linux/delay.h>
  45#include <linux/interrupt.h>
  46#include <linux/dmapool.h>
  47#include <linux/dma-mapping.h>
  48#include <linux/device.h>
  49#include <linux/clk.h>
  50#include <linux/phy/phy.h>
  51#include <linux/platform_device.h>
  52#include <linux/ata_platform.h>
  53#include <linux/mbus.h>
  54#include <linux/bitops.h>
  55#include <linux/gfp.h>
  56#include <linux/of.h>
  57#include <linux/of_irq.h>
  58#include <scsi/scsi_host.h>
  59#include <scsi/scsi_cmnd.h>
  60#include <scsi/scsi_device.h>
  61#include <linux/libata.h>
  62
  63#define DRV_NAME        "sata_mv"
  64#define DRV_VERSION     "1.28"
  65
  66/*
  67 * module options
  68 */
  69
  70#ifdef CONFIG_PCI
  71static int msi;
  72module_param(msi, int, S_IRUGO);
  73MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
  74#endif
  75
  76static int irq_coalescing_io_count;
  77module_param(irq_coalescing_io_count, int, S_IRUGO);
  78MODULE_PARM_DESC(irq_coalescing_io_count,
  79                 "IRQ coalescing I/O count threshold (0..255)");
  80
  81static int irq_coalescing_usecs;
  82module_param(irq_coalescing_usecs, int, S_IRUGO);
  83MODULE_PARM_DESC(irq_coalescing_usecs,
  84                 "IRQ coalescing time threshold in usecs");
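/*
 * Example usage (values purely illustrative): both thresholds must be
 * non-zero for coalescing to take effect, and the parameters are read-only
 * after load, so set them at module load time, e.g.:
 *
 *      modprobe sata_mv irq_coalescing_io_count=32 irq_coalescing_usecs=100
 *
 * which requests one completion interrupt per 32 finished I/Os or per
 * 100 usecs, whichever comes first.
 */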
  85
  86enum {
   87        /* BARs are enumerated in terms of pci_resource_start() */
  88        MV_PRIMARY_BAR          = 0,    /* offset 0x10: memory space */
  89        MV_IO_BAR               = 2,    /* offset 0x18: IO space */
  90        MV_MISC_BAR             = 3,    /* offset 0x1c: FLASH, NVRAM, SRAM */
  91
  92        MV_MAJOR_REG_AREA_SZ    = 0x10000,      /* 64KB */
  93        MV_MINOR_REG_AREA_SZ    = 0x2000,       /* 8KB */
  94
   95        /* For use with both IRQ coalescing methods ("all ports" or "per-HC") */
  96        COAL_CLOCKS_PER_USEC    = 150,          /* for calculating COAL_TIMEs */
  97        MAX_COAL_TIME_THRESHOLD = ((1 << 24) - 1), /* internal clocks count */
  98        MAX_COAL_IO_COUNT       = 255,          /* completed I/O count */
  99
 100        MV_PCI_REG_BASE         = 0,
 101
 102        /*
 103         * Per-chip ("all ports") interrupt coalescing feature.
 104         * This is only for GEN_II / GEN_IIE hardware.
 105         *
 106         * Coalescing defers the interrupt until either the IO_THRESHOLD
 107         * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
 108         */
 109        COAL_REG_BASE           = 0x18000,
 110        IRQ_COAL_CAUSE          = (COAL_REG_BASE + 0x08),
 111        ALL_PORTS_COAL_IRQ      = (1 << 4),     /* all ports irq event */
 112
 113        IRQ_COAL_IO_THRESHOLD   = (COAL_REG_BASE + 0xcc),
 114        IRQ_COAL_TIME_THRESHOLD = (COAL_REG_BASE + 0xd0),
 115
 116        /*
 117         * Registers for the (unused here) transaction coalescing feature:
 118         */
 119        TRAN_COAL_CAUSE_LO      = (COAL_REG_BASE + 0x88),
 120        TRAN_COAL_CAUSE_HI      = (COAL_REG_BASE + 0x8c),
 121
 122        SATAHC0_REG_BASE        = 0x20000,
 123        FLASH_CTL               = 0x1046c,
 124        GPIO_PORT_CTL           = 0x104f0,
 125        RESET_CFG               = 0x180d8,
 126
 127        MV_PCI_REG_SZ           = MV_MAJOR_REG_AREA_SZ,
 128        MV_SATAHC_REG_SZ        = MV_MAJOR_REG_AREA_SZ,
 129        MV_SATAHC_ARBTR_REG_SZ  = MV_MINOR_REG_AREA_SZ,         /* arbiter */
 130        MV_PORT_REG_SZ          = MV_MINOR_REG_AREA_SZ,
 131
 132        MV_MAX_Q_DEPTH          = 32,
 133        MV_MAX_Q_DEPTH_MASK     = MV_MAX_Q_DEPTH - 1,
 134
 135        /* CRQB needs alignment on a 1KB boundary. Size == 1KB
 136         * CRPB needs alignment on a 256B boundary. Size == 256B
 137         * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
 138         */
 139        MV_CRQB_Q_SZ            = (32 * MV_MAX_Q_DEPTH),
 140        MV_CRPB_Q_SZ            = (8 * MV_MAX_Q_DEPTH),
 141        MV_MAX_SG_CT            = 256,
 142        MV_SG_TBL_SZ            = (16 * MV_MAX_SG_CT),
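        /*
         * With MV_MAX_Q_DEPTH == 32 and MV_MAX_SG_CT == 256 these work out
         * to 1024 bytes (CRQB queue), 256 bytes (CRPB queue) and 4096 bytes
         * (one SG table), matching the alignment/size notes above.
         */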
 143
 144        /* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
 145        MV_PORT_HC_SHIFT        = 2,
 146        MV_PORTS_PER_HC         = (1 << MV_PORT_HC_SHIFT), /* 4 */
 147        /* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
 148        MV_PORT_MASK            = (MV_PORTS_PER_HC - 1),   /* 3 */
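        /* For example, port 6 is hardport (6 & 3) == 2 on HC (6 >> 2) == 1. */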
 149
 150        /* Host Flags */
 151        MV_FLAG_DUAL_HC         = (1 << 30),  /* two SATA Host Controllers */
 152
 153        MV_COMMON_FLAGS         = ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,
 154
 155        MV_GEN_I_FLAGS          = MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,
 156
 157        MV_GEN_II_FLAGS         = MV_COMMON_FLAGS | ATA_FLAG_NCQ |
 158                                  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA,
 159
 160        MV_GEN_IIE_FLAGS        = MV_GEN_II_FLAGS | ATA_FLAG_AN,
 161
 162        CRQB_FLAG_READ          = (1 << 0),
 163        CRQB_TAG_SHIFT          = 1,
 164        CRQB_IOID_SHIFT         = 6,    /* CRQB Gen-II/IIE IO Id shift */
 165        CRQB_PMP_SHIFT          = 12,   /* CRQB Gen-II/IIE PMP shift */
 166        CRQB_HOSTQ_SHIFT        = 17,   /* CRQB Gen-II/IIE HostQueTag shift */
 167        CRQB_CMD_ADDR_SHIFT     = 8,
 168        CRQB_CMD_CS             = (0x2 << 11),
 169        CRQB_CMD_LAST           = (1 << 15),
 170
 171        CRPB_FLAG_STATUS_SHIFT  = 8,
 172        CRPB_IOID_SHIFT_6       = 5,    /* CRPB Gen-II IO Id shift */
 173        CRPB_IOID_SHIFT_7       = 7,    /* CRPB Gen-IIE IO Id shift */
 174
 175        EPRD_FLAG_END_OF_TBL    = (1 << 31),
 176
 177        /* PCI interface registers */
 178
 179        MV_PCI_COMMAND          = 0xc00,
 180        MV_PCI_COMMAND_MWRCOM   = (1 << 4),     /* PCI Master Write Combining */
 181        MV_PCI_COMMAND_MRDTRIG  = (1 << 7),     /* PCI Master Read Trigger */
 182
 183        PCI_MAIN_CMD_STS        = 0xd30,
 184        STOP_PCI_MASTER         = (1 << 2),
 185        PCI_MASTER_EMPTY        = (1 << 3),
 186        GLOB_SFT_RST            = (1 << 4),
 187
 188        MV_PCI_MODE             = 0xd00,
 189        MV_PCI_MODE_MASK        = 0x30,
 190
 191        MV_PCI_EXP_ROM_BAR_CTL  = 0xd2c,
 192        MV_PCI_DISC_TIMER       = 0xd04,
 193        MV_PCI_MSI_TRIGGER      = 0xc38,
 194        MV_PCI_SERR_MASK        = 0xc28,
 195        MV_PCI_XBAR_TMOUT       = 0x1d04,
 196        MV_PCI_ERR_LOW_ADDRESS  = 0x1d40,
 197        MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
 198        MV_PCI_ERR_ATTRIBUTE    = 0x1d48,
 199        MV_PCI_ERR_COMMAND      = 0x1d50,
 200
 201        PCI_IRQ_CAUSE           = 0x1d58,
 202        PCI_IRQ_MASK            = 0x1d5c,
 203        PCI_UNMASK_ALL_IRQS     = 0x7fffff,     /* bits 22-0 */
 204
 205        PCIE_IRQ_CAUSE          = 0x1900,
 206        PCIE_IRQ_MASK           = 0x1910,
 207        PCIE_UNMASK_ALL_IRQS    = 0x40a,        /* assorted bits */
 208
 209        /* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
 210        PCI_HC_MAIN_IRQ_CAUSE   = 0x1d60,
 211        PCI_HC_MAIN_IRQ_MASK    = 0x1d64,
 212        SOC_HC_MAIN_IRQ_CAUSE   = 0x20020,
 213        SOC_HC_MAIN_IRQ_MASK    = 0x20024,
 214        ERR_IRQ                 = (1 << 0),     /* shift by (2 * port #) */
 215        DONE_IRQ                = (1 << 1),     /* shift by (2 * port #) */
 216        HC0_IRQ_PEND            = 0x1ff,        /* bits 0-8 = HC0's ports */
 217        HC_SHIFT                = 9,            /* bits 9-17 = HC1's ports */
 218        DONE_IRQ_0_3            = 0x000000aa,   /* DONE_IRQ ports 0,1,2,3 */
 219        DONE_IRQ_4_7            = (DONE_IRQ_0_3 << HC_SHIFT),  /* 4,5,6,7 */
 220        PCI_ERR                 = (1 << 18),
 221        TRAN_COAL_LO_DONE       = (1 << 19),    /* transaction coalescing */
 222        TRAN_COAL_HI_DONE       = (1 << 20),    /* transaction coalescing */
 223        PORTS_0_3_COAL_DONE     = (1 << 8),     /* HC0 IRQ coalescing */
 224        PORTS_4_7_COAL_DONE     = (1 << 17),    /* HC1 IRQ coalescing */
 225        ALL_PORTS_COAL_DONE     = (1 << 21),    /* GEN_II(E) IRQ coalescing */
 226        GPIO_INT                = (1 << 22),
 227        SELF_INT                = (1 << 23),
 228        TWSI_INT                = (1 << 24),
 229        HC_MAIN_RSVD            = (0x7f << 25), /* bits 31-25 */
 230        HC_MAIN_RSVD_5          = (0x1fff << 19), /* bits 31-19 */
 231        HC_MAIN_RSVD_SOC        = (0x3fffffb << 6),     /* bits 31-9, 7-6 */
 232
 233        /* SATAHC registers */
 234        HC_CFG                  = 0x00,
 235
 236        HC_IRQ_CAUSE            = 0x14,
 237        DMA_IRQ                 = (1 << 0),     /* shift by port # */
 238        HC_COAL_IRQ             = (1 << 4),     /* IRQ coalescing */
 239        DEV_IRQ                 = (1 << 8),     /* shift by port # */
 240
 241        /*
 242         * Per-HC (Host-Controller) interrupt coalescing feature.
 243         * This is present on all chip generations.
 244         *
 245         * Coalescing defers the interrupt until either the IO_THRESHOLD
 246         * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
 247         */
 248        HC_IRQ_COAL_IO_THRESHOLD        = 0x000c,
 249        HC_IRQ_COAL_TIME_THRESHOLD      = 0x0010,
 250
 251        SOC_LED_CTRL            = 0x2c,
 252        SOC_LED_CTRL_BLINK      = (1 << 0),     /* Active LED blink */
 253        SOC_LED_CTRL_ACT_PRESENCE = (1 << 2),   /* Multiplex dev presence */
 254                                                /*  with dev activity LED */
 255
 256        /* Shadow block registers */
 257        SHD_BLK                 = 0x100,
 258        SHD_CTL_AST             = 0x20,         /* ofs from SHD_BLK */
 259
 260        /* SATA registers */
 261        SATA_STATUS             = 0x300,  /* ctrl, err regs follow status */
 262        SATA_ACTIVE             = 0x350,
 263        FIS_IRQ_CAUSE           = 0x364,
 264        FIS_IRQ_CAUSE_AN        = (1 << 9),     /* async notification */
 265
 266        LTMODE                  = 0x30c,        /* requires read-after-write */
 267        LTMODE_BIT8             = (1 << 8),     /* unknown, but necessary */
 268
 269        PHY_MODE2               = 0x330,
 270        PHY_MODE3               = 0x310,
 271
 272        PHY_MODE4               = 0x314,        /* requires read-after-write */
 273        PHY_MODE4_CFG_MASK      = 0x00000003,   /* phy internal config field */
 274        PHY_MODE4_CFG_VALUE     = 0x00000001,   /* phy internal config field */
 275        PHY_MODE4_RSVD_ZEROS    = 0x5de3fffa,   /* Gen2e always write zeros */
 276        PHY_MODE4_RSVD_ONES     = 0x00000005,   /* Gen2e always write ones */
 277
 278        SATA_IFCTL              = 0x344,
 279        SATA_TESTCTL            = 0x348,
 280        SATA_IFSTAT             = 0x34c,
 281        VENDOR_UNIQUE_FIS       = 0x35c,
 282
 283        FISCFG                  = 0x360,
 284        FISCFG_WAIT_DEV_ERR     = (1 << 8),     /* wait for host on DevErr */
 285        FISCFG_SINGLE_SYNC      = (1 << 16),    /* SYNC on DMA activation */
 286
 287        PHY_MODE9_GEN2          = 0x398,
 288        PHY_MODE9_GEN1          = 0x39c,
 289        PHYCFG_OFS              = 0x3a0,        /* only in 65n devices */
 290
 291        MV5_PHY_MODE            = 0x74,
 292        MV5_LTMODE              = 0x30,
 293        MV5_PHY_CTL             = 0x0C,
 294        SATA_IFCFG              = 0x050,
 295        LP_PHY_CTL              = 0x058,
 296        LP_PHY_CTL_PIN_PU_PLL   = (1 << 0),
 297        LP_PHY_CTL_PIN_PU_RX    = (1 << 1),
 298        LP_PHY_CTL_PIN_PU_TX    = (1 << 2),
 299        LP_PHY_CTL_GEN_TX_3G    = (1 << 5),
 300        LP_PHY_CTL_GEN_RX_3G    = (1 << 9),
 301
 302        MV_M2_PREAMP_MASK       = 0x7e0,
 303
 304        /* Port registers */
 305        EDMA_CFG                = 0,
 306        EDMA_CFG_Q_DEPTH        = 0x1f,         /* max device queue depth */
 307        EDMA_CFG_NCQ            = (1 << 5),     /* for R/W FPDMA queued */
 308        EDMA_CFG_NCQ_GO_ON_ERR  = (1 << 14),    /* continue on error */
 309        EDMA_CFG_RD_BRST_EXT    = (1 << 11),    /* read burst 512B */
 310        EDMA_CFG_WR_BUFF_LEN    = (1 << 13),    /* write buffer 512B */
 311        EDMA_CFG_EDMA_FBS       = (1 << 16),    /* EDMA FIS-Based Switching */
 312        EDMA_CFG_FBS            = (1 << 26),    /* FIS-Based Switching */
 313
 314        EDMA_ERR_IRQ_CAUSE      = 0x8,
 315        EDMA_ERR_IRQ_MASK       = 0xc,
 316        EDMA_ERR_D_PAR          = (1 << 0),     /* UDMA data parity err */
 317        EDMA_ERR_PRD_PAR        = (1 << 1),     /* UDMA PRD parity err */
 318        EDMA_ERR_DEV            = (1 << 2),     /* device error */
 319        EDMA_ERR_DEV_DCON       = (1 << 3),     /* device disconnect */
 320        EDMA_ERR_DEV_CON        = (1 << 4),     /* device connected */
 321        EDMA_ERR_SERR           = (1 << 5),     /* SError bits [WBDST] raised */
 322        EDMA_ERR_SELF_DIS       = (1 << 7),     /* Gen II/IIE self-disable */
 323        EDMA_ERR_SELF_DIS_5     = (1 << 8),     /* Gen I self-disable */
 324        EDMA_ERR_BIST_ASYNC     = (1 << 8),     /* BIST FIS or Async Notify */
  325        EDMA_ERR_TRANS_IRQ_7    = (1 << 8),     /* Gen IIE transport layer irq */
 326        EDMA_ERR_CRQB_PAR       = (1 << 9),     /* CRQB parity error */
 327        EDMA_ERR_CRPB_PAR       = (1 << 10),    /* CRPB parity error */
 328        EDMA_ERR_INTRL_PAR      = (1 << 11),    /* internal parity error */
 329        EDMA_ERR_IORDY          = (1 << 12),    /* IORdy timeout */
 330
 331        EDMA_ERR_LNK_CTRL_RX    = (0xf << 13),  /* link ctrl rx error */
 332        EDMA_ERR_LNK_CTRL_RX_0  = (1 << 13),    /* transient: CRC err */
 333        EDMA_ERR_LNK_CTRL_RX_1  = (1 << 14),    /* transient: FIFO err */
 334        EDMA_ERR_LNK_CTRL_RX_2  = (1 << 15),    /* fatal: caught SYNC */
 335        EDMA_ERR_LNK_CTRL_RX_3  = (1 << 16),    /* transient: FIS rx err */
 336
 337        EDMA_ERR_LNK_DATA_RX    = (0xf << 17),  /* link data rx error */
 338
 339        EDMA_ERR_LNK_CTRL_TX    = (0x1f << 21), /* link ctrl tx error */
 340        EDMA_ERR_LNK_CTRL_TX_0  = (1 << 21),    /* transient: CRC err */
 341        EDMA_ERR_LNK_CTRL_TX_1  = (1 << 22),    /* transient: FIFO err */
 342        EDMA_ERR_LNK_CTRL_TX_2  = (1 << 23),    /* transient: caught SYNC */
 343        EDMA_ERR_LNK_CTRL_TX_3  = (1 << 24),    /* transient: caught DMAT */
 344        EDMA_ERR_LNK_CTRL_TX_4  = (1 << 25),    /* transient: FIS collision */
 345
 346        EDMA_ERR_LNK_DATA_TX    = (0x1f << 26), /* link data tx error */
 347
 348        EDMA_ERR_TRANS_PROTO    = (1 << 31),    /* transport protocol error */
 349        EDMA_ERR_OVERRUN_5      = (1 << 5),
 350        EDMA_ERR_UNDERRUN_5     = (1 << 6),
 351
 352        EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
 353                                  EDMA_ERR_LNK_CTRL_RX_1 |
 354                                  EDMA_ERR_LNK_CTRL_RX_3 |
 355                                  EDMA_ERR_LNK_CTRL_TX,
 356
 357        EDMA_EH_FREEZE          = EDMA_ERR_D_PAR |
 358                                  EDMA_ERR_PRD_PAR |
 359                                  EDMA_ERR_DEV_DCON |
 360                                  EDMA_ERR_DEV_CON |
 361                                  EDMA_ERR_SERR |
 362                                  EDMA_ERR_SELF_DIS |
 363                                  EDMA_ERR_CRQB_PAR |
 364                                  EDMA_ERR_CRPB_PAR |
 365                                  EDMA_ERR_INTRL_PAR |
 366                                  EDMA_ERR_IORDY |
 367                                  EDMA_ERR_LNK_CTRL_RX_2 |
 368                                  EDMA_ERR_LNK_DATA_RX |
 369                                  EDMA_ERR_LNK_DATA_TX |
 370                                  EDMA_ERR_TRANS_PROTO,
 371
 372        EDMA_EH_FREEZE_5        = EDMA_ERR_D_PAR |
 373                                  EDMA_ERR_PRD_PAR |
 374                                  EDMA_ERR_DEV_DCON |
 375                                  EDMA_ERR_DEV_CON |
 376                                  EDMA_ERR_OVERRUN_5 |
 377                                  EDMA_ERR_UNDERRUN_5 |
 378                                  EDMA_ERR_SELF_DIS_5 |
 379                                  EDMA_ERR_CRQB_PAR |
 380                                  EDMA_ERR_CRPB_PAR |
 381                                  EDMA_ERR_INTRL_PAR |
 382                                  EDMA_ERR_IORDY,
 383
 384        EDMA_REQ_Q_BASE_HI      = 0x10,
 385        EDMA_REQ_Q_IN_PTR       = 0x14,         /* also contains BASE_LO */
 386
 387        EDMA_REQ_Q_OUT_PTR      = 0x18,
 388        EDMA_REQ_Q_PTR_SHIFT    = 5,
 389
 390        EDMA_RSP_Q_BASE_HI      = 0x1c,
 391        EDMA_RSP_Q_IN_PTR       = 0x20,
 392        EDMA_RSP_Q_OUT_PTR      = 0x24,         /* also contains BASE_LO */
 393        EDMA_RSP_Q_PTR_SHIFT    = 3,
 394
 395        EDMA_CMD                = 0x28,         /* EDMA command register */
 396        EDMA_EN                 = (1 << 0),     /* enable EDMA */
 397        EDMA_DS                 = (1 << 1),     /* disable EDMA; self-negated */
 398        EDMA_RESET              = (1 << 2),     /* reset eng/trans/link/phy */
 399
 400        EDMA_STATUS             = 0x30,         /* EDMA engine status */
 401        EDMA_STATUS_CACHE_EMPTY = (1 << 6),     /* GenIIe command cache empty */
 402        EDMA_STATUS_IDLE        = (1 << 7),     /* GenIIe EDMA enabled/idle */
 403
 404        EDMA_IORDY_TMOUT        = 0x34,
 405        EDMA_ARB_CFG            = 0x38,
 406
 407        EDMA_HALTCOND           = 0x60,         /* GenIIe halt conditions */
 408        EDMA_UNKNOWN_RSVD       = 0x6C,         /* GenIIe unknown/reserved */
 409
 410        BMDMA_CMD               = 0x224,        /* bmdma command register */
 411        BMDMA_STATUS            = 0x228,        /* bmdma status register */
 412        BMDMA_PRD_LOW           = 0x22c,        /* bmdma PRD addr 31:0 */
 413        BMDMA_PRD_HIGH          = 0x230,        /* bmdma PRD addr 63:32 */
 414
 415        /* Host private flags (hp_flags) */
 416        MV_HP_FLAG_MSI          = (1 << 0),
 417        MV_HP_ERRATA_50XXB0     = (1 << 1),
 418        MV_HP_ERRATA_50XXB2     = (1 << 2),
 419        MV_HP_ERRATA_60X1B2     = (1 << 3),
 420        MV_HP_ERRATA_60X1C0     = (1 << 4),
 421        MV_HP_GEN_I             = (1 << 6),     /* Generation I: 50xx */
 422        MV_HP_GEN_II            = (1 << 7),     /* Generation II: 60xx */
 423        MV_HP_GEN_IIE           = (1 << 8),     /* Generation IIE: 6042/7042 */
 424        MV_HP_PCIE              = (1 << 9),     /* PCIe bus/regs: 7042 */
 425        MV_HP_CUT_THROUGH       = (1 << 10),    /* can use EDMA cut-through */
 426        MV_HP_FLAG_SOC          = (1 << 11),    /* SystemOnChip, no PCI */
 427        MV_HP_QUIRK_LED_BLINK_EN = (1 << 12),   /* is led blinking enabled? */
 428        MV_HP_FIX_LP_PHY_CTL    = (1 << 13),    /* fix speed in LP_PHY_CTL ? */
 429
 430        /* Port private flags (pp_flags) */
 431        MV_PP_FLAG_EDMA_EN      = (1 << 0),     /* is EDMA engine enabled? */
 432        MV_PP_FLAG_NCQ_EN       = (1 << 1),     /* is EDMA set up for NCQ? */
 433        MV_PP_FLAG_FBS_EN       = (1 << 2),     /* is EDMA set up for FBS? */
 434        MV_PP_FLAG_DELAYED_EH   = (1 << 3),     /* delayed dev err handling */
 435        MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4),    /* ignore initial ATA_DRDY */
 436};
 437
 438#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
 439#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
 440#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
 441#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
 442#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)
 443
 444#define WINDOW_CTRL(i)          (0x20030 + ((i) << 4))
 445#define WINDOW_BASE(i)          (0x20034 + ((i) << 4))
 446
 447enum {
 448        /* DMA boundary 0xffff is required by the s/g splitting
  449         * we need on /length/ in mv_fill_sg().
 450         */
 451        MV_DMA_BOUNDARY         = 0xffffU,
 452
 453        /* mask of register bits containing lower 32 bits
 454         * of EDMA request queue DMA address
 455         */
 456        EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
 457
 458        /* ditto, for response queue */
 459        EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
 460};
 461
 462enum chip_type {
 463        chip_504x,
 464        chip_508x,
 465        chip_5080,
 466        chip_604x,
 467        chip_608x,
 468        chip_6042,
 469        chip_7042,
 470        chip_soc,
 471};
 472
 473/* Command ReQuest Block: 32B */
 474struct mv_crqb {
 475        __le32                  sg_addr;
 476        __le32                  sg_addr_hi;
 477        __le16                  ctrl_flags;
 478        __le16                  ata_cmd[11];
 479};
 480
 481struct mv_crqb_iie {
 482        __le32                  addr;
 483        __le32                  addr_hi;
 484        __le32                  flags;
 485        __le32                  len;
 486        __le32                  ata_cmd[4];
 487};
 488
 489/* Command ResPonse Block: 8B */
 490struct mv_crpb {
 491        __le16                  id;
 492        __le16                  flags;
 493        __le32                  tmstmp;
 494};
 495
 496/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
 497struct mv_sg {
 498        __le32                  addr;
 499        __le32                  flags_size;
 500        __le32                  addr_hi;
 501        __le32                  reserved;
 502};
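/*
 * Size check: mv_crqb and mv_crqb_iie are each 32 bytes, mv_crpb is 8 bytes,
 * and mv_sg (one ePRD entry) is 16 bytes, matching the per-entry sizes used
 * in the MV_CRQB_Q_SZ, MV_CRPB_Q_SZ and MV_SG_TBL_SZ arithmetic above.
 */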
 503
 504/*
 505 * We keep a local cache of a few frequently accessed port
 506 * registers here, to avoid having to read them (very slow)
 507 * when switching between EDMA and non-EDMA modes.
 508 */
 509struct mv_cached_regs {
 510        u32                     fiscfg;
 511        u32                     ltmode;
 512        u32                     haltcond;
 513        u32                     unknown_rsvd;
 514};
 515
 516struct mv_port_priv {
 517        struct mv_crqb          *crqb;
 518        dma_addr_t              crqb_dma;
 519        struct mv_crpb          *crpb;
 520        dma_addr_t              crpb_dma;
 521        struct mv_sg            *sg_tbl[MV_MAX_Q_DEPTH];
 522        dma_addr_t              sg_tbl_dma[MV_MAX_Q_DEPTH];
 523
 524        unsigned int            req_idx;
 525        unsigned int            resp_idx;
 526
 527        u32                     pp_flags;
 528        struct mv_cached_regs   cached;
 529        unsigned int            delayed_eh_pmp_map;
 530};
 531
 532struct mv_port_signal {
 533        u32                     amps;
 534        u32                     pre;
 535};
 536
 537struct mv_host_priv {
 538        u32                     hp_flags;
 539        unsigned int            board_idx;
 540        u32                     main_irq_mask;
 541        struct mv_port_signal   signal[8];
 542        const struct mv_hw_ops  *ops;
 543        int                     n_ports;
 544        void __iomem            *base;
 545        void __iomem            *main_irq_cause_addr;
 546        void __iomem            *main_irq_mask_addr;
 547        u32                     irq_cause_offset;
 548        u32                     irq_mask_offset;
 549        u32                     unmask_all_irqs;
 550
 551        /*
 552         * Needed on some devices that require their clocks to be enabled.
 553         * These are optional: if the platform device does not have any
 554         * clocks, they won't be used.  Also, if the underlying hardware
 555         * does not support the common clock framework (CONFIG_HAVE_CLK=n),
 556         * all the clock operations become no-ops (see clk.h).
 557         */
 558        struct clk              *clk;
 559        struct clk              **port_clks;
 560        /*
 561         * Some devices have a SATA PHY which can be enabled/disabled
 562         * in order to save power. These are optional: if the platform
  563         * device does not have any PHYs, they won't be used.
 564         */
 565        struct phy              **port_phys;
 566        /*
 567         * These consistent DMA memory pools give us guaranteed
 568         * alignment for hardware-accessed data structures,
 569         * and less memory waste in accomplishing the alignment.
 570         */
 571        struct dma_pool         *crqb_pool;
 572        struct dma_pool         *crpb_pool;
 573        struct dma_pool         *sg_tbl_pool;
 574};
 575
 576struct mv_hw_ops {
 577        void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
 578                           unsigned int port);
 579        void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
 580        void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
 581                           void __iomem *mmio);
 582        int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
 583                        unsigned int n_hc);
 584        void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
 585        void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
 586};
 587
 588static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
 589static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
 590static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
 591static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
 592static int mv_port_start(struct ata_port *ap);
 593static void mv_port_stop(struct ata_port *ap);
 594static int mv_qc_defer(struct ata_queued_cmd *qc);
 595static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc);
 596static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc);
 597static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
 598static int mv_hardreset(struct ata_link *link, unsigned int *class,
 599                        unsigned long deadline);
 600static void mv_eh_freeze(struct ata_port *ap);
 601static void mv_eh_thaw(struct ata_port *ap);
 602static void mv6_dev_config(struct ata_device *dev);
 603
 604static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
 605                           unsigned int port);
 606static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
 607static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
 608                           void __iomem *mmio);
 609static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
 610                        unsigned int n_hc);
 611static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
 612static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
 613
 614static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
 615                           unsigned int port);
 616static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
 617static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
 618                           void __iomem *mmio);
 619static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
 620                        unsigned int n_hc);
 621static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
 622static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
 623                                      void __iomem *mmio);
 624static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
 625                                      void __iomem *mmio);
 626static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
 627                                  void __iomem *mmio, unsigned int n_hc);
 628static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
 629                                      void __iomem *mmio);
 630static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
 631static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
 632                                  void __iomem *mmio, unsigned int port);
 633static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
 634static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
 635                             unsigned int port_no);
 636static int mv_stop_edma(struct ata_port *ap);
 637static int mv_stop_edma_engine(void __iomem *port_mmio);
 638static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);
 639
 640static void mv_pmp_select(struct ata_port *ap, int pmp);
 641static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
 642                                unsigned long deadline);
 643static int  mv_softreset(struct ata_link *link, unsigned int *class,
 644                                unsigned long deadline);
 645static void mv_pmp_error_handler(struct ata_port *ap);
 646static void mv_process_crpb_entries(struct ata_port *ap,
 647                                        struct mv_port_priv *pp);
 648
 649static void mv_sff_irq_clear(struct ata_port *ap);
 650static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
 651static void mv_bmdma_setup(struct ata_queued_cmd *qc);
 652static void mv_bmdma_start(struct ata_queued_cmd *qc);
 653static void mv_bmdma_stop(struct ata_queued_cmd *qc);
 654static u8   mv_bmdma_status(struct ata_port *ap);
 655static u8 mv_sff_check_status(struct ata_port *ap);
 656
 657/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 658 * because we have to allow room for worst case splitting of
 659 * PRDs for 64K boundaries in mv_fill_sg().
 660 */
 661#ifdef CONFIG_PCI
 662static struct scsi_host_template mv5_sht = {
 663        ATA_BASE_SHT(DRV_NAME),
 664        .sg_tablesize           = MV_MAX_SG_CT / 2,
 665        .dma_boundary           = MV_DMA_BOUNDARY,
 666};
 667#endif
 668static struct scsi_host_template mv6_sht = {
 669        __ATA_BASE_SHT(DRV_NAME),
 670        .can_queue              = MV_MAX_Q_DEPTH - 1,
 671        .sg_tablesize           = MV_MAX_SG_CT / 2,
 672        .dma_boundary           = MV_DMA_BOUNDARY,
 673        .sdev_groups            = ata_ncq_sdev_groups,
 674        .change_queue_depth     = ata_scsi_change_queue_depth,
 675        .tag_alloc_policy       = BLK_TAG_ALLOC_RR,
 676        .slave_configure        = ata_scsi_slave_config
 677};
 678
 679static struct ata_port_operations mv5_ops = {
 680        .inherits               = &ata_sff_port_ops,
 681
 682        .lost_interrupt         = ATA_OP_NULL,
 683
 684        .qc_defer               = mv_qc_defer,
 685        .qc_prep                = mv_qc_prep,
 686        .qc_issue               = mv_qc_issue,
 687
 688        .freeze                 = mv_eh_freeze,
 689        .thaw                   = mv_eh_thaw,
 690        .hardreset              = mv_hardreset,
 691
 692        .scr_read               = mv5_scr_read,
 693        .scr_write              = mv5_scr_write,
 694
 695        .port_start             = mv_port_start,
 696        .port_stop              = mv_port_stop,
 697};
 698
 699static struct ata_port_operations mv6_ops = {
 700        .inherits               = &ata_bmdma_port_ops,
 701
 702        .lost_interrupt         = ATA_OP_NULL,
 703
 704        .qc_defer               = mv_qc_defer,
 705        .qc_prep                = mv_qc_prep,
 706        .qc_issue               = mv_qc_issue,
 707
 708        .dev_config             = mv6_dev_config,
 709
 710        .freeze                 = mv_eh_freeze,
 711        .thaw                   = mv_eh_thaw,
 712        .hardreset              = mv_hardreset,
 713        .softreset              = mv_softreset,
 714        .pmp_hardreset          = mv_pmp_hardreset,
 715        .pmp_softreset          = mv_softreset,
 716        .error_handler          = mv_pmp_error_handler,
 717
 718        .scr_read               = mv_scr_read,
 719        .scr_write              = mv_scr_write,
 720
 721        .sff_check_status       = mv_sff_check_status,
 722        .sff_irq_clear          = mv_sff_irq_clear,
 723        .check_atapi_dma        = mv_check_atapi_dma,
 724        .bmdma_setup            = mv_bmdma_setup,
 725        .bmdma_start            = mv_bmdma_start,
 726        .bmdma_stop             = mv_bmdma_stop,
 727        .bmdma_status           = mv_bmdma_status,
 728
 729        .port_start             = mv_port_start,
 730        .port_stop              = mv_port_stop,
 731};
 732
 733static struct ata_port_operations mv_iie_ops = {
 734        .inherits               = &mv6_ops,
 735        .dev_config             = ATA_OP_NULL,
 736        .qc_prep                = mv_qc_prep_iie,
 737};
 738
 739static const struct ata_port_info mv_port_info[] = {
 740        {  /* chip_504x */
 741                .flags          = MV_GEN_I_FLAGS,
 742                .pio_mask       = ATA_PIO4,
 743                .udma_mask      = ATA_UDMA6,
 744                .port_ops       = &mv5_ops,
 745        },
 746        {  /* chip_508x */
 747                .flags          = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
 748                .pio_mask       = ATA_PIO4,
 749                .udma_mask      = ATA_UDMA6,
 750                .port_ops       = &mv5_ops,
 751        },
 752        {  /* chip_5080 */
 753                .flags          = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
 754                .pio_mask       = ATA_PIO4,
 755                .udma_mask      = ATA_UDMA6,
 756                .port_ops       = &mv5_ops,
 757        },
 758        {  /* chip_604x */
 759                .flags          = MV_GEN_II_FLAGS,
 760                .pio_mask       = ATA_PIO4,
 761                .udma_mask      = ATA_UDMA6,
 762                .port_ops       = &mv6_ops,
 763        },
 764        {  /* chip_608x */
 765                .flags          = MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
 766                .pio_mask       = ATA_PIO4,
 767                .udma_mask      = ATA_UDMA6,
 768                .port_ops       = &mv6_ops,
 769        },
 770        {  /* chip_6042 */
 771                .flags          = MV_GEN_IIE_FLAGS,
 772                .pio_mask       = ATA_PIO4,
 773                .udma_mask      = ATA_UDMA6,
 774                .port_ops       = &mv_iie_ops,
 775        },
 776        {  /* chip_7042 */
 777                .flags          = MV_GEN_IIE_FLAGS,
 778                .pio_mask       = ATA_PIO4,
 779                .udma_mask      = ATA_UDMA6,
 780                .port_ops       = &mv_iie_ops,
 781        },
 782        {  /* chip_soc */
 783                .flags          = MV_GEN_IIE_FLAGS,
 784                .pio_mask       = ATA_PIO4,
 785                .udma_mask      = ATA_UDMA6,
 786                .port_ops       = &mv_iie_ops,
 787        },
 788};
 789
 790static const struct pci_device_id mv_pci_tbl[] = {
 791        { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
 792        { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
 793        { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
 794        { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
 795        /* RocketRAID 1720/174x have different identifiers */
 796        { PCI_VDEVICE(TTI, 0x1720), chip_6042 },
 797        { PCI_VDEVICE(TTI, 0x1740), chip_6042 },
 798        { PCI_VDEVICE(TTI, 0x1742), chip_6042 },
 799
 800        { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
 801        { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
 802        { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
 803        { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
 804        { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
 805
 806        { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
 807
 808        /* Adaptec 1430SA */
 809        { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
 810
 811        /* Marvell 7042 support */
 812        { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
 813
 814        /* Highpoint RocketRAID PCIe series */
 815        { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
 816        { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
 817
 818        { }                     /* terminate list */
 819};
 820
 821static const struct mv_hw_ops mv5xxx_ops = {
 822        .phy_errata             = mv5_phy_errata,
 823        .enable_leds            = mv5_enable_leds,
 824        .read_preamp            = mv5_read_preamp,
 825        .reset_hc               = mv5_reset_hc,
 826        .reset_flash            = mv5_reset_flash,
 827        .reset_bus              = mv5_reset_bus,
 828};
 829
 830static const struct mv_hw_ops mv6xxx_ops = {
 831        .phy_errata             = mv6_phy_errata,
 832        .enable_leds            = mv6_enable_leds,
 833        .read_preamp            = mv6_read_preamp,
 834        .reset_hc               = mv6_reset_hc,
 835        .reset_flash            = mv6_reset_flash,
 836        .reset_bus              = mv_reset_pci_bus,
 837};
 838
 839static const struct mv_hw_ops mv_soc_ops = {
 840        .phy_errata             = mv6_phy_errata,
 841        .enable_leds            = mv_soc_enable_leds,
 842        .read_preamp            = mv_soc_read_preamp,
 843        .reset_hc               = mv_soc_reset_hc,
 844        .reset_flash            = mv_soc_reset_flash,
 845        .reset_bus              = mv_soc_reset_bus,
 846};
 847
 848static const struct mv_hw_ops mv_soc_65n_ops = {
 849        .phy_errata             = mv_soc_65n_phy_errata,
 850        .enable_leds            = mv_soc_enable_leds,
 851        .reset_hc               = mv_soc_reset_hc,
 852        .reset_flash            = mv_soc_reset_flash,
 853        .reset_bus              = mv_soc_reset_bus,
 854};
 855
 856/*
 857 * Functions
 858 */
 859
 860static inline void writelfl(unsigned long data, void __iomem *addr)
 861{
 862        writel(data, addr);
 863        (void) readl(addr);     /* flush to avoid PCI posted write */
 864}
 865
 866static inline unsigned int mv_hc_from_port(unsigned int port)
 867{
 868        return port >> MV_PORT_HC_SHIFT;
 869}
 870
 871static inline unsigned int mv_hardport_from_port(unsigned int port)
 872{
 873        return port & MV_PORT_MASK;
 874}
 875
 876/*
 877 * Consolidate some rather tricky bit shift calculations.
 878 * This is hot-path stuff, so not a function.
 879 * Simple code, with two return values, so macro rather than inline.
 880 *
 881 * port is the sole input, in range 0..7.
 882 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
 883 * hardport is the other output, in range 0..3.
 884 *
 885 * Note that port and hardport may be the same variable in some cases.
 886 */
 887#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)    \
 888{                                                               \
 889        shift    = mv_hc_from_port(port) * HC_SHIFT;            \
 890        hardport = mv_hardport_from_port(port);                 \
 891        shift   += hardport * 2;                                \
 892}
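/*
 * Worked example: for port 6, mv_hc_from_port() gives HC 1, so shift starts
 * at HC_SHIFT (9); hardport is 2, so shift becomes 9 + 2*2 = 13.  Bits 13
 * (ERR_IRQ) and 14 (DONE_IRQ) of the main cause/mask registers then belong
 * to that port, consistent with DONE_IRQ_4_7 above.
 */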
 893
 894static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
 895{
 896        return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
 897}
 898
 899static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
 900                                                 unsigned int port)
 901{
 902        return mv_hc_base(base, mv_hc_from_port(port));
 903}
 904
 905static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
 906{
 907        return  mv_hc_base_from_port(base, port) +
 908                MV_SATAHC_ARBTR_REG_SZ +
 909                (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
 910}
 911
 912static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
 913{
 914        void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
 915        unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
 916
 917        return hc_mmio + ofs;
 918}
 919
 920static inline void __iomem *mv_host_base(struct ata_host *host)
 921{
 922        struct mv_host_priv *hpriv = host->private_data;
 923        return hpriv->base;
 924}
 925
 926static inline void __iomem *mv_ap_base(struct ata_port *ap)
 927{
 928        return mv_port_base(mv_host_base(ap->host), ap->port_no);
 929}
 930
 931static inline int mv_get_hc_count(unsigned long port_flags)
 932{
 933        return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
 934}
 935
 936/**
 937 *      mv_save_cached_regs - (re-)initialize cached port registers
 938 *      @ap: the port whose registers we are caching
 939 *
 940 *      Initialize the local cache of port registers,
 941 *      so that reading them over and over again can
 942 *      be avoided on the hotter paths of this driver.
 943 *      This saves a few microseconds each time we switch
  944 *      to/from EDMA mode to perform (e.g.) a drive cache flush.
 945 */
 946static void mv_save_cached_regs(struct ata_port *ap)
 947{
 948        void __iomem *port_mmio = mv_ap_base(ap);
 949        struct mv_port_priv *pp = ap->private_data;
 950
 951        pp->cached.fiscfg = readl(port_mmio + FISCFG);
 952        pp->cached.ltmode = readl(port_mmio + LTMODE);
 953        pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND);
 954        pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD);
 955}
 956
 957/**
 958 *      mv_write_cached_reg - write to a cached port register
 959 *      @addr: hardware address of the register
 960 *      @old: pointer to cached value of the register
 961 *      @new: new value for the register
 962 *
 963 *      Write a new value to a cached register,
 964 *      but only if the value is different from before.
 965 */
 966static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
 967{
 968        if (new != *old) {
 969                unsigned long laddr;
 970                *old = new;
 971                /*
 972                 * Workaround for 88SX60x1-B2 FEr SATA#13:
 973                 * Read-after-write is needed to prevent generating 64-bit
 974                 * write cycles on the PCI bus for SATA interface registers
 975                 * at offsets ending in 0x4 or 0xc.
 976                 *
 977                 * Looks like a lot of fuss, but it avoids an unnecessary
 978                 * +1 usec read-after-write delay for unaffected registers.
 979                 */
 980                laddr = (unsigned long)addr & 0xffff;
 981                if (laddr >= 0x300 && laddr <= 0x33c) {
 982                        laddr &= 0x000f;
 983                        if (laddr == 0x4 || laddr == 0xc) {
 984                                writelfl(new, addr); /* read after write */
 985                                return;
 986                        }
 987                }
 988                writel(new, addr); /* unaffected by the errata */
 989        }
 990}
 991
 992static void mv_set_edma_ptrs(void __iomem *port_mmio,
 993                             struct mv_host_priv *hpriv,
 994                             struct mv_port_priv *pp)
 995{
 996        u32 index;
 997
 998        /*
 999         * initialize request queue
1000         */
1001        pp->req_idx &= MV_MAX_Q_DEPTH_MASK;     /* paranoia */
1002        index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
1003
1004        WARN_ON(pp->crqb_dma & 0x3ff);
1005        writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI);
1006        writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
1007                 port_mmio + EDMA_REQ_Q_IN_PTR);
1008        writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR);
1009
1010        /*
1011         * initialize response queue
1012         */
1013        pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;    /* paranoia */
1014        index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;
1015
1016        WARN_ON(pp->crpb_dma & 0xff);
1017        writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI);
1018        writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR);
1019        writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
1020                 port_mmio + EDMA_RSP_Q_OUT_PTR);
1021}
1022
1023static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
1024{
1025        /*
1026         * When writing to the main_irq_mask in hardware,
1027         * we must ensure exclusivity between the interrupt coalescing bits
1028         * and the corresponding individual port DONE_IRQ bits.
1029         *
1030         * Note that this register is really an "IRQ enable" register,
1031         * not an "IRQ mask" register as Marvell's naming might suggest.
1032         */
1033        if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE))
1034                mask &= ~DONE_IRQ_0_3;
1035        if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE))
1036                mask &= ~DONE_IRQ_4_7;
1037        writelfl(mask, hpriv->main_irq_mask_addr);
1038}
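/*
 * For example, when ALL_PORTS_COAL_DONE is being enabled, the individual
 * DONE_IRQ bits for all eight ports are stripped from the mask, so normal
 * command completions are then reported only via the coalescing event.
 */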
1039
1040static void mv_set_main_irq_mask(struct ata_host *host,
1041                                 u32 disable_bits, u32 enable_bits)
1042{
1043        struct mv_host_priv *hpriv = host->private_data;
1044        u32 old_mask, new_mask;
1045
1046        old_mask = hpriv->main_irq_mask;
1047        new_mask = (old_mask & ~disable_bits) | enable_bits;
1048        if (new_mask != old_mask) {
1049                hpriv->main_irq_mask = new_mask;
1050                mv_write_main_irq_mask(new_mask, hpriv);
1051        }
1052}
1053
1054static void mv_enable_port_irqs(struct ata_port *ap,
1055                                     unsigned int port_bits)
1056{
1057        unsigned int shift, hardport, port = ap->port_no;
1058        u32 disable_bits, enable_bits;
1059
1060        MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
1061
1062        disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
1063        enable_bits  = port_bits << shift;
1064        mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
1065}
1066
1067static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
1068                                          void __iomem *port_mmio,
1069                                          unsigned int port_irqs)
1070{
1071        struct mv_host_priv *hpriv = ap->host->private_data;
1072        int hardport = mv_hardport_from_port(ap->port_no);
1073        void __iomem *hc_mmio = mv_hc_base_from_port(
1074                                mv_host_base(ap->host), ap->port_no);
1075        u32 hc_irq_cause;
1076
1077        /* clear EDMA event indicators, if any */
1078        writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
1079
1080        /* clear pending irq events */
1081        hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
1082        writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
1083
1084        /* clear FIS IRQ Cause */
1085        if (IS_GEN_IIE(hpriv))
1086                writelfl(0, port_mmio + FIS_IRQ_CAUSE);
1087
1088        mv_enable_port_irqs(ap, port_irqs);
1089}
1090
1091static void mv_set_irq_coalescing(struct ata_host *host,
1092                                  unsigned int count, unsigned int usecs)
1093{
1094        struct mv_host_priv *hpriv = host->private_data;
1095        void __iomem *mmio = hpriv->base, *hc_mmio;
1096        u32 coal_enable = 0;
1097        unsigned long flags;
1098        unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC;
1099        const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
1100                                                        ALL_PORTS_COAL_DONE;
1101
1102        /* Disable IRQ coalescing if either threshold is zero */
1103        if (!usecs || !count) {
1104                clks = count = 0;
1105        } else {
1106                /* Respect maximum limits of the hardware */
1107                clks = usecs * COAL_CLOCKS_PER_USEC;
1108                if (clks > MAX_COAL_TIME_THRESHOLD)
1109                        clks = MAX_COAL_TIME_THRESHOLD;
1110                if (count > MAX_COAL_IO_COUNT)
1111                        count = MAX_COAL_IO_COUNT;
1112        }
1113
1114        spin_lock_irqsave(&host->lock, flags);
1115        mv_set_main_irq_mask(host, coal_disable, 0);
1116
1117        if (is_dual_hc && !IS_GEN_I(hpriv)) {
1118                /*
1119                 * GEN_II/GEN_IIE with dual host controllers:
1120                 * one set of global thresholds for the entire chip.
1121                 */
1122                writel(clks,  mmio + IRQ_COAL_TIME_THRESHOLD);
1123                writel(count, mmio + IRQ_COAL_IO_THRESHOLD);
1124                /* clear leftover coal IRQ bit */
1125                writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
1126                if (count)
1127                        coal_enable = ALL_PORTS_COAL_DONE;
1128                clks = count = 0; /* force clearing of regular regs below */
1129        }
1130
1131        /*
1132         * All chips: independent thresholds for each HC on the chip.
1133         */
1134        hc_mmio = mv_hc_base_from_port(mmio, 0);
1135        writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
1136        writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
1137        writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
1138        if (count)
1139                coal_enable |= PORTS_0_3_COAL_DONE;
1140        if (is_dual_hc) {
1141                hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
1142                writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
1143                writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
1144                writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
1145                if (count)
1146                        coal_enable |= PORTS_4_7_COAL_DONE;
1147        }
1148
1149        mv_set_main_irq_mask(host, 0, coal_enable);
1150        spin_unlock_irqrestore(&host->lock, flags);
1151}
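/*
 * Worked example: usecs == 100 converts to 100 * COAL_CLOCKS_PER_USEC
 * == 15000 internal clocks, well below MAX_COAL_TIME_THRESHOLD, so it is
 * programmed as-is; a count above 255 would be clamped to MAX_COAL_IO_COUNT.
 * The irq_coalescing_io_count/irq_coalescing_usecs module parameters above
 * are intended to supply these values.
 */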
1152
 1153/*
 1154 *      mv_start_edma - Enable eDMA engine
 1155 *      @ap: ATA port
 1156 *      @port_mmio: port registers base address
 1157 *      @pp: port private data
 1158 *      @protocol: taskfile protocol of the command being started
 1159 *
 1160 *      LOCKING:
 1161 *      Inherited from caller.
 1162 */
1163static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
1164                         struct mv_port_priv *pp, u8 protocol)
1165{
1166        int want_ncq = (protocol == ATA_PROT_NCQ);
1167
1168        if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1169                int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
1170                if (want_ncq != using_ncq)
1171                        mv_stop_edma(ap);
1172        }
1173        if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
1174                struct mv_host_priv *hpriv = ap->host->private_data;
1175
1176                mv_edma_cfg(ap, want_ncq, 1);
1177
1178                mv_set_edma_ptrs(port_mmio, hpriv, pp);
1179                mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);
1180
1181                writelfl(EDMA_EN, port_mmio + EDMA_CMD);
1182                pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
1183        }
1184}
1185
1186static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
1187{
1188        void __iomem *port_mmio = mv_ap_base(ap);
1189        const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
1190        const int per_loop = 5, timeout = (15 * 1000 / per_loop);
1191        int i;
1192
1193        /*
1194         * Wait for the EDMA engine to finish transactions in progress.
1195         * No idea what a good "timeout" value might be, but measurements
1196         * indicate that it often requires hundreds of microseconds
1197         * with two drives in-use.  So we use the 15msec value above
1198         * as a rough guess at what even more drives might require.
1199         */
1200        for (i = 0; i < timeout; ++i) {
1201                u32 edma_stat = readl(port_mmio + EDMA_STATUS);
1202                if ((edma_stat & empty_idle) == empty_idle)
1203                        break;
1204                udelay(per_loop);
1205        }
1206        /* ata_port_info(ap, "%s: %u+ usecs\n", __func__, i); */
1207}
1208
1209/**
1210 *      mv_stop_edma_engine - Disable eDMA engine
1211 *      @port_mmio: io base address
1212 *
1213 *      LOCKING:
1214 *      Inherited from caller.
1215 */
1216static int mv_stop_edma_engine(void __iomem *port_mmio)
1217{
1218        int i;
1219
1220        /* Disable eDMA.  The disable bit auto clears. */
1221        writelfl(EDMA_DS, port_mmio + EDMA_CMD);
1222
1223        /* Wait for the chip to confirm eDMA is off. */
1224        for (i = 10000; i > 0; i--) {
1225                u32 reg = readl(port_mmio + EDMA_CMD);
1226                if (!(reg & EDMA_EN))
1227                        return 0;
1228                udelay(10);
1229        }
1230        return -EIO;
1231}
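/*
 * The loop above allows up to 10000 * 10 usecs (roughly 100 msec) for
 * EDMA_EN to clear before giving up with -EIO.
 */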
1232
1233static int mv_stop_edma(struct ata_port *ap)
1234{
1235        void __iomem *port_mmio = mv_ap_base(ap);
1236        struct mv_port_priv *pp = ap->private_data;
1237        int err = 0;
1238
1239        if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
1240                return 0;
1241        pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1242        mv_wait_for_edma_empty_idle(ap);
1243        if (mv_stop_edma_engine(port_mmio)) {
1244                ata_port_err(ap, "Unable to stop eDMA\n");
1245                err = -EIO;
1246        }
1247        mv_edma_cfg(ap, 0, 0);
1248        return err;
1249}
1250
1251#ifdef ATA_DEBUG
1252static void mv_dump_mem(void __iomem *start, unsigned bytes)
1253{
1254        int b, w;
1255        for (b = 0; b < bytes; ) {
1256                DPRINTK("%p: ", start + b);
1257                for (w = 0; b < bytes && w < 4; w++) {
1258                        printk("%08x ", readl(start + b));
1259                        b += sizeof(u32);
1260                }
1261                printk("\n");
1262        }
1263}
1264#endif
1265#if defined(ATA_DEBUG) || defined(CONFIG_PCI)
1266static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
1267{
1268#ifdef ATA_DEBUG
1269        int b, w;
1270        u32 dw;
1271        for (b = 0; b < bytes; ) {
1272                DPRINTK("%02x: ", b);
1273                for (w = 0; b < bytes && w < 4; w++) {
1274                        (void) pci_read_config_dword(pdev, b, &dw);
1275                        printk("%08x ", dw);
1276                        b += sizeof(u32);
1277                }
1278                printk("\n");
1279        }
1280#endif
1281}
1282#endif
1283static void mv_dump_all_regs(void __iomem *mmio_base, int port,
1284                             struct pci_dev *pdev)
1285{
1286#ifdef ATA_DEBUG
1287        void __iomem *hc_base = mv_hc_base(mmio_base,
1288                                           port >> MV_PORT_HC_SHIFT);
1289        void __iomem *port_base;
1290        int start_port, num_ports, p, start_hc, num_hcs, hc;
1291
1292        if (0 > port) {
1293                start_hc = start_port = 0;
 1294                num_ports = 8;          /* should be benign for 4-port devices */
1295                num_hcs = 2;
1296        } else {
1297                start_hc = port >> MV_PORT_HC_SHIFT;
1298                start_port = port;
1299                num_ports = num_hcs = 1;
1300        }
1301        DPRINTK("All registers for port(s) %u-%u:\n", start_port,
1302                num_ports > 1 ? num_ports - 1 : start_port);
1303
1304        if (NULL != pdev) {
1305                DPRINTK("PCI config space regs:\n");
1306                mv_dump_pci_cfg(pdev, 0x68);
1307        }
1308        DPRINTK("PCI regs:\n");
1309        mv_dump_mem(mmio_base+0xc00, 0x3c);
1310        mv_dump_mem(mmio_base+0xd00, 0x34);
1311        mv_dump_mem(mmio_base+0xf00, 0x4);
1312        mv_dump_mem(mmio_base+0x1d00, 0x6c);
1313        for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
1314                hc_base = mv_hc_base(mmio_base, hc);
1315                DPRINTK("HC regs (HC %i):\n", hc);
1316                mv_dump_mem(hc_base, 0x1c);
1317        }
1318        for (p = start_port; p < start_port + num_ports; p++) {
1319                port_base = mv_port_base(mmio_base, p);
1320                DPRINTK("EDMA regs (port %i):\n", p);
1321                mv_dump_mem(port_base, 0x54);
1322                DPRINTK("SATA regs (port %i):\n", p);
1323                mv_dump_mem(port_base+0x300, 0x60);
1324        }
1325#endif
1326}
1327
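    /*
     *      mv_scr_offset - map a libata SCR register index to its port offset
     *
     *      SStatus, SError and SControl occupy consecutive 32-bit slots
     *      starting at SATA_STATUS, so their offsets follow directly from the
     *      SCR index; SActive sits at its own offset.  Any other index yields
     *      0xffffffffU, which the SCR read/write helpers below treat as
     *      "unsupported register".
     */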
1328static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1329{
1330        unsigned int ofs;
1331
1332        switch (sc_reg_in) {
1333        case SCR_STATUS:
1334        case SCR_CONTROL:
1335        case SCR_ERROR:
1336                ofs = SATA_STATUS + (sc_reg_in * sizeof(u32));
1337                break;
1338        case SCR_ACTIVE:
1339                ofs = SATA_ACTIVE;   /* active is not with the others */
1340                break;
1341        default:
1342                ofs = 0xffffffffU;
1343                break;
1344        }
1345        return ofs;
1346}
1347
1348static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
1349{
1350        unsigned int ofs = mv_scr_offset(sc_reg_in);
1351
1352        if (ofs != 0xffffffffU) {
1353                *val = readl(mv_ap_base(link->ap) + ofs);
1354                return 0;
1355        } else
1356                return -EINVAL;
1357}
1358
1359static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
1360{
1361        unsigned int ofs = mv_scr_offset(sc_reg_in);
1362
1363        if (ofs != 0xffffffffU) {
1364                void __iomem *addr = mv_ap_base(link->ap) + ofs;
1365                struct mv_host_priv *hpriv = link->ap->host->private_data;
1366                if (sc_reg_in == SCR_CONTROL) {
1367                        /*
1368                         * Workaround for 88SX60x1 FEr SATA#26:
1369                         *
1370                         * COMRESETs have to take care not to accidentally
1371                         * put the drive to sleep when writing SCR_CONTROL.
1372                         * Setting bits 12..15 prevents this problem.
1373                         *
1374                         * So if we see an outbound COMRESET, set those bits.
1375                         * Ditto for the followup write that clears the reset.
1376                         *
1377                         * The proprietary driver does this for
1378                         * all chip versions, and so do we.
1379                         */
1380                        if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
1381                                val |= 0xf000;
1382
1383                        if (hpriv->hp_flags & MV_HP_FIX_LP_PHY_CTL) {
1384                                void __iomem *lp_phy_addr =
1385                                        mv_ap_base(link->ap) + LP_PHY_CTL;
1386                                /*
1387                                 * Set PHY speed according to SControl speed.
1388                                 */
1389                                u32 lp_phy_val =
1390                                        LP_PHY_CTL_PIN_PU_PLL |
1391                                        LP_PHY_CTL_PIN_PU_RX  |
1392                                        LP_PHY_CTL_PIN_PU_TX;
1393
1394                                if ((val & 0xf0) != 0x10)
1395                                        lp_phy_val |=
1396                                                LP_PHY_CTL_GEN_TX_3G |
1397                                                LP_PHY_CTL_GEN_RX_3G;
1398
1399                                writelfl(lp_phy_val, lp_phy_addr);
1400                        }
1401                }
1402                writelfl(val, addr);
1403                return 0;
1404        } else
1405                return -EINVAL;
1406}
1407
1408static void mv6_dev_config(struct ata_device *adev)
1409{
1410        /*
1411         * Deal with Gen-II ("mv6") hardware quirks/restrictions:
1412         *
1413         * Gen-II does not support NCQ over a port multiplier
1414         *  (no FIS-based switching).
1415         */
1416        if (adev->flags & ATA_DFLAG_NCQ) {
1417                if (sata_pmp_attached(adev->link->ap)) {
1418                        adev->flags &= ~ATA_DFLAG_NCQ;
1419                        ata_dev_info(adev,
1420                                "NCQ disabled for command-based switching\n");
1421                }
1422        }
1423}
1424
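    /*
     *      mv_qc_defer - decide whether a new command may be issued right now
     *      @qc: queued command being considered
     *
     *      Returns 0 when the command may proceed, or ATA_DEFER_PORT when it
     *      must wait: during delayed error handling, while another link holds
     *      the port exclusively, or when the command cannot be mixed with the
     *      NCQ commands already queued via EDMA.
     */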
1425static int mv_qc_defer(struct ata_queued_cmd *qc)
1426{
1427        struct ata_link *link = qc->dev->link;
1428        struct ata_port *ap = link->ap;
1429        struct mv_port_priv *pp = ap->private_data;
1430
1431        /*
1432         * Don't allow new commands if we're in a delayed EH state
1433         * for NCQ and/or FIS-based switching.
1434         */
1435        if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
1436                return ATA_DEFER_PORT;
1437
1438        /*
1439         * PIO commands need an exclusive link: no other commands (DMA or
1440         * PIO) can run concurrently.  Set excl_link when we want to send a
1441         * PIO command in DMA mode, or a non-NCQ command in NCQ mode.  When
1442         * we receive a command from that link and there are no outstanding
1443         * commands, mark a flag to clear excl_link and let the command go
1444         * through.
1445         */
1446        if (unlikely(ap->excl_link)) {
1447                if (link == ap->excl_link) {
1448                        if (ap->nr_active_links)
1449                                return ATA_DEFER_PORT;
1450                        qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
1451                        return 0;
1452                } else
1453                        return ATA_DEFER_PORT;
1454        }
1455
1456        /*
1457         * If the port is completely idle, then allow the new qc.
1458         */
1459        if (ap->nr_active_links == 0)
1460                return 0;
1461
1462        /*
1463         * The port is operating in host queuing mode (EDMA) with NCQ
1464         * enabled, allow multiple NCQ commands.  EDMA also allows
1465         * queueing multiple DMA commands but libata core currently
1466         * doesn't allow it.
1467         */
1468        if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
1469            (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
1470                if (ata_is_ncq(qc->tf.protocol))
1471                        return 0;
1472                else {
1473                        ap->excl_link = link;
1474                        return ATA_DEFER_PORT;
1475                }
1476        }
1477
1478        return ATA_DEFER_PORT;
1479}
1480
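    /*
     *      mv_config_fbs - configure FIS-based switching for a port
     *      @ap: ATA channel to manipulate
     *      @want_ncq: non-zero if NCQ will be used together with FBS
     *      @want_fbs: non-zero to enable FIS-based switching
     *
     *      Computes new values for the cached FISCFG, LTMODE and EDMA_HALTCOND
     *      registers and pushes them out through the cached-register helper.
     */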
1481static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
1482{
1483        struct mv_port_priv *pp = ap->private_data;
1484        void __iomem *port_mmio;
1485
1486        u32 fiscfg,   *old_fiscfg   = &pp->cached.fiscfg;
1487        u32 ltmode,   *old_ltmode   = &pp->cached.ltmode;
1488        u32 haltcond, *old_haltcond = &pp->cached.haltcond;
1489
1490        ltmode   = *old_ltmode & ~LTMODE_BIT8;
1491        haltcond = *old_haltcond | EDMA_ERR_DEV;
1492
1493        if (want_fbs) {
1494                fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
1495                ltmode = *old_ltmode | LTMODE_BIT8;
1496                if (want_ncq)
1497                        haltcond &= ~EDMA_ERR_DEV;
1498                else
1499                        fiscfg |=  FISCFG_WAIT_DEV_ERR;
1500        } else {
1501                fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
1502        }
1503
1504        port_mmio = mv_ap_base(ap);
1505        mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg);
1506        mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode);
1507        mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond);
1508}
1509
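    /*
     *      mv_60x1_errata_sata25 - apply part 1 of the 88SX60x1 FEr SATA#25 fix
     *      @ap: ATA channel to manipulate
     *      @want_ncq: non-zero if NCQ is about to be used on this port
     *
     *      Toggles bit 22 of the global GPIO_PORT_CTL register: set while NCQ
     *      is in use, cleared otherwise.  The register is rewritten only when
     *      the value actually changes.
     */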
1510static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
1511{
1512        struct mv_host_priv *hpriv = ap->host->private_data;
1513        u32 old, new;
1514
1515        /* workaround for 88SX60x1 FEr SATA#25 (part 1) */
1516        old = readl(hpriv->base + GPIO_PORT_CTL);
1517        if (want_ncq)
1518                new = old | (1 << 22);
1519        else
1520                new = old & ~(1 << 22);
1521        if (new != old)
1522                writel(new, hpriv->base + GPIO_PORT_CTL);
1523}
1524
1525/*
1526 *      mv_bmdma_enable_iie - set a magic bit on GEN_IIE to allow bmdma
1527 *      @ap: Port being initialized
1528 *      @enable_bmdma: non-zero to enable basic DMA, zero to disable it
1529 *
1530 *      There are two DMA modes on these chips:  basic DMA, and EDMA.
1531 *
1532 *      Bit-0 of the "EDMA RESERVED" register enables/disables use
1533 *      of basic DMA on the GEN_IIE versions of the chips.  This bit
1534 *      survives EDMA resets, and must be set for basic DMA to
1535 *      function, and should be cleared when EDMA is active.
1536 */
1537static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
1538{
1539        struct mv_port_priv *pp = ap->private_data;
1540        u32 new, *old = &pp->cached.unknown_rsvd;
1541
1542        if (enable_bmdma)
1543                new = *old | 1;
1544        else
1545                new = *old & ~1;
1546        mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new);
1547}
1548
1549/*
1550 * SOC chips have an issue whereby the HDD LEDs don't always blink
1551 * during I/O when NCQ is enabled. Enabling a special "LED blink" mode
1552 * of the SOC takes care of it, generating a steady blink rate when
1553 * any drive on the chip is active.
1554 *
1555 * Unfortunately, the blink mode is a global hardware setting for the SOC,
1556 * so we must use it whenever at least one port on the SOC has NCQ enabled.
1557 *
1558 * We turn "LED blink" off when NCQ is not in use anywhere, because the normal
1559 * LED operation works then, and provides better (more accurate) feedback.
1560 *
1561 * Note that this code assumes that an SOC never has more than one HC onboard.
1562 */
1563static void mv_soc_led_blink_enable(struct ata_port *ap)
1564{
1565        struct ata_host *host = ap->host;
1566        struct mv_host_priv *hpriv = host->private_data;
1567        void __iomem *hc_mmio;
1568        u32 led_ctrl;
1569
1570        if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)
1571                return;
1572        hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN;
1573        hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
1574        led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
1575        writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
1576}
1577
1578static void mv_soc_led_blink_disable(struct ata_port *ap)
1579{
1580        struct ata_host *host = ap->host;
1581        struct mv_host_priv *hpriv = host->private_data;
1582        void __iomem *hc_mmio;
1583        u32 led_ctrl;
1584        unsigned int port;
1585
1586        if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN))
1587                return;
1588
1589        /* disable led-blink only if no ports are using NCQ */
1590        for (port = 0; port < hpriv->n_ports; port++) {
1591                struct ata_port *this_ap = host->ports[port];
1592                struct mv_port_priv *pp = this_ap->private_data;
1593
1594                if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
1595                        return;
1596        }
1597
1598        hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN;
1599        hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
1600        led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
1601        writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
1602}
1603
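    /*
     *      mv_edma_cfg - compute and program the EDMA configuration register
     *      @ap: ATA channel to manipulate
     *      @want_ncq: non-zero if the port is being configured for NCQ
     *      @want_edma: non-zero if the EDMA engine itself will be used
     *
     *      Builds a chip-generation-specific EDMA_CFG value, applies the
     *      related errata workarounds and FBS/LED quirks, updates the
     *      NCQ/FBS port flags, and writes the result to EDMA_CFG.
     */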
1604static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
1605{
1606        u32 cfg;
1607        struct mv_port_priv *pp    = ap->private_data;
1608        struct mv_host_priv *hpriv = ap->host->private_data;
1609        void __iomem *port_mmio    = mv_ap_base(ap);
1610
1611        /* set up non-NCQ EDMA configuration */
1612        cfg = EDMA_CFG_Q_DEPTH;         /* always 0x1f for *all* chips */
1613        pp->pp_flags &=
1614          ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
1615
1616        if (IS_GEN_I(hpriv))
1617                cfg |= (1 << 8);        /* enab config burst size mask */
1618
1619        else if (IS_GEN_II(hpriv)) {
1620                cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1621                mv_60x1_errata_sata25(ap, want_ncq);
1622
1623        } else if (IS_GEN_IIE(hpriv)) {
1624                int want_fbs = sata_pmp_attached(ap);
1625                /*
1626                 * Possible future enhancement:
1627                 *
1628                 * The chip can use FBS with non-NCQ, if we allow it,
1629                 * But first we need to have the error handling in place
1630                 * for this mode (datasheet section 7.3.15.4.2.3).
1631                 * So disallow non-NCQ FBS for now.
1632                 */
1633                want_fbs &= want_ncq;
1634
1635                mv_config_fbs(ap, want_ncq, want_fbs);
1636
1637                if (want_fbs) {
1638                        pp->pp_flags |= MV_PP_FLAG_FBS_EN;
1639                        cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
1640                }
1641
1642                cfg |= (1 << 23);       /* do not mask PM field in rx'd FIS */
1643                if (want_edma) {
1644                        cfg |= (1 << 22); /* enab 4-entry host queue cache */
1645                        if (!IS_SOC(hpriv))
1646                                cfg |= (1 << 18); /* enab early completion */
1647                }
1648                if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
1649                        cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
1650                mv_bmdma_enable_iie(ap, !want_edma);
1651
1652                if (IS_SOC(hpriv)) {
1653                        if (want_ncq)
1654                                mv_soc_led_blink_enable(ap);
1655                        else
1656                                mv_soc_led_blink_disable(ap);
1657                }
1658        }
1659
1660        if (want_ncq) {
1661                cfg |= EDMA_CFG_NCQ;
1662                pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
1663        }
1664
1665        writelfl(cfg, port_mmio + EDMA_CFG);
1666}
1667
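    /*
     *      mv_port_free_dma_mem - release a port's CRQB/CRPB/sg_tbl DMA memory
     *      @ap: ATA channel to manipulate
     *
     *      Frees the request and response rings and the per-tag scatter/gather
     *      tables back to their dma_pools.  On Gen-I only sg_tbl[0] is a real
     *      allocation (the other tags alias it), so only that one is freed.
     */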
1668static void mv_port_free_dma_mem(struct ata_port *ap)
1669{
1670        struct mv_host_priv *hpriv = ap->host->private_data;
1671        struct mv_port_priv *pp = ap->private_data;
1672        int tag;
1673
1674        if (pp->crqb) {
1675                dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1676                pp->crqb = NULL;
1677        }
1678        if (pp->crpb) {
1679                dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1680                pp->crpb = NULL;
1681        }
1682        /*
1683         * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1684         * For later hardware, we have one unique sg_tbl per NCQ tag.
1685         */
1686        for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1687                if (pp->sg_tbl[tag]) {
1688                        if (tag == 0 || !IS_GEN_I(hpriv))
1689                                dma_pool_free(hpriv->sg_tbl_pool,
1690                                              pp->sg_tbl[tag],
1691                                              pp->sg_tbl_dma[tag]);
1692                        pp->sg_tbl[tag] = NULL;
1693                }
1694        }
1695}
1696
1697/**
1698 *      mv_port_start - Port specific init/start routine.
1699 *      @ap: ATA channel to manipulate
1700 *
1701 *      Allocate and point to DMA memory, init port private memory,
1702 *      zero indices.
1703 *
1704 *      LOCKING:
1705 *      Inherited from caller.
1706 */
1707static int mv_port_start(struct ata_port *ap)
1708{
1709        struct device *dev = ap->host->dev;
1710        struct mv_host_priv *hpriv = ap->host->private_data;
1711        struct mv_port_priv *pp;
1712        unsigned long flags;
1713        int tag;
1714
1715        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1716        if (!pp)
1717                return -ENOMEM;
1718        ap->private_data = pp;
1719
1720        pp->crqb = dma_pool_zalloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1721        if (!pp->crqb)
1722                return -ENOMEM;
1723
1724        pp->crpb = dma_pool_zalloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1725        if (!pp->crpb)
1726                goto out_port_free_dma_mem;
1727
1728        /* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
1729        if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
1730                ap->flags |= ATA_FLAG_AN;
1731        /*
1732         * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1733         * For later hardware, we need one unique sg_tbl per NCQ tag.
1734         */
1735        for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1736                if (tag == 0 || !IS_GEN_I(hpriv)) {
1737                        pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1738                                              GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1739                        if (!pp->sg_tbl[tag])
1740                                goto out_port_free_dma_mem;
1741                } else {
1742                        pp->sg_tbl[tag]     = pp->sg_tbl[0];
1743                        pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1744                }
1745        }
1746
1747        spin_lock_irqsave(ap->lock, flags);
1748        mv_save_cached_regs(ap);
1749        mv_edma_cfg(ap, 0, 0);
1750        spin_unlock_irqrestore(ap->lock, flags);
1751
1752        return 0;
1753
1754out_port_free_dma_mem:
1755        mv_port_free_dma_mem(ap);
1756        return -ENOMEM;
1757}
1758
1759/**
1760 *      mv_port_stop - Port specific cleanup/stop routine.
1761 *      @ap: ATA channel to manipulate
1762 *
1763 *      Stop DMA, cleanup port memory.
1764 *
1765 *      LOCKING:
1766 *      This routine uses the host lock to protect the DMA stop.
1767 */
1768static void mv_port_stop(struct ata_port *ap)
1769{
1770        unsigned long flags;
1771
1772        spin_lock_irqsave(ap->lock, flags);
1773        mv_stop_edma(ap);
1774        mv_enable_port_irqs(ap, 0);
1775        spin_unlock_irqrestore(ap->lock, flags);
1776        mv_port_free_dma_mem(ap);
1777}
1778
1779/**
1780 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1781 *      @qc: queued command whose SG list to source from
1782 *
1783 *      Populate the SG list and mark the last entry.
1784 *
1785 *      LOCKING:
1786 *      Inherited from caller.
1787 */
1788static void mv_fill_sg(struct ata_queued_cmd *qc)
1789{
1790        struct mv_port_priv *pp = qc->ap->private_data;
1791        struct scatterlist *sg;
1792        struct mv_sg *mv_sg, *last_sg = NULL;
1793        unsigned int si;
1794
1795        mv_sg = pp->sg_tbl[qc->hw_tag];
1796        for_each_sg(qc->sg, sg, qc->n_elem, si) {
1797                dma_addr_t addr = sg_dma_address(sg);
1798                u32 sg_len = sg_dma_len(sg);
1799
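                    /*
                     * Split this segment so that no single ePRD entry crosses
                     * a 64KB boundary.  For example (hypothetical numbers), a
                     * segment of 0x20 bytes at bus address 0x1234fff0 would be
                     * emitted as two entries: 0x10 bytes at 0x1234fff0 and
                     * 0x10 bytes at 0x12350000.
                     */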
1800                while (sg_len) {
1801                        u32 offset = addr & 0xffff;
1802                        u32 len = sg_len;
1803
1804                        if (offset + len > 0x10000)
1805                                len = 0x10000 - offset;
1806
1807                        mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1808                        mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1809                        mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1810                        mv_sg->reserved = 0;
1811
1812                        sg_len -= len;
1813                        addr += len;
1814
1815                        last_sg = mv_sg;
1816                        mv_sg++;
1817                }
1818        }
1819
1820        if (likely(last_sg))
1821                last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1822        mb(); /* ensure data structure is visible to the chipset */
1823}
1824
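    /*
     *      mv_crqb_pack_cmd - pack one register write into a CRQB command word
     *      @cmdw: destination command word within the CRQB
     *      @data: value to be written to the taskfile register
     *      @addr: taskfile register address code (ATA_REG_*)
     *      @last: non-zero if this is the final command word of the CRQB
     *
     *      Each CRQB "ata_cmd" word asks the EDMA engine to write @data into
     *      the taskfile register selected by @addr; the CS and (optionally)
     *      LAST control bits are OR'd in and the result stored little-endian.
     */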
1825static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1826{
1827        u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1828                (last ? CRQB_CMD_LAST : 0);
1829        *cmdw = cpu_to_le16(tmp);
1830}
1831
1832/**
1833 *      mv_sff_irq_clear - Clear hardware interrupt after DMA.
1834 *      @ap: Port associated with this ATA transaction.
1835 *
1836 *      We need this only for ATAPI bmdma transactions,
1837 *      as otherwise we experience spurious interrupts
1838 *      after libata-sff handles the bmdma interrupts.
1839 */
1840static void mv_sff_irq_clear(struct ata_port *ap)
1841{
1842        mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);
1843}
1844
1845/**
1846 *      mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA.
1847 *      @qc: queued command to check for chipset/DMA compatibility.
1848 *
1849 *      The bmdma engines cannot handle speculative data sizes
1850 *      (bytecount under/over flow).  So only allow DMA for
1851 *      data transfer commands with known data sizes.
1852 *
1853 *      LOCKING:
1854 *      Inherited from caller.
1855 */
1856static int mv_check_atapi_dma(struct ata_queued_cmd *qc)
1857{
1858        struct scsi_cmnd *scmd = qc->scsicmd;
1859
1860        if (scmd) {
1861                switch (scmd->cmnd[0]) {
1862                case READ_6:
1863                case READ_10:
1864                case READ_12:
1865                case WRITE_6:
1866                case WRITE_10:
1867                case WRITE_12:
1868                case GPCMD_READ_CD:
1869                case GPCMD_SEND_DVD_STRUCTURE:
1870                case GPCMD_SEND_CUE_SHEET:
1871                        return 0; /* DMA is safe */
1872                }
1873        }
1874        return -EOPNOTSUPP; /* use PIO instead */
1875}
1876
1877/**
1878 *      mv_bmdma_setup - Set up BMDMA transaction
1879 *      @qc: queued command to prepare DMA for.
1880 *
1881 *      LOCKING:
1882 *      Inherited from caller.
1883 */
1884static void mv_bmdma_setup(struct ata_queued_cmd *qc)
1885{
1886        struct ata_port *ap = qc->ap;
1887        void __iomem *port_mmio = mv_ap_base(ap);
1888        struct mv_port_priv *pp = ap->private_data;
1889
1890        mv_fill_sg(qc);
1891
1892        /* clear all DMA cmd bits */
1893        writel(0, port_mmio + BMDMA_CMD);
1894
1895        /* load PRD table addr. */
1896        writel((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16,
1897                port_mmio + BMDMA_PRD_HIGH);
1898        writelfl(pp->sg_tbl_dma[qc->hw_tag],
1899                port_mmio + BMDMA_PRD_LOW);
1900
1901        /* issue r/w command */
1902        ap->ops->sff_exec_command(ap, &qc->tf);
1903}
1904
1905/**
1906 *      mv_bmdma_start - Start a BMDMA transaction
1907 *      @qc: queued command to start DMA on.
1908 *
1909 *      LOCKING:
1910 *      Inherited from caller.
1911 */
1912static void mv_bmdma_start(struct ata_queued_cmd *qc)
1913{
1914        struct ata_port *ap = qc->ap;
1915        void __iomem *port_mmio = mv_ap_base(ap);
1916        unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
1917        u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START;
1918
1919        /* start host DMA transaction */
1920        writelfl(cmd, port_mmio + BMDMA_CMD);
1921}
1922
1923/**
1924 *      mv_bmdma_stop_ap - Stop BMDMA transfer
1925 *      @ap: port to stop
1926 *
1927 *      Clears the ATA_DMA_START flag in the bmdma control register
1928 *
1929 *      LOCKING:
1930 *      Inherited from caller.
1931 */
1932static void mv_bmdma_stop_ap(struct ata_port *ap)
1933{
1934        void __iomem *port_mmio = mv_ap_base(ap);
1935        u32 cmd;
1936
1937        /* clear start/stop bit */
1938        cmd = readl(port_mmio + BMDMA_CMD);
1939        if (cmd & ATA_DMA_START) {
1940                cmd &= ~ATA_DMA_START;
1941                writelfl(cmd, port_mmio + BMDMA_CMD);
1942
1943                /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
1944                ata_sff_dma_pause(ap);
1945        }
1946}
1947
1948static void mv_bmdma_stop(struct ata_queued_cmd *qc)
1949{
1950        mv_bmdma_stop_ap(qc->ap);
1951}
1952
1953/**
1954 *      mv_bmdma_status - Read BMDMA status
1955 *      @ap: port for which to retrieve DMA status.
1956 *
1957 *      Read and return equivalent of the sff BMDMA status register.
1958 *
1959 *      LOCKING:
1960 *      Inherited from caller.
1961 */
1962static u8 mv_bmdma_status(struct ata_port *ap)
1963{
1964        void __iomem *port_mmio = mv_ap_base(ap);
1965        u32 reg, status;
1966
1967        /*
1968         * Other bits are valid only if ATA_DMA_ACTIVE==0,
1969         * and the ATA_DMA_INTR bit doesn't exist.
1970         */
1971        reg = readl(port_mmio + BMDMA_STATUS);
1972        if (reg & ATA_DMA_ACTIVE)
1973                status = ATA_DMA_ACTIVE;
1974        else if (reg & ATA_DMA_ERR)
1975                status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
1976        else {
1977                /*
1978                 * Just because DMA_ACTIVE is 0 (DMA completed),
1979                 * this does _not_ mean the device is "done".
1980                 * So we should not yet be signalling ATA_DMA_INTR
1981                 * in some cases.  Eg. DSM/TRIM, and perhaps others.
1982                 */
1983                mv_bmdma_stop_ap(ap);
1984                if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
1985                        status = 0;
1986                else
1987                        status = ATA_DMA_INTR;
1988        }
1989        return status;
1990}
1991
1992static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
1993{
1994        struct ata_taskfile *tf = &qc->tf;
1995        /*
1996         * Workaround for 88SX60x1 FEr SATA#24.
1997         *
1998         * Chip may corrupt WRITEs if multi_count >= 4kB.
1999         * Note that READs are unaffected.
2000         *
2001         * It's not clear if this errata really means "4K bytes",
2002         * or if it always happens for multi_count > 7
2003         * regardless of device sector_size.
2004         *
2005         * So, for safety, any write with multi_count > 7
2006         * gets converted here into a regular PIO write instead:
2007         */
2008        if ((tf->flags & ATA_TFLAG_WRITE) && is_multi_taskfile(tf)) {
2009                if (qc->dev->multi_count > 7) {
2010                        switch (tf->command) {
2011                        case ATA_CMD_WRITE_MULTI:
2012                                tf->command = ATA_CMD_PIO_WRITE;
2013                                break;
2014                        case ATA_CMD_WRITE_MULTI_FUA_EXT:
2015                                tf->flags &= ~ATA_TFLAG_FUA; /* ugh */
2016                                fallthrough;
2017                        case ATA_CMD_WRITE_MULTI_EXT:
2018                                tf->command = ATA_CMD_PIO_WRITE_EXT;
2019                                break;
2020                        }
2021                }
2022        }
2023}
2024
2025/**
2026 *      mv_qc_prep - Host specific command preparation.
2027 *      @qc: queued command to prepare
2028 *
2029 *      This routine simply redirects to the general purpose routine
2030 *      if command is not DMA.  Else, it handles prep of the CRQB
2031 *      (command request block), does some sanity checking, and calls
2032 *      the SG load routine.
2033 *
2034 *      LOCKING:
2035 *      Inherited from caller.
2036 */
2037static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc)
2038{
2039        struct ata_port *ap = qc->ap;
2040        struct mv_port_priv *pp = ap->private_data;
2041        __le16 *cw;
2042        struct ata_taskfile *tf = &qc->tf;
2043        u16 flags = 0;
2044        unsigned in_index;
2045
2046        switch (tf->protocol) {
2047        case ATA_PROT_DMA:
2048                if (tf->command == ATA_CMD_DSM)
2049                        return AC_ERR_OK;
2050                fallthrough;
2051        case ATA_PROT_NCQ:
2052                break;  /* continue below */
2053        case ATA_PROT_PIO:
2054                mv_rw_multi_errata_sata24(qc);
2055                return AC_ERR_OK;
2056        default:
2057                return AC_ERR_OK;
2058        }
2059
2060        /* Fill in command request block
2061         */
2062        if (!(tf->flags & ATA_TFLAG_WRITE))
2063                flags |= CRQB_FLAG_READ;
2064        WARN_ON(MV_MAX_Q_DEPTH <= qc->hw_tag);
2065        flags |= qc->hw_tag << CRQB_TAG_SHIFT;
2066        flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
2067
2068        /* get current queue index from software */
2069        in_index = pp->req_idx;
2070
2071        pp->crqb[in_index].sg_addr =
2072                cpu_to_le32(pp->sg_tbl_dma[qc->hw_tag] & 0xffffffff);
2073        pp->crqb[in_index].sg_addr_hi =
2074                cpu_to_le32((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16);
2075        pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
2076
2077        cw = &pp->crqb[in_index].ata_cmd[0];
2078
2079        /* Sadly, the CRQB cannot accommodate all registers--there are
2080         * only 11 bytes...so we must pick and choose required
2081         * registers based on the command.  So, we drop feature and
2082         * hob_feature for [RW] DMA commands, but they are needed for
2083         * NCQ.  NCQ will drop hob_nsect, which is not needed there
2084         * (nsect is used only for the tag; feat/hob_feat hold true nsect).
2085         */
2086        switch (tf->command) {
2087        case ATA_CMD_READ:
2088        case ATA_CMD_READ_EXT:
2089        case ATA_CMD_WRITE:
2090        case ATA_CMD_WRITE_EXT:
2091        case ATA_CMD_WRITE_FUA_EXT:
2092                mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
2093                break;
2094        case ATA_CMD_FPDMA_READ:
2095        case ATA_CMD_FPDMA_WRITE:
2096                mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
2097                mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
2098                break;
2099        default:
2100                /* The only other commands EDMA supports in non-queued and
2101                 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
2102                 * of which are defined/used by Linux.  If we get here, this
2103                 * driver needs work.
2104                 */
2105                ata_port_err(ap, "%s: unsupported command: %.2x\n", __func__,
2106                                tf->command);
2107                return AC_ERR_INVALID;
2108        }
2109        mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
2110        mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
2111        mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
2112        mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
2113        mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
2114        mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
2115        mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
2116        mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
2117        mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);    /* last */
2118
2119        if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2120                return AC_ERR_OK;
2121        mv_fill_sg(qc);
2122
2123        return AC_ERR_OK;
2124}
2125
2126/**
2127 *      mv_qc_prep_iie - Host specific command preparation.
2128 *      @qc: queued command to prepare
2129 *
2130 *      This routine simply redirects to the general purpose routine
2131 *      if command is not DMA.  Else, it handles prep of the CRQB
2132 *      (command request block), does some sanity checking, and calls
2133 *      the SG load routine.
2134 *
2135 *      LOCKING:
2136 *      Inherited from caller.
2137 */
2138static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc)
2139{
2140        struct ata_port *ap = qc->ap;
2141        struct mv_port_priv *pp = ap->private_data;
2142        struct mv_crqb_iie *crqb;
2143        struct ata_taskfile *tf = &qc->tf;
2144        unsigned in_index;
2145        u32 flags = 0;
2146
2147        if ((tf->protocol != ATA_PROT_DMA) &&
2148            (tf->protocol != ATA_PROT_NCQ))
2149                return AC_ERR_OK;
2150        if (tf->command == ATA_CMD_DSM)
2151                return AC_ERR_OK;  /* use bmdma for this */
2152
2153        /* Fill in Gen IIE command request block */
2154        if (!(tf->flags & ATA_TFLAG_WRITE))
2155                flags |= CRQB_FLAG_READ;
2156
2157        WARN_ON(MV_MAX_Q_DEPTH <= qc->hw_tag);
2158        flags |= qc->hw_tag << CRQB_TAG_SHIFT;
2159        flags |= qc->hw_tag << CRQB_HOSTQ_SHIFT;
2160        flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
2161
2162        /* get current queue index from software */
2163        in_index = pp->req_idx;
2164
2165        crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
2166        crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->hw_tag] & 0xffffffff);
2167        crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16);
2168        crqb->flags = cpu_to_le32(flags);
2169
2170        crqb->ata_cmd[0] = cpu_to_le32(
2171                        (tf->command << 16) |
2172                        (tf->feature << 24)
2173                );
2174        crqb->ata_cmd[1] = cpu_to_le32(
2175                        (tf->lbal << 0) |
2176                        (tf->lbam << 8) |
2177                        (tf->lbah << 16) |
2178                        (tf->device << 24)
2179                );
2180        crqb->ata_cmd[2] = cpu_to_le32(
2181                        (tf->hob_lbal << 0) |
2182                        (tf->hob_lbam << 8) |
2183                        (tf->hob_lbah << 16) |
2184                        (tf->hob_feature << 24)
2185                );
2186        crqb->ata_cmd[3] = cpu_to_le32(
2187                        (tf->nsect << 0) |
2188                        (tf->hob_nsect << 8)
2189                );
2190
2191        if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2192                return AC_ERR_OK;
2193        mv_fill_sg(qc);
2194
2195        return AC_ERR_OK;
2196}
2197
2198/**
2199 *      mv_sff_check_status - fetch device status, if valid
2200 *      @ap: ATA port to fetch status from
2201 *
2202 *      When using command issue via mv_qc_issue_fis(),
2203 *      the initial ATA_BUSY state does not show up in the
2204 *      ATA status (shadow) register.  This can confuse libata!
2205 *
2206 *      So we have a hook here to fake ATA_BUSY for that situation,
2207 *      until the first time a BUSY, DRQ, or ERR bit is seen.
2208 *
2209 *      The rest of the time, it simply returns the ATA status register.
2210 */
2211static u8 mv_sff_check_status(struct ata_port *ap)
2212{
2213        u8 stat = ioread8(ap->ioaddr.status_addr);
2214        struct mv_port_priv *pp = ap->private_data;
2215
2216        if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) {
2217                if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR))
2218                        pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY;
2219                else
2220                        stat = ATA_BUSY;
2221        }
2222        return stat;
2223}
2224
2225/**
2226 *      mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register
2227 *      @ap: ATA port to send a FIS
2228 *      @fis: fis to be sent
2229 *      @nwords: number of 32-bit words in the fis
2230 */
2231static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
2232{
2233        void __iomem *port_mmio = mv_ap_base(ap);
2234        u32 ifctl, old_ifctl, ifstat;
2235        int i, timeout = 200, final_word = nwords - 1;
2236
2237        /* Initiate FIS transmission mode */
2238        old_ifctl = readl(port_mmio + SATA_IFCTL);
2239        ifctl = 0x100 | (old_ifctl & 0xf);
2240        writelfl(ifctl, port_mmio + SATA_IFCTL);
2241
2242        /* Send all words of the FIS except for the final word */
2243        for (i = 0; i < final_word; ++i)
2244                writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS);
2245
2246        /* Flag end-of-transmission, and then send the final word */
2247        writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL);
2248        writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS);
2249
2250        /*
2251         * Wait for FIS transmission to complete.
2252         * This typically takes just a single iteration.
2253         */
2254        do {
2255                ifstat = readl(port_mmio + SATA_IFSTAT);
2256        } while (!(ifstat & 0x1000) && --timeout);
2257
2258        /* Restore original port configuration */
2259        writelfl(old_ifctl, port_mmio + SATA_IFCTL);
2260
2261        /* See if it worked */
2262        if ((ifstat & 0x3000) != 0x1000) {
2263                ata_port_warn(ap, "%s transmission error, ifstat=%08x\n",
2264                              __func__, ifstat);
2265                return AC_ERR_OTHER;
2266        }
2267        return 0;
2268}
2269
2270/**
2271 *      mv_qc_issue_fis - Issue a command directly as a FIS
2272 *      @qc: queued command to start
2273 *
2274 *      Note that the ATA shadow registers are not updated
2275 *      after command issue, so the device will appear "READY"
2276 *      if polled, even while it is BUSY processing the command.
2277 *
2278 *      So we use a status hook to fake ATA_BUSY until the drive changes state.
2279 *
2280 *      Note: we don't get updated shadow regs on *completion*
2281 *      of non-data commands. So avoid sending them via this function,
2282 *      as they will appear to have completed immediately.
2283 *
2284 *      GEN_IIE has special registers that we could get the result tf from,
2285 *      but earlier chipsets do not.  For now, we ignore those registers.
2286 */
2287static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
2288{
2289        struct ata_port *ap = qc->ap;
2290        struct mv_port_priv *pp = ap->private_data;
2291        struct ata_link *link = qc->dev->link;
2292        u32 fis[5];
2293        int err = 0;
2294
2295        ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis);
2296        err = mv_send_fis(ap, fis, ARRAY_SIZE(fis));
2297        if (err)
2298                return err;
2299
2300        switch (qc->tf.protocol) {
2301        case ATAPI_PROT_PIO:
2302                pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
2303                fallthrough;
2304        case ATAPI_PROT_NODATA:
2305                ap->hsm_task_state = HSM_ST_FIRST;
2306                break;
2307        case ATA_PROT_PIO:
2308                pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
2309                if (qc->tf.flags & ATA_TFLAG_WRITE)
2310                        ap->hsm_task_state = HSM_ST_FIRST;
2311                else
2312                        ap->hsm_task_state = HSM_ST;
2313                break;
2314        default:
2315                ap->hsm_task_state = HSM_ST_LAST;
2316                break;
2317        }
2318
2319        if (qc->tf.flags & ATA_TFLAG_POLLING)
2320                ata_sff_queue_pio_task(link, 0);
2321        return 0;
2322}
2323
2324/**
2325 *      mv_qc_issue - Initiate a command to the host
2326 *      @qc: queued command to start
2327 *
2328 *      This routine simply redirects to the general purpose routine
2329 *      if command is not DMA.  Else, it sanity checks our local
2330 *      caches of the request producer/consumer indices then enables
2331 *      DMA and bumps the request producer index.
2332 *
2333 *      LOCKING:
2334 *      Inherited from caller.
2335 */
2336static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
2337{
2338        static int limit_warnings = 10;
2339        struct ata_port *ap = qc->ap;
2340        void __iomem *port_mmio = mv_ap_base(ap);
2341        struct mv_port_priv *pp = ap->private_data;
2342        u32 in_index;
2343        unsigned int port_irqs;
2344
2345        pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */
2346
2347        switch (qc->tf.protocol) {
2348        case ATA_PROT_DMA:
2349                if (qc->tf.command == ATA_CMD_DSM) {
2350                        if (!ap->ops->bmdma_setup)  /* no bmdma on GEN_I */
2351                                return AC_ERR_OTHER;
2352                        break;  /* use bmdma for this */
2353                }
2354                fallthrough;
2355        case ATA_PROT_NCQ:
2356                mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
2357                pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
2358                in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
2359
2360                /* Write the request in pointer to kick the EDMA to life */
2361                writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
2362                                        port_mmio + EDMA_REQ_Q_IN_PTR);
2363                return 0;
2364
2365        case ATA_PROT_PIO:
2366                /*
2367                 * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
2368                 *
2369                 * Someday, we might implement special polling workarounds
2370                 * for these, but it all seems rather unnecessary since we
2371                 * normally use only DMA for commands which transfer more
2372                 * than a single block of data.
2373                 *
2374                 * Much of the time, this could just work regardless.
2375                 * So for now, just log the incident, and allow the attempt.
2376                 */
2377                if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
2378                        --limit_warnings;
2379                        ata_link_warn(qc->dev->link, DRV_NAME
2380                                      ": attempting PIO w/multiple DRQ: "
2381                                      "this may fail due to h/w errata\n");
2382                }
2383                fallthrough;
2384        case ATA_PROT_NODATA:
2385        case ATAPI_PROT_PIO:
2386        case ATAPI_PROT_NODATA:
2387                if (ap->flags & ATA_FLAG_PIO_POLLING)
2388                        qc->tf.flags |= ATA_TFLAG_POLLING;
2389                break;
2390        }
2391
2392        if (qc->tf.flags & ATA_TFLAG_POLLING)
2393                port_irqs = ERR_IRQ;    /* mask device interrupt when polling */
2394        else
2395                port_irqs = ERR_IRQ | DONE_IRQ; /* unmask all interrupts */
2396
2397        /*
2398         * We're about to send a non-EDMA capable command to the
2399         * port.  Turn off EDMA so there won't be problems accessing
2400         * shadow block, etc registers.
2401         */
2402        mv_stop_edma(ap);
2403        mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
2404        mv_pmp_select(ap, qc->dev->link->pmp);
2405
2406        if (qc->tf.command == ATA_CMD_READ_LOG_EXT) {
2407                struct mv_host_priv *hpriv = ap->host->private_data;
2408                /*
2409                 * Workaround for 88SX60x1 FEr SATA#25 (part 2).
2410                 *
2411                 * After any NCQ error, the READ_LOG_EXT command
2412                 * from libata-eh *must* use mv_qc_issue_fis().
2413                 * Otherwise it might fail, due to chip errata.
2414                 *
2415                 * Rather than special-case it, we'll just *always*
2416                 * use this method here for READ_LOG_EXT, making for
2417                 * easier testing.
2418                 */
2419                if (IS_GEN_II(hpriv))
2420                        return mv_qc_issue_fis(qc);
2421        }
2422        return ata_bmdma_qc_issue(qc);
2423}
2424
2425static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
2426{
2427        struct mv_port_priv *pp = ap->private_data;
2428        struct ata_queued_cmd *qc;
2429
2430        if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
2431                return NULL;
2432        qc = ata_qc_from_tag(ap, ap->link.active_tag);
2433        if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
2434                return qc;
2435        return NULL;
2436}
2437
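    /*
     *      mv_pmp_error_handler - error handler for PMP-attached ports
     *      @ap: ATA channel to manipulate
     *
     *      If a delayed-EH cycle is pending (FBS+NCQ device error), run NCQ
     *      error analysis on each PMP link that failed and freeze the port
     *      before handing control to the generic sata_pmp_error_handler().
     */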
2438static void mv_pmp_error_handler(struct ata_port *ap)
2439{
2440        unsigned int pmp, pmp_map;
2441        struct mv_port_priv *pp = ap->private_data;
2442
2443        if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
2444                /*
2445                 * Perform NCQ error analysis on failed PMPs
2446                 * before we freeze the port entirely.
2447                 *
2448                 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
2449                 */
2450                pmp_map = pp->delayed_eh_pmp_map;
2451                pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
2452                for (pmp = 0; pmp_map != 0; pmp++) {
2453                        unsigned int this_pmp = (1 << pmp);
2454                        if (pmp_map & this_pmp) {
2455                                struct ata_link *link = &ap->pmp_link[pmp];
2456                                pmp_map &= ~this_pmp;
2457                                ata_eh_analyze_ncq_error(link);
2458                        }
2459                }
2460                ata_port_freeze(ap);
2461        }
2462        sata_pmp_error_handler(ap);
2463}
2464
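    /*
     *      mv_get_err_pmp_map - fetch bitmap of PMP links with device errors
     *      @ap: ATA channel to manipulate
     *
     *      The upper 16 bits of SATA_TESTCTL carry a per-PMP-link error
     *      bitmap, which the FBS/NCQ error handling below uses to identify
     *      failed links; return them shifted down into bits 15:0.
     */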
2465static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
2466{
2467        void __iomem *port_mmio = mv_ap_base(ap);
2468
2469        return readl(port_mmio + SATA_TESTCTL) >> 16;
2470}
2471
2472static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
2473{
2474        unsigned int pmp;
2475
2476        /*
2477         * Initialize EH info for PMPs which saw device errors
2478         */
2479        for (pmp = 0; pmp_map != 0; pmp++) {
2480                unsigned int this_pmp = (1 << pmp);
2481                if (pmp_map & this_pmp) {
2482                        struct ata_link *link = &ap->pmp_link[pmp];
2483                        struct ata_eh_info *ehi = &link->eh_info;
2484
2485                        pmp_map &= ~this_pmp;
2486                        ata_ehi_clear_desc(ehi);
2487                        ata_ehi_push_desc(ehi, "dev err");
2488                        ehi->err_mask |= AC_ERR_DEV;
2489                        ehi->action |= ATA_EH_RESET;
2490                        ata_link_abort(link);
2491                }
2492        }
2493}
2494
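    /*
     *      mv_req_q_empty - check whether the EDMA request queue has drained
     *      @ap: ATA channel to manipulate
     *
     *      Returns 1 when the hardware request-queue in and out pointers are
     *      equal (nothing left for the EDMA engine to fetch), 0 otherwise.
     */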
2495static int mv_req_q_empty(struct ata_port *ap)
2496{
2497        void __iomem *port_mmio = mv_ap_base(ap);
2498        u32 in_ptr, out_ptr;
2499
2500        in_ptr  = (readl(port_mmio + EDMA_REQ_Q_IN_PTR)
2501                        >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2502        out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR)
2503                        >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2504        return (in_ptr == out_ptr);     /* 1 == queue_is_empty */
2505}
2506
2507static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
2508{
2509        struct mv_port_priv *pp = ap->private_data;
2510        int failed_links;
2511        unsigned int old_map, new_map;
2512
2513        /*
2514         * Device error during FBS+NCQ operation:
2515         *
2516         * Set a port flag to prevent further I/O being enqueued.
2517         * Leave the EDMA running to drain outstanding commands from this port.
2518         * Perform the post-mortem/EH only when all responses are complete.
2519         * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
2520         */
2521        if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
2522                pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
2523                pp->delayed_eh_pmp_map = 0;
2524        }
2525        old_map = pp->delayed_eh_pmp_map;
2526        new_map = old_map | mv_get_err_pmp_map(ap);
2527
2528        if (old_map != new_map) {
2529                pp->delayed_eh_pmp_map = new_map;
2530                mv_pmp_eh_prep(ap, new_map & ~old_map);
2531        }
2532        failed_links = hweight16(new_map);
2533
2534        ata_port_info(ap,
2535                      "%s: pmp_map=%04x qc_map=%04llx failed_links=%d nr_active_links=%d\n",
2536                      __func__, pp->delayed_eh_pmp_map,
2537                      ap->qc_active, failed_links,
2538                      ap->nr_active_links);
2539
2540        if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
2541                mv_process_crpb_entries(ap, pp);
2542                mv_stop_edma(ap);
2543                mv_eh_freeze(ap);
2544                ata_port_info(ap, "%s: done\n", __func__);
2545                return 1;       /* handled */
2546        }
2547        ata_port_info(ap, "%s: waiting\n", __func__);
2548        return 1;       /* handled */
2549}
2550
2551static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
2552{
2553        /*
2554         * Possible future enhancement:
2555         *
2556         * FBS+non-NCQ operation is not yet implemented.
2557         * See related notes in mv_edma_cfg().
2558         *
2559         * Device error during FBS+non-NCQ operation:
2560         *
2561         * We need to snapshot the shadow registers for each failed command.
2562         * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
2563         */
2564        return 0;       /* not handled */
2565}
2566
2567static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
2568{
2569        struct mv_port_priv *pp = ap->private_data;
2570
2571        if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
2572                return 0;       /* EDMA was not active: not handled */
2573        if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
2574                return 0;       /* FBS was not active: not handled */
2575
2576        if (!(edma_err_cause & EDMA_ERR_DEV))
2577                return 0;       /* non DEV error: not handled */
2578        edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
2579        if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
2580                return 0;       /* other problems: not handled */
2581
2582        if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
2583                /*
2584                 * EDMA should NOT have self-disabled for this case.
2585                 * If it did, then something is wrong elsewhere,
2586                 * and we cannot handle it here.
2587                 */
2588                if (edma_err_cause & EDMA_ERR_SELF_DIS) {
2589                        ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
2590                                      __func__, edma_err_cause, pp->pp_flags);
2591                        return 0; /* not handled */
2592                }
2593                return mv_handle_fbs_ncq_dev_err(ap);
2594        } else {
2595                /*
2596                 * EDMA should have self-disabled for this case.
2597                 * If it did not, then something is wrong elsewhere,
2598                 * and we cannot handle it here.
2599                 */
2600                if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
2601                        ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
2602                                      __func__, edma_err_cause, pp->pp_flags);
2603                        return 0; /* not handled */
2604                }
2605                return mv_handle_fbs_non_ncq_dev_err(ap);
2606        }
2607        return 0;       /* not handled */
2608}
2609
2610static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
2611{
2612        struct ata_eh_info *ehi = &ap->link.eh_info;
2613        char *when = "idle";
2614
2615        ata_ehi_clear_desc(ehi);
2616        if (edma_was_enabled) {
2617                when = "EDMA enabled";
2618        } else {
2619                struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
2620                if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
2621                        when = "polling";
2622        }
2623        ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
2624        ehi->err_mask |= AC_ERR_OTHER;
2625        ehi->action   |= ATA_EH_RESET;
2626        ata_port_freeze(ap);
2627}
2628
2629/**
2630 *      mv_err_intr - Handle error interrupts on the port
2631 *      @ap: ATA channel to manipulate
2632 *
2633 *      Most cases require a full reset of the chip's state machine,
2634 *      which also performs a COMRESET.
2635 *      Also, if the port disabled DMA, update our cached copy to match.
2636 *
2637 *      LOCKING:
2638 *      Inherited from caller.
2639 */
2640static void mv_err_intr(struct ata_port *ap)
2641{
2642        void __iomem *port_mmio = mv_ap_base(ap);
2643        u32 edma_err_cause, eh_freeze_mask, serr = 0;
2644        u32 fis_cause = 0;
2645        struct mv_port_priv *pp = ap->private_data;
2646        struct mv_host_priv *hpriv = ap->host->private_data;
2647        unsigned int action = 0, err_mask = 0;
2648        struct ata_eh_info *ehi = &ap->link.eh_info;
2649        struct ata_queued_cmd *qc;
2650        int abort = 0;
2651
2652        /*
2653         * Read and clear the SError and err_cause bits.
2654         * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
2655         * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
2656         */
2657        sata_scr_read(&ap->link, SCR_ERROR, &serr);
2658        sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
2659
2660        edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE);
2661        if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
2662                fis_cause = readl(port_mmio + FIS_IRQ_CAUSE);
2663                writelfl(~fis_cause, port_mmio + FIS_IRQ_CAUSE);
2664        }
2665        writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE);
2666
2667        if (edma_err_cause & EDMA_ERR_DEV) {
2668                /*
2669                 * Device errors during FIS-based switching operation
2670                 * require special handling.
2671                 */
2672                if (mv_handle_dev_err(ap, edma_err_cause))
2673                        return;
2674        }
2675
2676        qc = mv_get_active_qc(ap);
2677        ata_ehi_clear_desc(ehi);
2678        ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
2679                          edma_err_cause, pp->pp_flags);
2680
2681        if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
2682                ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
2683                if (fis_cause & FIS_IRQ_CAUSE_AN) {
2684                        u32 ec = edma_err_cause &
2685                               ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
2686                        sata_async_notification(ap);
2687                        if (!ec)
2688                                return; /* Just an AN; no need for the nukes */
2689                        ata_ehi_push_desc(ehi, "SDB notify");
2690                }
2691        }
2692        /*
2693         * All generations share these EDMA error cause bits:
2694         */
2695        if (edma_err_cause & EDMA_ERR_DEV) {
2696                err_mask |= AC_ERR_DEV;
2697                action |= ATA_EH_RESET;
2698                ata_ehi_push_desc(ehi, "dev error");
2699        }
2700        if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
2701                        EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
2702                        EDMA_ERR_INTRL_PAR)) {
2703                err_mask |= AC_ERR_ATA_BUS;
2704                action |= ATA_EH_RESET;
2705                ata_ehi_push_desc(ehi, "parity error");
2706        }
2707        if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
2708                ata_ehi_hotplugged(ehi);
2709                ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
2710                        "dev disconnect" : "dev connect");
2711                action |= ATA_EH_RESET;
2712        }
2713
2714        /*
2715         * Gen-I has a different SELF_DIS bit,
2716         * different FREEZE bits, and no SERR bit:
2717         */
2718        if (IS_GEN_I(hpriv)) {
2719                eh_freeze_mask = EDMA_EH_FREEZE_5;
2720                if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
2721                        pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2722                        ata_ehi_push_desc(ehi, "EDMA self-disable");
2723                }
2724        } else {
2725                eh_freeze_mask = EDMA_EH_FREEZE;
2726                if (edma_err_cause & EDMA_ERR_SELF_DIS) {
2727                        pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2728                        ata_ehi_push_desc(ehi, "EDMA self-disable");
2729                }
2730                if (edma_err_cause & EDMA_ERR_SERR) {
2731                        ata_ehi_push_desc(ehi, "SError=%08x", serr);
2732                        err_mask |= AC_ERR_ATA_BUS;
2733                        action |= ATA_EH_RESET;
2734                }
2735        }
2736
2737        if (!err_mask) {
2738                err_mask = AC_ERR_OTHER;
2739                action |= ATA_EH_RESET;
2740        }
2741
2742        ehi->serror |= serr;
2743        ehi->action |= action;
2744
2745        if (qc)
2746                qc->err_mask |= err_mask;
2747        else
2748                ehi->err_mask |= err_mask;
2749
2750        if (err_mask == AC_ERR_DEV) {
2751                /*
2752                 * Cannot do ata_port_freeze() here,
2753                 * because it would kill PIO access,
2754                 * which is needed for further diagnosis.
2755                 */
2756                mv_eh_freeze(ap);
2757                abort = 1;
2758        } else if (edma_err_cause & eh_freeze_mask) {
2759                /*
2760                 * Note to self: ata_port_freeze() calls ata_port_abort()
2761                 */
2762                ata_port_freeze(ap);
2763        } else {
2764                abort = 1;
2765        }
2766
2767        if (abort) {
2768                if (qc)
2769                        ata_link_abort(qc->dev->link);
2770                else
2771                        ata_port_abort(ap);
2772        }
2773}
2774
2775static bool mv_process_crpb_response(struct ata_port *ap,
2776                struct mv_crpb *response, unsigned int tag, int ncq_enabled)
2777{
2778        u8 ata_status;
2779        u16 edma_status = le16_to_cpu(response->flags);
2780
2781        /*
2782         * edma_status from a response queue entry:
2783         *   LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only).
2784         *   MSB is saved ATA status from command completion.
2785         *   MSB is the saved ATA status from command completion.
2785         */
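    	/*
    	 * Worked example (illustration only, not from the datasheet):
    	 * edma_status == 0x5000 decodes as err_cause == 0x00 (no
    	 * non-device error) and ata_status == 0x50 (DRDY|DSC), for which
    	 * ac_err_mask() returns 0, so the CRPB is accepted below as a
    	 * normal completion.
    	 */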
2786        if (!ncq_enabled) {
2787                u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
2788                if (err_cause) {
2789                        /*
2790                         * Error will be seen/handled by
2791                         * mv_err_intr().  So do nothing at all here.
2792                         */
2793                        return false;
2794                }
2795        }
2796        ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
2797        if (!ac_err_mask(ata_status))
2798                return true;
2799        /* else: leave it for mv_err_intr() */
2800        return false;
2801}
2802
2803static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
2804{
2805        void __iomem *port_mmio = mv_ap_base(ap);
2806        struct mv_host_priv *hpriv = ap->host->private_data;
2807        u32 in_index;
2808        bool work_done = false;
2809        u32 done_mask = 0;
2810        int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
2811
2812        /* Get the hardware queue position index */
2813        in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR)
2814                        >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2815
2816        /* Process any new responses since the last time we looked */
2817        while (in_index != pp->resp_idx) {
2818                unsigned int tag;
2819                struct mv_crpb *response = &pp->crpb[pp->resp_idx];
2820
2821                pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
2822
2823                if (IS_GEN_I(hpriv)) {
2824                        /* 50xx: no NCQ, only one command active at a time */
2825                        tag = ap->link.active_tag;
2826                } else {
2827                        /* Gen II/IIE: get command tag from CRPB entry */
2828                        tag = le16_to_cpu(response->id) & 0x1f;
2829                }
2830                if (mv_process_crpb_response(ap, response, tag, ncq_enabled))
2831                        done_mask |= 1 << tag;
2832                work_done = true;
2833        }
2834
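    	/*
    	 * ata_qc_complete_multiple() takes the *new* qc_active mask, so the
    	 * completed tags are XORed out of the current active set below:
    	 * e.g. active == 0x0f with done_mask == 0x05 leaves 0x0a still
    	 * outstanding (illustration only).
    	 */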
2835        if (work_done) {
2836                ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
2837
2838                /* Update the software queue position index in hardware */
2839                writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
2840                         (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
2841                         port_mmio + EDMA_RSP_Q_OUT_PTR);
2842        }
2843}
2844
2845static void mv_port_intr(struct ata_port *ap, u32 port_cause)
2846{
2847        struct mv_port_priv *pp;
2848        int edma_was_enabled;
2849
2850        /*
2851         * Grab a snapshot of the EDMA_EN flag setting,
2852         * so that we have a consistent view for this port,
2853         * even if one of the routines we call changes it.
2854         */
2855        pp = ap->private_data;
2856        edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2857        /*
2858         * Process completed CRPB response(s) before other events.
2859         */
2860        if (edma_was_enabled && (port_cause & DONE_IRQ)) {
2861                mv_process_crpb_entries(ap, pp);
2862                if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
2863                        mv_handle_fbs_ncq_dev_err(ap);
2864        }
2865        /*
2866         * Handle chip-reported errors, or continue on to handle PIO.
2867         */
2868        if (unlikely(port_cause & ERR_IRQ)) {
2869                mv_err_intr(ap);
2870        } else if (!edma_was_enabled) {
2871                struct ata_queued_cmd *qc = mv_get_active_qc(ap);
2872                if (qc)
2873                        ata_bmdma_port_intr(ap, qc);
2874                else
2875                        mv_unexpected_intr(ap, edma_was_enabled);
2876        }
2877}
2878
2879/**
2880 *      mv_host_intr - Handle all interrupts on the given host controller
2881 *      @host: host specific structure
2882 *      @main_irq_cause: Main interrupt cause register for the chip.
2883 *
2884 *      LOCKING:
2885 *      Inherited from caller.
2886 */
2887static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
2888{
2889        struct mv_host_priv *hpriv = host->private_data;
2890        void __iomem *mmio = hpriv->base, *hc_mmio;
2891        unsigned int handled = 0, port;
2892
2893        /* If asserted, clear the "all ports" IRQ coalescing bit */
2894        if (main_irq_cause & ALL_PORTS_COAL_DONE)
2895                writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
2896
2897        for (port = 0; port < hpriv->n_ports; port++) {
2898                struct ata_port *ap = host->ports[port];
2899                unsigned int p, shift, hardport, port_cause;
2900
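    		/*
    		 * Each port owns a DONE_IRQ/ERR_IRQ bit pair in the main
    		 * cause register: "shift" locates that pair, and "hardport"
    		 * is the port's index within its host controller (both
    		 * derived by the macro below).
    		 */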
2901                MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
2902                /*
2903                 * Each hc within the host has its own hc_irq_cause register,
2904                 * where the interrupting ports bits get ack'd.
2905                 */
2906                if (hardport == 0) {    /* first port on this hc ? */
2907                        u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
2908                        u32 port_mask, ack_irqs;
2909                        /*
2910                         * Skip this entire hc if nothing pending for any ports
2911                         */
2912                        if (!hc_cause) {
2913                                port += MV_PORTS_PER_HC - 1;
2914                                continue;
2915                        }
2916                        /*
2917                         * We don't need/want to read the hc_irq_cause register,
2918                         * because doing so hurts performance, and
2919                         * main_irq_cause already gives us everything we need.
2920                         *
2921                         * But we do have to *write* to the hc_irq_cause to ack
2922                         * the ports that we are handling this time through.
2923                         *
2924                         * This requires that we create a bitmap for those
2925                         * ports which interrupted us, and use that bitmap
2926                         * to ack (only) those ports via hc_irq_cause.
2927                         */
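    			/*
    			 * E.g. if hardports 0 and 2 of this hc interrupted,
    			 * ack_irqs ends up as (DMA_IRQ | DEV_IRQ) |
    			 * ((DMA_IRQ | DEV_IRQ) << 2)  (illustration only).
    			 */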
2928                        ack_irqs = 0;
2929                        if (hc_cause & PORTS_0_3_COAL_DONE)
2930                                ack_irqs = HC_COAL_IRQ;
2931                        for (p = 0; p < MV_PORTS_PER_HC; ++p) {
2932                                if ((port + p) >= hpriv->n_ports)
2933                                        break;
2934                                port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
2935                                if (hc_cause & port_mask)
2936                                        ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
2937                        }
2938                        hc_mmio = mv_hc_base_from_port(mmio, port);
2939                        writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE);
2940                        handled = 1;
2941                }
2942                /*
2943                 * Handle interrupts signalled for this port:
2944                 */
2945                port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
2946                if (port_cause)
2947                        mv_port_intr(ap, port_cause);
2948        }
2949        return handled;
2950}
2951
2952static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
2953{
2954        struct mv_host_priv *hpriv = host->private_data;
2955        struct ata_port *ap;
2956        struct ata_queued_cmd *qc;
2957        struct ata_eh_info *ehi;
2958        unsigned int i, err_mask, printed = 0;
2959        u32 err_cause;
2960
2961        err_cause = readl(mmio + hpriv->irq_cause_offset);
2962
2963        dev_err(host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", err_cause);
2964
2965        DPRINTK("All regs @ PCI error\n");
2966        mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
2967
2968        writelfl(0, mmio + hpriv->irq_cause_offset);
2969
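    	/*
    	 * Describe the PCI error cause on the first affected port only
    	 * (the "printed" guard below), then fail any active command and
    	 * freeze every port whose link still appears online.
    	 */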
2970        for (i = 0; i < host->n_ports; i++) {
2971                ap = host->ports[i];
2972                if (!ata_link_offline(&ap->link)) {
2973                        ehi = &ap->link.eh_info;
2974                        ata_ehi_clear_desc(ehi);
2975                        if (!printed++)
2976                                ata_ehi_push_desc(ehi,
2977                                        "PCI err cause 0x%08x", err_cause);
2978                        err_mask = AC_ERR_HOST_BUS;
2979                        ehi->action = ATA_EH_RESET;
2980                        qc = ata_qc_from_tag(ap, ap->link.active_tag);
2981                        if (qc)
2982                                qc->err_mask |= err_mask;
2983                        else
2984                                ehi->err_mask |= err_mask;
2985
2986                        ata_port_freeze(ap);
2987                }
2988        }
2989        return 1;       /* handled */
2990}
2991
2992/**
2993 *      mv_interrupt - Main interrupt event handler
2994 *      @irq: unused
2995 *      @dev_instance: private data; in this case the host structure
2996 *
2997 *      Read the read-only register to determine if any host
2998 *      controllers have pending interrupts.  If so, call lower level
2999 *      routine to handle.  Also check for PCI errors which are only
3000 *      reported here.
3001 *
3002 *      LOCKING:
3003 *      This routine holds the host lock while processing pending
3004 *      interrupts.
3005 */
3006static irqreturn_t mv_interrupt(int irq, void *dev_instance)
3007{
3008        struct ata_host *host = dev_instance;
3009        struct mv_host_priv *hpriv = host->private_data;
3010        unsigned int handled = 0;
3011        int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
3012        u32 main_irq_cause, pending_irqs;
3013
3014        spin_lock(&host->lock);
3015
3016        /* for MSI:  block new interrupts while in here */
3017        if (using_msi)
3018                mv_write_main_irq_mask(0, hpriv);
3019
3020        main_irq_cause = readl(hpriv->main_irq_cause_addr);
3021        pending_irqs   = main_irq_cause & hpriv->main_irq_mask;
3022        /*
3023         * Deal with cases where we either have nothing pending, or have read
3024         * a bogus register value which can indicate HW removal or PCI fault.
3025         */
3026        if (pending_irqs && main_irq_cause != 0xffffffffU) {
3027                if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
3028                        handled = mv_pci_error(host, hpriv->base);
3029                else
3030                        handled = mv_host_intr(host, pending_irqs);
3031        }
3032
3033        /* for MSI: unmask; interrupt cause bits will retrigger now */
3034        if (using_msi)
3035                mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv);
3036
3037        spin_unlock(&host->lock);
3038
3039        return IRQ_RETVAL(handled);
3040}
3041
3042static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
3043{
3044        unsigned int ofs;
3045
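    	/*
    	 * SCR_STATUS/SCR_ERROR/SCR_CONTROL are 0/1/2 in libata, so the
    	 * arithmetic below places them at byte offsets 0x0/0x4/0x8 from
    	 * the phy base.
    	 */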
3046        switch (sc_reg_in) {
3047        case SCR_STATUS:
3048        case SCR_ERROR:
3049        case SCR_CONTROL:
3050                ofs = sc_reg_in * sizeof(u32);
3051                break;
3052        default:
3053                ofs = 0xffffffffU;
3054                break;
3055        }
3056        return ofs;
3057}
3058
3059static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
3060{
3061        struct mv_host_priv *hpriv = link->ap->host->private_data;
3062        void __iomem *mmio = hpriv->base;
3063        void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
3064        unsigned int ofs = mv5_scr_offset(sc_reg_in);
3065
3066        if (ofs != 0xffffffffU) {
3067                *val = readl(addr + ofs);
3068                return 0;
3069        } else
3070                return -EINVAL;
3071}
3072
3073static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
3074{
3075        struct mv_host_priv *hpriv = link->ap->host->private_data;
3076        void __iomem *mmio = hpriv->base;
3077        void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
3078        unsigned int ofs = mv5_scr_offset(sc_reg_in);
3079
3080        if (ofs != 0xffffffffU) {
3081                writelfl(val, addr + ofs);
3082                return 0;
3083        } else
3084                return -EINVAL;
3085}
3086
3087static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
3088{
3089        struct pci_dev *pdev = to_pci_dev(host->dev);
3090        int early_5080;
3091
3092        early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
3093
3094        if (!early_5080) {
3095                u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
3096                tmp |= (1 << 0);
3097                writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
3098        }
3099
3100        mv_reset_pci_bus(host, mmio);
3101}
3102
3103static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3104{
3105        writel(0x0fcfffff, mmio + FLASH_CTL);
3106}
3107
3108static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
3109                           void __iomem *mmio)
3110{
3111        void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
3112        u32 tmp;
3113
3114        tmp = readl(phy_mmio + MV5_PHY_MODE);
3115
3116        hpriv->signal[idx].pre = tmp & 0x1800;  /* bits 12:11 */
3117        hpriv->signal[idx].amps = tmp & 0xe0;   /* bits 7:5 */
3118}
3119
3120static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
3121{
3122        u32 tmp;
3123
3124        writel(0, mmio + GPIO_PORT_CTL);
3125
3126        /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
3127
3128        tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
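    	/*
    	 * XXX: "|= ~(1 << 0)" sets every bit except bit 0; if the intent
    	 * was to set only bit 0 (as mv5_reset_bus() does above on the same
    	 * register), this would read "|= (1 << 0)".  Left as-is pending
    	 * confirmation.
    	 */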
3129        tmp |= ~(1 << 0);
3130        writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
3131}
3132
3133static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
3134                           unsigned int port)
3135{
3136        void __iomem *phy_mmio = mv5_phy_base(mmio, port);
3137        const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
3138        u32 tmp;
3139        int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
3140
3141        if (fix_apm_sq) {
3142                tmp = readl(phy_mmio + MV5_LTMODE);
3143                tmp |= (1 << 19);
3144                writel(tmp, phy_mmio + MV5_LTMODE);
3145
3146                tmp = readl(phy_mmio + MV5_PHY_CTL);
3147                tmp &= ~0x3;
3148                tmp |= 0x1;
3149                writel(tmp, phy_mmio + MV5_PHY_CTL);
3150        }
3151
3152        tmp = readl(phy_mmio + MV5_PHY_MODE);
3153        tmp &= ~mask;
3154        tmp |= hpriv->signal[port].pre;
3155        tmp |= hpriv->signal[port].amps;
3156        writel(tmp, phy_mmio + MV5_PHY_MODE);
3157}
3158
3159
3160#undef ZERO
3161#define ZERO(reg) writel(0, port_mmio + (reg))
3162static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
3163                             unsigned int port)
3164{
3165        void __iomem *port_mmio = mv_port_base(mmio, port);
3166
3167        mv_reset_channel(hpriv, mmio, port);
3168
3169        ZERO(0x028);    /* command */
3170        writel(0x11f, port_mmio + EDMA_CFG);
3171        ZERO(0x004);    /* timer */
3172        ZERO(0x008);    /* irq err cause */
3173        ZERO(0x00c);    /* irq err mask */
3174        ZERO(0x010);    /* rq bah */
3175        ZERO(0x014);    /* rq inp */
3176        ZERO(0x018);    /* rq outp */
3177        ZERO(0x01c);    /* respq bah */
3178        ZERO(0x024);    /* respq outp */
3179        ZERO(0x020);    /* respq inp */
3180        ZERO(0x02c);    /* test control */
3181        writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
3182}
3183#undef ZERO
3184
3185#define ZERO(reg) writel(0, hc_mmio + (reg))
3186static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3187                        unsigned int hc)
3188{
3189        void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3190        u32 tmp;
3191
3192        ZERO(0x00c);
3193        ZERO(0x010);
3194        ZERO(0x014);
3195        ZERO(0x018);
3196
3197        tmp = readl(hc_mmio + 0x20);
3198        tmp &= 0x1c1c1c1c;
3199        tmp |= 0x03030303;
3200        writel(tmp, hc_mmio + 0x20);
3201}
3202#undef ZERO
3203
3204static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3205                        unsigned int n_hc)
3206{
3207        unsigned int hc, port;
3208
3209        for (hc = 0; hc < n_hc; hc++) {
3210                for (port = 0; port < MV_PORTS_PER_HC; port++)
3211                        mv5_reset_hc_port(hpriv, mmio,
3212                                          (hc * MV_PORTS_PER_HC) + port);
3213
3214                mv5_reset_one_hc(hpriv, mmio, hc);
3215        }
3216
3217        return 0;
3218}
3219
3220#undef ZERO
3221#define ZERO(reg) writel(0, mmio + (reg))
3222static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
3223{
3224        struct mv_host_priv *hpriv = host->private_data;
3225        u32 tmp;
3226
3227        tmp = readl(mmio + MV_PCI_MODE);
3228        tmp &= 0xff00ffff;
3229        writel(tmp, mmio + MV_PCI_MODE);
3230
3231        ZERO(MV_PCI_DISC_TIMER);
3232        ZERO(MV_PCI_MSI_TRIGGER);
3233        writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
3234        ZERO(MV_PCI_SERR_MASK);
3235        ZERO(hpriv->irq_cause_offset);
3236        ZERO(hpriv->irq_mask_offset);
3237        ZERO(MV_PCI_ERR_LOW_ADDRESS);
3238        ZERO(MV_PCI_ERR_HIGH_ADDRESS);
3239        ZERO(MV_PCI_ERR_ATTRIBUTE);
3240        ZERO(MV_PCI_ERR_COMMAND);
3241}
3242#undef ZERO
3243
3244static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3245{
3246        u32 tmp;
3247
3248        mv5_reset_flash(hpriv, mmio);
3249
3250        tmp = readl(mmio + GPIO_PORT_CTL);
3251        tmp &= 0x3;
3252        tmp |= (1 << 5) | (1 << 6);
3253        writel(tmp, mmio + GPIO_PORT_CTL);
3254}
3255
3256/*
3257 *      mv6_reset_hc - Perform the 6xxx global soft reset
3258 *      @mmio: base address of the HBA
3259 *
3260 *      This routine only applies to 6xxx parts.
3261 *
3262 *      LOCKING:
3263 *      Inherited from caller.
3264 */
3265static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3266                        unsigned int n_hc)
3267{
3268        void __iomem *reg = mmio + PCI_MAIN_CMD_STS;
3269        int i, rc = 0;
3270        u32 t;
3271
3272        /* Following procedure defined in PCI "main command and status
3273         * register" table.
3274         */
3275        t = readl(reg);
3276        writel(t | STOP_PCI_MASTER, reg);
3277
3278        for (i = 0; i < 1000; i++) {
3279                udelay(1);
3280                t = readl(reg);
3281                if (PCI_MASTER_EMPTY & t)
3282                        break;
3283        }
3284        if (!(PCI_MASTER_EMPTY & t)) {
3285                printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
3286                rc = 1;
3287                goto done;
3288        }
3289
3290        /* set reset */
3291        i = 5;
3292        do {
3293                writel(t | GLOB_SFT_RST, reg);
3294                t = readl(reg);
3295                udelay(1);
3296        } while (!(GLOB_SFT_RST & t) && (i-- > 0));
3297
3298        if (!(GLOB_SFT_RST & t)) {
3299                printk(KERN_ERR DRV_NAME ": can't set global reset\n");
3300                rc = 1;
3301                goto done;
3302        }
3303
3304        /* clear reset and *reenable the PCI master* (not mentioned in spec) */
3305        i = 5;
3306        do {
3307                writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
3308                t = readl(reg);
3309                udelay(1);
3310        } while ((GLOB_SFT_RST & t) && (i-- > 0));
3311
3312        if (GLOB_SFT_RST & t) {
3313                printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
3314                rc = 1;
3315        }
3316done:
3317        return rc;
3318}
3319
3320static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
3321                           void __iomem *mmio)
3322{
3323        void __iomem *port_mmio;
3324        u32 tmp;
3325
3326        tmp = readl(mmio + RESET_CFG);
3327        if ((tmp & (1 << 0)) == 0) {
3328                hpriv->signal[idx].amps = 0x7 << 8;
3329                hpriv->signal[idx].pre = 0x1 << 5;
3330                return;
3331        }
3332
3333        port_mmio = mv_port_base(mmio, idx);
3334        tmp = readl(port_mmio + PHY_MODE2);
3335
3336        hpriv->signal[idx].amps = tmp & 0x700;  /* bits 10:8 */
3337        hpriv->signal[idx].pre = tmp & 0xe0;    /* bits 7:5 */
3338}
3339
3340static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
3341{
3342        writel(0x00000060, mmio + GPIO_PORT_CTL);
3343}
3344
3345static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
3346                           unsigned int port)
3347{
3348        void __iomem *port_mmio = mv_port_base(mmio, port);
3349
3350        u32 hp_flags = hpriv->hp_flags;
3351        int fix_phy_mode2 =
3352                hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
3353        int fix_phy_mode4 =
3354                hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
3355        u32 m2, m3;
3356
3357        if (fix_phy_mode2) {
3358                m2 = readl(port_mmio + PHY_MODE2);
3359                m2 &= ~(1 << 16);
3360                m2 |= (1 << 31);
3361                writel(m2, port_mmio + PHY_MODE2);
3362
3363                udelay(200);
3364
3365                m2 = readl(port_mmio + PHY_MODE2);
3366                m2 &= ~((1 << 16) | (1 << 31));
3367                writel(m2, port_mmio + PHY_MODE2);
3368
3369                udelay(200);
3370        }
3371
3372        /*
3373         * Gen-II/IIe PHY_MODE3 errata RM#2:
3374         * Achieves better receiver noise performance than the h/w default:
3375         */
3376        m3 = readl(port_mmio + PHY_MODE3);
3377        m3 = (m3 & 0x1f) | (0x5555601 << 5);
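    	/*
    	 * (0x5555601 << 5) == 0xaaaac020, so m3 ends up as 0xaaaac020
    	 * OR'd with the low five bits of its previous value (arithmetic
    	 * note only).
    	 */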
3378
3379        /* Guideline 88F5182 (GL# SATA-S11) */
3380        if (IS_SOC(hpriv))
3381                m3 &= ~0x1c;
3382
3383        if (fix_phy_mode4) {
3384                u32 m4 = readl(port_mmio + PHY_MODE4);
3385                /*
3386                 * Enforce reserved-bit restrictions on GenIIe devices only.
3387                 * For earlier chipsets, force only the internal config field
3388                 *  (workaround for errata FEr SATA#10 part 1).
3389                 */
3390                if (IS_GEN_IIE(hpriv))
3391                        m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
3392                else
3393                        m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
3394                writel(m4, port_mmio + PHY_MODE4);
3395        }
3396        /*
3397         * Workaround for 60x1-B2 errata SATA#13:
3398         * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
3399         * so we must always rewrite PHY_MODE3 after PHY_MODE4.
3400         * Or ensure we use writelfl() when writing PHY_MODE4.
3401         */
3402        writel(m3, port_mmio + PHY_MODE3);
3403
3404        /* Revert values of pre-emphasis and signal amps to the saved ones */
3405        m2 = readl(port_mmio + PHY_MODE2);
3406
3407        m2 &= ~MV_M2_PREAMP_MASK;
3408        m2 |= hpriv->signal[port].amps;
3409        m2 |= hpriv->signal[port].pre;
3410        m2 &= ~(1 << 16);
3411
3412        /* according to mvSata 3.6.1, some IIE values are fixed */
3413        if (IS_GEN_IIE(hpriv)) {
3414                m2 &= ~0xC30FF01F;
3415                m2 |= 0x0000900F;
3416        }
3417
3418        writel(m2, port_mmio + PHY_MODE2);
3419}
3420
3421/* TODO: use the generic LED interface to configure the SATA Presence */
3422/* & Activity LEDs on the board */
3423static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
3424                                      void __iomem *mmio)
3425{
3426        return;
3427}
3428
3429static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
3430                           void __iomem *mmio)
3431{
3432        void __iomem *port_mmio;
3433        u32 tmp;
3434
3435        port_mmio = mv_port_base(mmio, idx);
3436        tmp = readl(port_mmio + PHY_MODE2);
3437
3438        hpriv->signal[idx].amps = tmp & 0x700;  /* bits 10:8 */
3439        hpriv->signal[idx].pre = tmp & 0xe0;    /* bits 7:5 */
3440}
3441
3442#undef ZERO
3443#define ZERO(reg) writel(0, port_mmio + (reg))
3444static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
3445                                        void __iomem *mmio, unsigned int port)
3446{
3447        void __iomem *port_mmio = mv_port_base(mmio, port);
3448
3449        mv_reset_channel(hpriv, mmio, port);
3450
3451        ZERO(0x028);            /* command */
3452        writel(0x101f, port_mmio + EDMA_CFG);
3453        ZERO(0x004);            /* timer */
3454        ZERO(0x008);            /* irq err cause */
3455        ZERO(0x00c);            /* irq err mask */
3456        ZERO(0x010);            /* rq bah */
3457        ZERO(0x014);            /* rq inp */
3458        ZERO(0x018);            /* rq outp */
3459        ZERO(0x01c);            /* respq bah */
3460        ZERO(0x024);            /* respq outp */
3461        ZERO(0x020);            /* respq inp */
3462        ZERO(0x02c);            /* test control */
3463        writel(0x800, port_mmio + EDMA_IORDY_TMOUT);
3464}
3465
3466#undef ZERO
3467
3468#define ZERO(reg) writel(0, hc_mmio + (reg))
3469static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
3470                                       void __iomem *mmio)
3471{
3472        void __iomem *hc_mmio = mv_hc_base(mmio, 0);
3473
3474        ZERO(0x00c);
3475        ZERO(0x010);
3476        ZERO(0x014);
3477
3478}
3479
3480#undef ZERO
3481
3482static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
3483                                  void __iomem *mmio, unsigned int n_hc)
3484{
3485        unsigned int port;
3486
3487        for (port = 0; port < hpriv->n_ports; port++)
3488                mv_soc_reset_hc_port(hpriv, mmio, port);
3489
3490        mv_soc_reset_one_hc(hpriv, mmio);
3491
3492        return 0;
3493}
3494
3495static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
3496                                      void __iomem *mmio)
3497{
3498        return;
3499}
3500
3501static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
3502{
3503        return;
3504}
3505
3506static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
3507                                  void __iomem *mmio, unsigned int port)
3508{
3509        void __iomem *port_mmio = mv_port_base(mmio, port);
3510        u32     reg;
3511
3512        reg = readl(port_mmio + PHY_MODE3);
3513        reg &= ~(0x3 << 27);    /* SELMUPF (bits 28:27) to 1 */
3514        reg |= (0x1 << 27);
3515        reg &= ~(0x3 << 29);    /* SELMUPI (bits 30:29) to 1 */
3516        reg |= (0x1 << 29);
3517        writel(reg, port_mmio + PHY_MODE3);
3518
3519        reg = readl(port_mmio + PHY_MODE4);
3520        reg &= ~0x1;    /* SATU_OD8 (bit 0) to 0, reserved bit 16 must be set */
3521        reg |= (0x1 << 16);
3522        writel(reg, port_mmio + PHY_MODE4);
3523
3524        reg = readl(port_mmio + PHY_MODE9_GEN2);
3525        reg &= ~0xf;    /* TXAMP[3:0] (bits 3:0) to 8 */
3526        reg |= 0x8;
3527        reg &= ~(0x1 << 14);    /* TXAMP[4] (bit 14) to 0 */
3528        writel(reg, port_mmio + PHY_MODE9_GEN2);
3529
3530        reg = readl(port_mmio + PHY_MODE9_GEN1);
3531        reg &= ~0xf;    /* TXAMP[3:0] (bits 3:0) to 8 */
3532        reg |= 0x8;
3533        reg &= ~(0x1 << 14);    /* TXAMP[4] (bit 14) to 0 */
3534        writel(reg, port_mmio + PHY_MODE9_GEN1);
3535}
3536
3537/*
3538 *      soc_is_65n - check if the SoC is a 65 nano device
3539 *
3540 *      Detect the type of SoC by reading the PHYCFG_OFS register.  This
3541 *      register exists only on the 65 nano devices and should contain a
3542 *      non-zero value there; reading it from older devices returns 0.
3543 */
3544static bool soc_is_65n(struct mv_host_priv *hpriv)
3545{
3546        void __iomem *port0_mmio = mv_port_base(hpriv->base, 0);
3547
3548        if (readl(port0_mmio + PHYCFG_OFS))
3549                return true;
3550        return false;
3551}
3552
3553static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
3554{
3555        u32 ifcfg = readl(port_mmio + SATA_IFCFG);
3556
3557        ifcfg = (ifcfg & 0xf7f) | 0x9b1000;     /* from chip spec */
3558        if (want_gen2i)
3559                ifcfg |= (1 << 7);              /* enable gen2i speed */
3560        writelfl(ifcfg, port_mmio + SATA_IFCFG);
3561}
3562
3563static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
3564                             unsigned int port_no)
3565{
3566        void __iomem *port_mmio = mv_port_base(mmio, port_no);
3567
3568        /*
3569         * The datasheet warns against setting EDMA_RESET when EDMA is active
3570         * (but doesn't say what the problem might be).  So we first try
3571         * to disable the EDMA engine before doing the EDMA_RESET operation.
3572         */
3573        mv_stop_edma_engine(port_mmio);
3574        writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
3575
3576        if (!IS_GEN_I(hpriv)) {
3577                /* Enable 3.0gb/s link speed: this survives EDMA_RESET */
3578                mv_setup_ifcfg(port_mmio, 1);
3579        }
3580        /*
3581         * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
3582         * link, and physical layers.  It resets all SATA interface registers
3583         * (except for SATA_IFCFG), and issues a COMRESET to the dev.
3584         */
3585        writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
3586        udelay(25);     /* allow reset propagation */
3587        writelfl(0, port_mmio + EDMA_CMD);
3588
3589        hpriv->ops->phy_errata(hpriv, mmio, port_no);
3590
3591        if (IS_GEN_I(hpriv))
3592                usleep_range(500, 1000);
3593}
3594
3595static void mv_pmp_select(struct ata_port *ap, int pmp)
3596{
3597        if (sata_pmp_supported(ap)) {
3598                void __iomem *port_mmio = mv_ap_base(ap);
3599                u32 reg = readl(port_mmio + SATA_IFCTL);
3600                int old = reg & 0xf;
3601
3602                if (old != pmp) {
3603                        reg = (reg & ~0xf) | pmp;
3604                        writelfl(reg, port_mmio + SATA_IFCTL);
3605                }
3606        }
3607}
3608
3609static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
3610                                unsigned long deadline)
3611{
3612        mv_pmp_select(link->ap, sata_srst_pmp(link));
3613        return sata_std_hardreset(link, class, deadline);
3614}
3615
3616static int mv_softreset(struct ata_link *link, unsigned int *class,
3617                                unsigned long deadline)
3618{
3619        mv_pmp_select(link->ap, sata_srst_pmp(link));
3620        return ata_sff_softreset(link, class, deadline);
3621}
3622
3623static int mv_hardreset(struct ata_link *link, unsigned int *class,
3624                        unsigned long deadline)
3625{
3626        struct ata_port *ap = link->ap;
3627        struct mv_host_priv *hpriv = ap->host->private_data;
3628        struct mv_port_priv *pp = ap->private_data;
3629        void __iomem *mmio = hpriv->base;
3630        int rc, attempts = 0, extra = 0;
3631        u32 sstatus;
3632        bool online;
3633
3634        mv_reset_channel(hpriv, mmio, ap->port_no);
3635        pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
3636        pp->pp_flags &=
3637          ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
3638
3639        /* Workaround for errata FEr SATA#10 (part 2) */
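    	/*
    	 * The retry loop below keeps hard-resetting until SStatus shows
    	 * either no device (0x0) or an established Gen1/Gen2 link
    	 * (0x113/0x123); a persistent 0x121 (device detected, no phy
    	 * communication) after several attempts on Gen-II/IIe triggers the
    	 * forced 1.5Gb/s fallback.  SStatus decode per SATA: DET in bits
    	 * 3:0, SPD in 7:4, IPM in 11:8.
    	 */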
3640        do {
3641                const unsigned long *timing =
3642                                sata_ehc_deb_timing(&link->eh_context);
3643
3644                rc = sata_link_hardreset(link, timing, deadline + extra,
3645                                         &online, NULL);
3646                rc = online ? -EAGAIN : rc;
3647                if (rc)
3648                        return rc;
3649                sata_scr_read(link, SCR_STATUS, &sstatus);
3650                if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
3651                        /* Force 1.5gb/s link speed and try again */
3652                        mv_setup_ifcfg(mv_ap_base(ap), 0);
3653                        if (time_after(jiffies + HZ, deadline))
3654                                extra = HZ; /* only extend it once, max */
3655                }
3656        } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
3657        mv_save_cached_regs(ap);
3658        mv_edma_cfg(ap, 0, 0);
3659
3660        return rc;
3661}
3662
3663static void mv_eh_freeze(struct ata_port *ap)
3664{
3665        mv_stop_edma(ap);
3666        mv_enable_port_irqs(ap, 0);
3667}
3668
3669static void mv_eh_thaw(struct ata_port *ap)
3670{
3671        struct mv_host_priv *hpriv = ap->host->private_data;
3672        unsigned int port = ap->port_no;
3673        unsigned int hardport = mv_hardport_from_port(port);
3674        void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
3675        void __iomem *port_mmio = mv_ap_base(ap);
3676        u32 hc_irq_cause;
3677
3678        /* clear EDMA errors on this port */
3679        writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
3680
3681        /* clear pending irq events */
3682        hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
3683        writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
3684
3685        mv_enable_port_irqs(ap, ERR_IRQ);
3686}
3687
3688/**
3689 *      mv_port_init - Perform some early initialization on a single port.
3690 *      @port: libata data structure storing shadow register addresses
3691 *      @port_mmio: base address of the port
3692 *
3693 *      Initialize shadow register mmio addresses, clear outstanding
3694 *      interrupts on the port, and unmask interrupts for the future
3695 *      start of the port.
3696 *
3697 *      LOCKING:
3698 *      Inherited from caller.
3699 */
3700static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
3701{
3702        void __iomem *serr, *shd_base = port_mmio + SHD_BLK;
3703
3704        /* PIO related setup */
3706        port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
3707        port->error_addr =
3708                port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
3709        port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
3710        port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
3711        port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
3712        port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
3713        port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
3714        port->status_addr =
3715                port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
3716        /* special case: control/altstatus doesn't have ATA_REG_ address */
3717        port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST;
3718
3719        /* Clear any currently outstanding port interrupt conditions */
3720        serr = port_mmio + mv_scr_offset(SCR_ERROR);
3721        writelfl(readl(serr), serr);
3722        writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
3723
3724        /* unmask all non-transient EDMA error interrupts */
3725        writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK);
3726
3727        VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
3728                readl(port_mmio + EDMA_CFG),
3729                readl(port_mmio + EDMA_ERR_IRQ_CAUSE),
3730                readl(port_mmio + EDMA_ERR_IRQ_MASK));
3731}
3732
3733static unsigned int mv_in_pcix_mode(struct ata_host *host)
3734{
3735        struct mv_host_priv *hpriv = host->private_data;
3736        void __iomem *mmio = hpriv->base;
3737        u32 reg;
3738
3739        if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
3740                return 0;       /* not PCI-X capable */
3741        reg = readl(mmio + MV_PCI_MODE);
3742        if ((reg & MV_PCI_MODE_MASK) == 0)
3743                return 0;       /* conventional PCI mode */
3744        return 1;       /* chip is in PCI-X mode */
3745}
3746
3747static int mv_pci_cut_through_okay(struct ata_host *host)
3748{
3749        struct mv_host_priv *hpriv = host->private_data;
3750        void __iomem *mmio = hpriv->base;
3751        u32 reg;
3752
3753        if (!mv_in_pcix_mode(host)) {
3754                reg = readl(mmio + MV_PCI_COMMAND);
3755                if (reg & MV_PCI_COMMAND_MRDTRIG)
3756                        return 0; /* not okay */
3757        }
3758        return 1; /* okay */
3759}
3760
3761static void mv_60x1b2_errata_pci7(struct ata_host *host)
3762{
3763        struct mv_host_priv *hpriv = host->private_data;
3764        void __iomem *mmio = hpriv->base;
3765
3766        /* workaround for 60x1-B2 errata PCI#7 */
3767        if (mv_in_pcix_mode(host)) {
3768                u32 reg = readl(mmio + MV_PCI_COMMAND);
3769                writelfl(reg & ~MV_PCI_COMMAND_MWRCOM, mmio + MV_PCI_COMMAND);
3770        }
3771}
3772
3773static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
3774{
3775        struct pci_dev *pdev = to_pci_dev(host->dev);
3776        struct mv_host_priv *hpriv = host->private_data;
3777        u32 hp_flags = hpriv->hp_flags;
3778
3779        switch (board_idx) {
3780        case chip_5080:
3781                hpriv->ops = &mv5xxx_ops;
3782                hp_flags |= MV_HP_GEN_I;
3783
3784                switch (pdev->revision) {
3785                case 0x1:
3786                        hp_flags |= MV_HP_ERRATA_50XXB0;
3787                        break;
3788                case 0x3:
3789                        hp_flags |= MV_HP_ERRATA_50XXB2;
3790                        break;
3791                default:
3792                        dev_warn(&pdev->dev,
3793                                 "Applying 50XXB2 workarounds to unknown rev\n");
3794                        hp_flags |= MV_HP_ERRATA_50XXB2;
3795                        break;
3796                }
3797                break;
3798
3799        case chip_504x:
3800        case chip_508x:
3801                hpriv->ops = &mv5xxx_ops;
3802                hp_flags |= MV_HP_GEN_I;
3803
3804                switch (pdev->revision) {
3805                case 0x0:
3806                        hp_flags |= MV_HP_ERRATA_50XXB0;
3807                        break;
3808                case 0x3:
3809                        hp_flags |= MV_HP_ERRATA_50XXB2;
3810                        break;
3811                default:
3812                        dev_warn(&pdev->dev,
3813                                 "Applying B2 workarounds to unknown rev\n");
3814                        hp_flags |= MV_HP_ERRATA_50XXB2;
3815                        break;
3816                }
3817                break;
3818
3819        case chip_604x:
3820        case chip_608x:
3821                hpriv->ops = &mv6xxx_ops;
3822                hp_flags |= MV_HP_GEN_II;
3823
3824                switch (pdev->revision) {
3825                case 0x7:
3826                        mv_60x1b2_errata_pci7(host);
3827                        hp_flags |= MV_HP_ERRATA_60X1B2;
3828                        break;
3829                case 0x9:
3830                        hp_flags |= MV_HP_ERRATA_60X1C0;
3831                        break;
3832                default:
3833                        dev_warn(&pdev->dev,
3834                                 "Applying B2 workarounds to unknown rev\n");
3835                        hp_flags |= MV_HP_ERRATA_60X1B2;
3836                        break;
3837                }
3838                break;
3839
3840        case chip_7042:
3841                hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
3842                if (pdev->vendor == PCI_VENDOR_ID_TTI &&
3843                    (pdev->device == 0x2300 || pdev->device == 0x2310))
3844                {
3845                        /*
3846                         * Highpoint RocketRAID PCIe 23xx series cards:
3847                         *
3848                         * Unconfigured drives are treated as "Legacy"
3849                         * by the BIOS, and it overwrites sector 8 with
3850                         * a "Lgcy" metadata block prior to Linux boot.
3851                         *
3852                         * Configured drives (RAID or JBOD) leave sector 8
3853                         * alone, but instead overwrite a high numbered
3854                         * sector for the RAID metadata.  This sector can
3855                         * be determined exactly, by truncating the physical
3856                         * drive capacity to a nice even GB value.
3857                         *
3858                         * RAID metadata is at: (dev->n_sectors & ~0xfffff)
3859                         *
3860                         * Warn the user, lest they think we're just buggy.
3861                         */
3862                        printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
3863                                " BIOS CORRUPTS DATA on all attached drives,"
3864                                " regardless of if/how they are configured."
3865                                " BEWARE!\n");
3866                        printk(KERN_WARNING DRV_NAME ": For data safety, do not"
3867                                " use sectors 8-9 on \"Legacy\" drives,"
3868                                " and avoid the final two gigabytes on"
3869                                " all RocketRAID BIOS initialized drives.\n");
3870                }
3871                fallthrough;
3872        case chip_6042:
3873                hpriv->ops = &mv6xxx_ops;
3874                hp_flags |= MV_HP_GEN_IIE;
3875                if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
3876                        hp_flags |= MV_HP_CUT_THROUGH;
3877
3878                switch (pdev->revision) {
3879                case 0x2: /* Rev.B0: the first/only public release */
3880                        hp_flags |= MV_HP_ERRATA_60X1C0;
3881                        break;
3882                default:
3883                        dev_warn(&pdev->dev,
3884                                 "Applying 60X1C0 workarounds to unknown rev\n");
3885                        hp_flags |= MV_HP_ERRATA_60X1C0;
3886                        break;
3887                }
3888                break;
3889        case chip_soc:
3890                if (soc_is_65n(hpriv))
3891                        hpriv->ops = &mv_soc_65n_ops;
3892                else
3893                        hpriv->ops = &mv_soc_ops;
3894                hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
3895                        MV_HP_ERRATA_60X1C0;
3896                break;
3897
3898        default:
3899                dev_alert(host->dev, "BUG: invalid board index %u\n", board_idx);
3900                return -EINVAL;
3901        }
3902
3903        hpriv->hp_flags = hp_flags;
3904        if (hp_flags & MV_HP_PCIE) {
3905                hpriv->irq_cause_offset = PCIE_IRQ_CAUSE;
3906                hpriv->irq_mask_offset  = PCIE_IRQ_MASK;
3907                hpriv->unmask_all_irqs  = PCIE_UNMASK_ALL_IRQS;
3908        } else {
3909                hpriv->irq_cause_offset = PCI_IRQ_CAUSE;
3910                hpriv->irq_mask_offset  = PCI_IRQ_MASK;
3911                hpriv->unmask_all_irqs  = PCI_UNMASK_ALL_IRQS;
3912        }
3913
3914        return 0;
3915}
3916
3917/**
3918 *      mv_init_host - Perform some early initialization of the host.
3919 *      @host: ATA host to initialize
3920 *
3921 *      If possible, do an early global reset of the host.  Then do
3922 *      our port init and clear/unmask all/relevant host interrupts.
3923 *
3924 *      LOCKING:
3925 *      Inherited from caller.
3926 */
3927static int mv_init_host(struct ata_host *host)
3928{
3929        int rc = 0, n_hc, port, hc;
3930        struct mv_host_priv *hpriv = host->private_data;
3931        void __iomem *mmio = hpriv->base;
3932
3933        rc = mv_chip_id(host, hpriv->board_idx);
3934        if (rc)
3935                goto done;
3936
3937        if (IS_SOC(hpriv)) {
3938                hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE;
3939                hpriv->main_irq_mask_addr  = mmio + SOC_HC_MAIN_IRQ_MASK;
3940        } else {
3941                hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE;
3942                hpriv->main_irq_mask_addr  = mmio + PCI_HC_MAIN_IRQ_MASK;
3943        }
3944
3945        /* initialize shadow irq mask with register's value */
3946        hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);
3947
3948        /* global interrupt mask: 0 == mask everything */
3949        mv_set_main_irq_mask(host, ~0, 0);
3950
3951        n_hc = mv_get_hc_count(host->ports[0]->flags);
3952
3953        for (port = 0; port < host->n_ports; port++)
3954                if (hpriv->ops->read_preamp)
3955                        hpriv->ops->read_preamp(hpriv, port, mmio);
3956
3957        rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
3958        if (rc)
3959                goto done;
3960
3961        hpriv->ops->reset_flash(hpriv, mmio);
3962        hpriv->ops->reset_bus(host, mmio);
3963        hpriv->ops->enable_leds(hpriv, mmio);
3964
3965        for (port = 0; port < host->n_ports; port++) {
3966                struct ata_port *ap = host->ports[port];
3967                void __iomem *port_mmio = mv_port_base(mmio, port);
3968
3969                mv_port_init(&ap->ioaddr, port_mmio);
3970        }
3971
3972        for (hc = 0; hc < n_hc; hc++) {
3973                void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3974
3975                VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
3976                        "(before clear)=0x%08x\n", hc,
3977                        readl(hc_mmio + HC_CFG),
3978                        readl(hc_mmio + HC_IRQ_CAUSE));
3979
3980                /* Clear any currently outstanding hc interrupt conditions */
3981                writelfl(0, hc_mmio + HC_IRQ_CAUSE);
3982        }
3983
3984        if (!IS_SOC(hpriv)) {
3985                /* Clear any currently outstanding host interrupt conditions */
3986                writelfl(0, mmio + hpriv->irq_cause_offset);
3987
3988                /* and unmask interrupt generation for host regs */
3989                writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset);
3990        }
3991
3992        /*
3993         * enable only global host interrupts for now.
3994         * The per-port interrupts get done later as ports are set up.
3995         */
3996        mv_set_main_irq_mask(host, 0, PCI_ERR);
3997        mv_set_irq_coalescing(host, irq_coalescing_io_count,
3998                                    irq_coalescing_usecs);
3999done:
4000        return rc;
4001}
4002
4003static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
4004{
4005        hpriv->crqb_pool   = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
4006                                                             MV_CRQB_Q_SZ, 0);
4007        if (!hpriv->crqb_pool)
4008                return -ENOMEM;
4009
4010        hpriv->crpb_pool   = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
4011                                                             MV_CRPB_Q_SZ, 0);
4012        if (!hpriv->crpb_pool)
4013                return -ENOMEM;
4014
4015        hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
4016                                                             MV_SG_TBL_SZ, 0);
4017        if (!hpriv->sg_tbl_pool)
4018                return -ENOMEM;
4019
4020        return 0;
4021}
4022
4023static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
4024                                 const struct mbus_dram_target_info *dram)
4025{
4026        int i;
4027
4028        for (i = 0; i < 4; i++) {
4029                writel(0, hpriv->base + WINDOW_CTRL(i));
4030                writel(0, hpriv->base + WINDOW_BASE(i));
4031        }
4032
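    	/*
    	 * Each window control word below packs the size mask in bits
    	 * 31:16, the window attribute in 15:8, the DRAM target id in 7:4,
    	 * and the enable bit at bit 0 (field layout as implied by the
    	 * write below).
    	 */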
4033        for (i = 0; i < dram->num_cs; i++) {
4034                const struct mbus_dram_window *cs = dram->cs + i;
4035
4036                writel(((cs->size - 1) & 0xffff0000) |
4037                        (cs->mbus_attr << 8) |
4038                        (dram->mbus_dram_target_id << 4) | 1,
4039                        hpriv->base + WINDOW_CTRL(i));
4040                writel(cs->base, hpriv->base + WINDOW_BASE(i));
4041        }
4042}
4043
4044/**
4045 *      mv_platform_probe - handle a positive probe of an SoC Marvell
4046 *      host
4047 *      @pdev: platform device found
4048 *
4049 *      LOCKING:
4050 *      Inherited from caller.
4051 */
4052static int mv_platform_probe(struct platform_device *pdev)
4053{
4054        const struct mv_sata_platform_data *mv_platform_data;
4055        const struct mbus_dram_target_info *dram;
4056        const struct ata_port_info *ppi[] =
4057            { &mv_port_info[chip_soc], NULL };
4058        struct ata_host *host;
4059        struct mv_host_priv *hpriv;
4060        struct resource *res;
4061        int n_ports = 0, irq = 0;
4062        int rc;
4063        int port;
4064
4065        ata_print_version_once(&pdev->dev, DRV_VERSION);
4066
4067        /*
4068         * Simple resource validation ..
4069         */
4070        if (unlikely(pdev->num_resources != 2)) {
4071                dev_err(&pdev->dev, "invalid number of resources\n");
4072                return -EINVAL;
4073        }
4074
4075        /*
4076         * Get the register base first
4077         */
4078        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4079        if (res == NULL)
4080                return -EINVAL;
4081
4082        /* allocate host */
4083        if (pdev->dev.of_node) {
4084                rc = of_property_read_u32(pdev->dev.of_node, "nr-ports",
4085                                           &n_ports);
4086                if (rc) {
4087                        dev_err(&pdev->dev,
4088                                "error parsing nr-ports property: %d\n", rc);
4089                        return rc;
4090                }
4091
4092                if (n_ports <= 0) {
4093                        dev_err(&pdev->dev, "nr-ports must be positive: %d\n",
4094                                n_ports);
4095                        return -EINVAL;
4096                }
4097
4098                irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
4099        } else {
4100                mv_platform_data = dev_get_platdata(&pdev->dev);
4101                n_ports = mv_platform_data->n_ports;
4102                irq = platform_get_irq(pdev, 0);
4103        }
4104        if (irq < 0)
4105                return irq;
4106        if (!irq)
4107                return -EINVAL;
4108
4109        host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
4110        hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
4111
4112        if (!host || !hpriv)
4113                return -ENOMEM;
4114        hpriv->port_clks = devm_kcalloc(&pdev->dev,
4115                                        n_ports, sizeof(struct clk *),
4116                                        GFP_KERNEL);
4117        if (!hpriv->port_clks)
4118                return -ENOMEM;
4119        hpriv->port_phys = devm_kcalloc(&pdev->dev,
4120                                        n_ports, sizeof(struct phy *),
4121                                        GFP_KERNEL);
4122        if (!hpriv->port_phys)
4123                return -ENOMEM;
4124        host->private_data = hpriv;
4125        hpriv->board_idx = chip_soc;
4126
4127        host->iomap = NULL;
4128        hpriv->base = devm_ioremap(&pdev->dev, res->start,
4129                                   resource_size(res));
4130        if (!hpriv->base)
4131                return -ENOMEM;
4132
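    	/*
    	 * The ioremapped resource covers the SATAHC0 register block, while
    	 * helpers such as mv_port_base()/mv_hc_base() expect the
    	 * chip-global base, so bias the pointer back by SATAHC0_REG_BASE
    	 * (assumption based on how the base is used elsewhere in this
    	 * driver).
    	 */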
4133        hpriv->base -= SATAHC0_REG_BASE;
4134
4135        hpriv->clk = clk_get(&pdev->dev, NULL);
4136        if (IS_ERR(hpriv->clk))
4137                dev_notice(&pdev->dev, "cannot get optional clkdev\n");
4138        else
4139                clk_prepare_enable(hpriv->clk);
4140
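    	/*
    	 * Per-port resources are looked up by name: clocks as "0", "1",
    	 * ... and PHYs as "port0", "port1", ... (names built just below).
    	 */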
4141        for (port = 0; port < n_ports; port++) {
4142                char port_number[16];
4143                sprintf(port_number, "%d", port);
4144                hpriv->port_clks[port] = clk_get(&pdev->dev, port_number);
4145                if (!IS_ERR(hpriv->port_clks[port]))
4146                        clk_prepare_enable(hpriv->port_clks[port]);
4147
4148                sprintf(port_number, "port%d", port);
4149                hpriv->port_phys[port] = devm_phy_optional_get(&pdev->dev,
4150                                                               port_number);
4151                if (IS_ERR(hpriv->port_phys[port])) {
4152                        rc = PTR_ERR(hpriv->port_phys[port]);
4153                        hpriv->port_phys[port] = NULL;
4154                        if (rc != -EPROBE_DEFER)
4155                                dev_warn(&pdev->dev, "error getting phy %d", rc);
4156
4157                        /* Cleanup only the initialized ports */
4158                        hpriv->n_ports = port;
4159                        goto err;
4160                } else
4161                        phy_power_on(hpriv->port_phys[port]);
4162        }
4163
4164        /* All the ports have been initialized */
4165        hpriv->n_ports = n_ports;
4166
4167        /*
4168         * (Re-)program MBUS remapping windows if we are asked to.
4169         */
4170        dram = mv_mbus_dram_info();
4171        if (dram)
4172                mv_conf_mbus_windows(hpriv, dram);
4173
4174        rc = mv_create_dma_pools(hpriv, &pdev->dev);
4175        if (rc)
4176                goto err;
4177
4178        /*
4179         * To allow disk hotplug on Armada 370/XP SoCs, the PHY speed must be
4180         * updated in the LP_PHY_CTL register.
4181         */
4182        if (pdev->dev.of_node &&
4183                of_device_is_compatible(pdev->dev.of_node,
4184                                        "marvell,armada-370-sata"))
4185                hpriv->hp_flags |= MV_HP_FIX_LP_PHY_CTL;
4186
4187        /* initialize adapter */
4188        rc = mv_init_host(host);
4189        if (rc)
4190                goto err;
4191
4192        dev_info(&pdev->dev, "slots %u ports %d\n",
4193                 (unsigned)MV_MAX_Q_DEPTH, host->n_ports);
4194
4195        rc = ata_host_activate(host, irq, mv_interrupt, IRQF_SHARED, &mv6_sht);
4196        if (!rc)
4197                return 0;
4198
4199err:
4200        if (!IS_ERR(hpriv->clk)) {
4201                clk_disable_unprepare(hpriv->clk);
4202                clk_put(hpriv->clk);
4203        }
4204        for (port = 0; port < hpriv->n_ports; port++) {
4205                if (!IS_ERR(hpriv->port_clks[port])) {
4206                        clk_disable_unprepare(hpriv->port_clks[port]);
4207                        clk_put(hpriv->port_clks[port]);
4208                }
4209                phy_power_off(hpriv->port_phys[port]);
4210        }
4211
4212        return rc;
4213}
4214
4215/**
4216 *      mv_platform_remove - unplug a platform interface
4218 *      @pdev: platform device
4219 *
4220 *      A platform bus SATA device has been unplugged. Perform the needed
4221 *      cleanup. Also called on module unload for any active devices.
4222 */
4223static int mv_platform_remove(struct platform_device *pdev)
4224{
4225        struct ata_host *host = platform_get_drvdata(pdev);
4226        struct mv_host_priv *hpriv = host->private_data;
4227        int port;
4228        ata_host_detach(host);
4229
4230        if (!IS_ERR(hpriv->clk)) {
4231                clk_disable_unprepare(hpriv->clk);
4232                clk_put(hpriv->clk);
4233        }
4234        for (port = 0; port < host->n_ports; port++) {
4235                if (!IS_ERR(hpriv->port_clks[port])) {
4236                        clk_disable_unprepare(hpriv->port_clks[port]);
4237                        clk_put(hpriv->port_clks[port]);
4238                }
4239                phy_power_off(hpriv->port_phys[port]);
4240        }
4241        return 0;
4242}
4243
4244#ifdef CONFIG_PM_SLEEP
4245static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
4246{
4247        struct ata_host *host = platform_get_drvdata(pdev);
4248        if (host)
4249                return ata_host_suspend(host, state);
4250        else
4251                return 0;
4252}
4253
4254static int mv_platform_resume(struct platform_device *pdev)
4255{
4256        struct ata_host *host = platform_get_drvdata(pdev);
4257        const struct mbus_dram_target_info *dram;
4258        int ret;
4259
4260        if (host) {
4261                struct mv_host_priv *hpriv = host->private_data;
4262
4263                /*
4264                 * (Re-)program MBUS remapping windows if we are asked to.
4265                 */
4266                dram = mv_mbus_dram_info();
4267                if (dram)
4268                        mv_conf_mbus_windows(hpriv, dram);
4269
4270                /* initialize adapter */
4271                ret = mv_init_host(host);
4272                if (ret) {
4273                        dev_err(&pdev->dev, "Error during HW init\n");
4274                        return ret;
4275                }
4276                ata_host_resume(host);
4277        }
4278
4279        return 0;
4280}
4281#else
4282#define mv_platform_suspend NULL
4283#define mv_platform_resume NULL
4284#endif
4285
4286#ifdef CONFIG_OF
4287static const struct of_device_id mv_sata_dt_ids[] = {
4288        { .compatible = "marvell,armada-370-sata", },
4289        { .compatible = "marvell,orion-sata", },
4290        {},
4291};
4292MODULE_DEVICE_TABLE(of, mv_sata_dt_ids);
4293#endif
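
    /*
     * Illustrative only: a minimal sketch of the kind of device tree node the
     * platform probe path above binds to.  The compatible strings come from
     * mv_sata_dt_ids; every other value below is a made-up example, so consult
     * the SoC's .dtsi and the binding documentation for the real addresses,
     * interrupts and clock specifiers.  Per-port clocks are named by index and
     * optional per-port PHYs as "portN", matching mv_platform_probe().
     *
     *        sata@a0000 {
     *                compatible = "marvell,armada-370-sata";
     *                reg = <0xa0000 0x5000>;
     *                interrupts = <55>;
     *                clocks = <&gate_clk 15>, <&gate_clk 30>;
     *                clock-names = "0", "1";
     *        };
     */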
4294
4295static struct platform_driver mv_platform_driver = {
4296        .probe          = mv_platform_probe,
4297        .remove         = mv_platform_remove,
4298        .suspend        = mv_platform_suspend,
4299        .resume         = mv_platform_resume,
4300        .driver         = {
4301                .name = DRV_NAME,
4302                .of_match_table = of_match_ptr(mv_sata_dt_ids),
4303        },
4304};
4305
4306
4307#ifdef CONFIG_PCI
4308static int mv_pci_init_one(struct pci_dev *pdev,
4309                           const struct pci_device_id *ent);
4310#ifdef CONFIG_PM_SLEEP
4311static int mv_pci_device_resume(struct pci_dev *pdev);
4312#endif
4313
4314
4315static struct pci_driver mv_pci_driver = {
4316        .name                   = DRV_NAME,
4317        .id_table               = mv_pci_tbl,
4318        .probe                  = mv_pci_init_one,
4319        .remove                 = ata_pci_remove_one,
4320#ifdef CONFIG_PM_SLEEP
4321        .suspend                = ata_pci_device_suspend,
4322        .resume                 = mv_pci_device_resume,
4323#endif
4324
4325};
4326
4327/**
4328 *      mv_print_info - Dump key info to kernel log for perusal.
4329 *      @host: ATA host to print info about
4330 *
4331 *      FIXME: complete this.
4332 *
4333 *      LOCKING:
4334 *      Inherited from caller.
4335 */
4336static void mv_print_info(struct ata_host *host)
4337{
4338        struct pci_dev *pdev = to_pci_dev(host->dev);
4339        struct mv_host_priv *hpriv = host->private_data;
4340        u8 scc;
4341        const char *scc_s, *gen;
4342
4343        /* Read the PCI class code so we can report whether the chip
4344         * presents itself as a SCSI or a RAID controller.
4345         */
4346        pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
4347        if (scc == 0)
4348                scc_s = "SCSI";
4349        else if (scc == 0x01)
4350                scc_s = "RAID";
4351        else
4352                scc_s = "?";
4353
4354        if (IS_GEN_I(hpriv))
4355                gen = "I";
4356        else if (IS_GEN_II(hpriv))
4357                gen = "II";
4358        else if (IS_GEN_IIE(hpriv))
4359                gen = "IIE";
4360        else
4361                gen = "?";
4362
4363        dev_info(&pdev->dev, "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
4364                 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
4365                 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
4366}
4367
4368/**
4369 *      mv_pci_init_one - handle a positive probe of a PCI Marvell host
4370 *      @pdev: PCI device found
4371 *      @ent: PCI device ID entry for the matched host
4372 *
4373 *      LOCKING:
4374 *      Inherited from caller.
4375 */
4376static int mv_pci_init_one(struct pci_dev *pdev,
4377                           const struct pci_device_id *ent)
4378{
4379        unsigned int board_idx = (unsigned int)ent->driver_data;
4380        const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
4381        struct ata_host *host;
4382        struct mv_host_priv *hpriv;
4383        int n_ports, port, rc;
4384
4385        ata_print_version_once(&pdev->dev, DRV_VERSION);
4386
4387        /* allocate host */
4388        n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
4389
4390        host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
4391        hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
4392        if (!host || !hpriv)
4393                return -ENOMEM;
4394        host->private_data = hpriv;
4395        hpriv->n_ports = n_ports;
4396        hpriv->board_idx = board_idx;
4397
4398        /* acquire resources */
4399        rc = pcim_enable_device(pdev);
4400        if (rc)
4401                return rc;
4402
4403        rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
4404        if (rc == -EBUSY)
4405                pcim_pin_device(pdev);
4406        if (rc)
4407                return rc;
4408        host->iomap = pcim_iomap_table(pdev);
4409        hpriv->base = host->iomap[MV_PRIMARY_BAR];
4410
4411        rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4412        if (rc) {
4413                dev_err(&pdev->dev, "DMA enable failed\n");
4414                return rc;
4415        }
4416
4417        rc = mv_create_dma_pools(hpriv, &pdev->dev);
4418        if (rc)
4419                return rc;
4420
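            /*
             * Record each port's register window, relative to the primary
             * BAR, so that it shows up in the port descriptions printed when
             * the host is registered.
             */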
4421        for (port = 0; port < host->n_ports; port++) {
4422                struct ata_port *ap = host->ports[port];
4423                void __iomem *port_mmio = mv_port_base(hpriv->base, port);
4424                unsigned int offset = port_mmio - hpriv->base;
4425
4426                ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
4427                ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
4428        }
4429
4430        /* initialize adapter */
4431        rc = mv_init_host(host);
4432        if (rc)
4433                return rc;
4434
4435        /* Enable message-signaled interrupts (MSI), if requested */
4436        if (msi && pci_enable_msi(pdev) == 0)
4437                hpriv->hp_flags |= MV_HP_FLAG_MSI;
4438
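            /* Dump part of the PCI config space (debug aid) and print a short
             * summary of the adapter before activating it.
             */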
4439        mv_dump_pci_cfg(pdev, 0x68);
4440        mv_print_info(host);
4441
4442        pci_set_master(pdev);
4443        pci_try_set_mwi(pdev);
4444        return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
4445                                 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
4446}
4447
4448#ifdef CONFIG_PM_SLEEP
4449static int mv_pci_device_resume(struct pci_dev *pdev)
4450{
4451        struct ata_host *host = pci_get_drvdata(pdev);
4452        int rc;
4453
4454        rc = ata_pci_device_do_resume(pdev);
4455        if (rc)
4456                return rc;
4457
4458        /* initialize adapter */
4459        rc = mv_init_host(host);
4460        if (rc)
4461                return rc;
4462
4463        ata_host_resume(host);
4464
4465        return 0;
4466}
4467#endif
4468#endif
4469
4470static int __init mv_init(void)
4471{
4472        int rc = -ENODEV;
4473#ifdef CONFIG_PCI
4474        rc = pci_register_driver(&mv_pci_driver);
4475        if (rc < 0)
4476                return rc;
4477#endif
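            /* Register the platform driver as well; if that fails, unwind the
             * PCI registration done above.
             */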
4478        rc = platform_driver_register(&mv_platform_driver);
4479
4480#ifdef CONFIG_PCI
4481        if (rc < 0)
4482                pci_unregister_driver(&mv_pci_driver);
4483#endif
4484        return rc;
4485}
4486
4487static void __exit mv_exit(void)
4488{
4489#ifdef CONFIG_PCI
4490        pci_unregister_driver(&mv_pci_driver);
4491#endif
4492        platform_driver_unregister(&mv_platform_driver);
4493}
4494
4495MODULE_AUTHOR("Brett Russ");
4496MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
4497MODULE_LICENSE("GPL v2");
4498MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
4499MODULE_VERSION(DRV_VERSION);
4500MODULE_ALIAS("platform:" DRV_NAME);
4501
4502module_init(mv_init);
4503module_exit(mv_exit);
4504