linux/drivers/ata/sata_mv.c
   1/*
   2 * sata_mv.c - Marvell SATA support
   3 *
   4 * Copyright 2008-2009: Marvell Corporation, all rights reserved.
   5 * Copyright 2005: EMC Corporation, all rights reserved.
   6 * Copyright 2005 Red Hat, Inc.  All rights reserved.
   7 *
   8 * Originally written by Brett Russ.
   9 * Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>.
  10 *
  11 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
  12 *
  13 * This program is free software; you can redistribute it and/or modify
  14 * it under the terms of the GNU General Public License as published by
  15 * the Free Software Foundation; version 2 of the License.
  16 *
  17 * This program is distributed in the hope that it will be useful,
  18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  20 * GNU General Public License for more details.
  21 *
  22 * You should have received a copy of the GNU General Public License
  23 * along with this program; if not, write to the Free Software
  24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  25 *
  26 */
  27
  28/*
  29 * sata_mv TODO list:
  30 *
  31 * --> Develop a low-power-consumption strategy, and implement it.
  32 *
  33 * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds.
  34 *
  35 * --> [Experiment, Marvell value added] Is it possible to use target
  36 *       mode to cross-connect two Linux boxes with Marvell cards?  If so,
  37 *       creating LibATA target mode support would be very interesting.
  38 *
  39 *       Target mode, for those without docs, is the ability to directly
  40 *       connect two SATA ports.
  41 */
  42
  43/*
  44 * 80x1-B2 errata PCI#11:
  45 *
  46 * Users of the 6041/6081 Rev.B2 chips (current is C0)
  47 * should be careful to insert those cards only onto PCI-X bus #0,
  48 * and only in device slots 0..7, not higher.  The chips may not
  49 * work correctly otherwise  (note: this is a pretty rare condition).
  50 */
  51
  52#include <linux/kernel.h>
  53#include <linux/module.h>
  54#include <linux/pci.h>
  55#include <linux/init.h>
  56#include <linux/blkdev.h>
  57#include <linux/delay.h>
  58#include <linux/interrupt.h>
  59#include <linux/dmapool.h>
  60#include <linux/dma-mapping.h>
  61#include <linux/device.h>
  62#include <linux/clk.h>
  63#include <linux/platform_device.h>
  64#include <linux/ata_platform.h>
  65#include <linux/mbus.h>
  66#include <linux/bitops.h>
  67#include <linux/gfp.h>
  68#include <linux/of.h>
  69#include <linux/of_irq.h>
  70#include <scsi/scsi_host.h>
  71#include <scsi/scsi_cmnd.h>
  72#include <scsi/scsi_device.h>
  73#include <linux/libata.h>
  74
  75#define DRV_NAME        "sata_mv"
  76#define DRV_VERSION     "1.28"
  77
  78/*
  79 * module options
  80 */
  81
  82#ifdef CONFIG_PCI
  83static int msi;
  84module_param(msi, int, S_IRUGO);
  85MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
  86#endif
  87
  88static int irq_coalescing_io_count;
  89module_param(irq_coalescing_io_count, int, S_IRUGO);
  90MODULE_PARM_DESC(irq_coalescing_io_count,
  91                 "IRQ coalescing I/O count threshold (0..255)");
  92
  93static int irq_coalescing_usecs;
  94module_param(irq_coalescing_usecs, int, S_IRUGO);
  95MODULE_PARM_DESC(irq_coalescing_usecs,
  96                 "IRQ coalescing time threshold in usecs");
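
/*
 * Illustrative usage (example values only, not a recommendation): coalescing
 * only takes effect when both thresholds are non-zero, e.g.
 *
 *      modprobe sata_mv irq_coalescing_io_count=4 irq_coalescing_usecs=100
 */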
  97
  98enum {
  99        /* BARs are enumerated in terms of pci_resource_start() */
 100        MV_PRIMARY_BAR          = 0,    /* offset 0x10: memory space */
 101        MV_IO_BAR               = 2,    /* offset 0x18: IO space */
 102        MV_MISC_BAR             = 3,    /* offset 0x1c: FLASH, NVRAM, SRAM */
 103
 104        MV_MAJOR_REG_AREA_SZ    = 0x10000,      /* 64KB */
 105        MV_MINOR_REG_AREA_SZ    = 0x2000,       /* 8KB */
 106
 107        /* For use with both IRQ coalescing methods ("all ports" or "per-HC") */
 108        COAL_CLOCKS_PER_USEC    = 150,          /* for calculating COAL_TIMEs */
 109        MAX_COAL_TIME_THRESHOLD = ((1 << 24) - 1), /* internal clocks count */
 110        MAX_COAL_IO_COUNT       = 255,          /* completed I/O count */
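        /* Derived note: at 150 clocks per usec, the 24-bit time threshold
         * caps the coalescing delay at roughly 112 ms. */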
 111
 112        MV_PCI_REG_BASE         = 0,
 113
 114        /*
 115         * Per-chip ("all ports") interrupt coalescing feature.
 116         * This is only for GEN_II / GEN_IIE hardware.
 117         *
 118         * Coalescing defers the interrupt until either the IO_THRESHOLD
 119         * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
 120         */
 121        COAL_REG_BASE           = 0x18000,
 122        IRQ_COAL_CAUSE          = (COAL_REG_BASE + 0x08),
 123        ALL_PORTS_COAL_IRQ      = (1 << 4),     /* all ports irq event */
 124
 125        IRQ_COAL_IO_THRESHOLD   = (COAL_REG_BASE + 0xcc),
 126        IRQ_COAL_TIME_THRESHOLD = (COAL_REG_BASE + 0xd0),
 127
 128        /*
 129         * Registers for the (unused here) transaction coalescing feature:
 130         */
 131        TRAN_COAL_CAUSE_LO      = (COAL_REG_BASE + 0x88),
 132        TRAN_COAL_CAUSE_HI      = (COAL_REG_BASE + 0x8c),
 133
 134        SATAHC0_REG_BASE        = 0x20000,
 135        FLASH_CTL               = 0x1046c,
 136        GPIO_PORT_CTL           = 0x104f0,
 137        RESET_CFG               = 0x180d8,
 138
 139        MV_PCI_REG_SZ           = MV_MAJOR_REG_AREA_SZ,
 140        MV_SATAHC_REG_SZ        = MV_MAJOR_REG_AREA_SZ,
 141        MV_SATAHC_ARBTR_REG_SZ  = MV_MINOR_REG_AREA_SZ,         /* arbiter */
 142        MV_PORT_REG_SZ          = MV_MINOR_REG_AREA_SZ,
 143
 144        MV_MAX_Q_DEPTH          = 32,
 145        MV_MAX_Q_DEPTH_MASK     = MV_MAX_Q_DEPTH - 1,
 146
 147        /* CRQB needs alignment on a 1KB boundary. Size == 1KB
 148         * CRPB needs alignment on a 256B boundary. Size == 256B
 149         * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
 150         */
 151        MV_CRQB_Q_SZ            = (32 * MV_MAX_Q_DEPTH),
 152        MV_CRPB_Q_SZ            = (8 * MV_MAX_Q_DEPTH),
 153        MV_MAX_SG_CT            = 256,
 154        MV_SG_TBL_SZ            = (16 * MV_MAX_SG_CT),
 155
 156        /* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
 157        MV_PORT_HC_SHIFT        = 2,
 158        MV_PORTS_PER_HC         = (1 << MV_PORT_HC_SHIFT), /* 4 */
 159        /* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
 160        MV_PORT_MASK            = (MV_PORTS_PER_HC - 1),   /* 3 */
 161
 162        /* Host Flags */
 163        MV_FLAG_DUAL_HC         = (1 << 30),  /* two SATA Host Controllers */
 164
 165        MV_COMMON_FLAGS         = ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,
 166
 167        MV_GEN_I_FLAGS          = MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,
 168
 169        MV_GEN_II_FLAGS         = MV_COMMON_FLAGS | ATA_FLAG_NCQ |
 170                                  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA,
 171
 172        MV_GEN_IIE_FLAGS        = MV_GEN_II_FLAGS | ATA_FLAG_AN,
 173
 174        CRQB_FLAG_READ          = (1 << 0),
 175        CRQB_TAG_SHIFT          = 1,
 176        CRQB_IOID_SHIFT         = 6,    /* CRQB Gen-II/IIE IO Id shift */
 177        CRQB_PMP_SHIFT          = 12,   /* CRQB Gen-II/IIE PMP shift */
 178        CRQB_HOSTQ_SHIFT        = 17,   /* CRQB Gen-II/IIE HostQueTag shift */
 179        CRQB_CMD_ADDR_SHIFT     = 8,
 180        CRQB_CMD_CS             = (0x2 << 11),
 181        CRQB_CMD_LAST           = (1 << 15),
 182
 183        CRPB_FLAG_STATUS_SHIFT  = 8,
 184        CRPB_IOID_SHIFT_6       = 5,    /* CRPB Gen-II IO Id shift */
 185        CRPB_IOID_SHIFT_7       = 7,    /* CRPB Gen-IIE IO Id shift */
 186
 187        EPRD_FLAG_END_OF_TBL    = (1 << 31),
 188
 189        /* PCI interface registers */
 190
 191        MV_PCI_COMMAND          = 0xc00,
 192        MV_PCI_COMMAND_MWRCOM   = (1 << 4),     /* PCI Master Write Combining */
 193        MV_PCI_COMMAND_MRDTRIG  = (1 << 7),     /* PCI Master Read Trigger */
 194
 195        PCI_MAIN_CMD_STS        = 0xd30,
 196        STOP_PCI_MASTER         = (1 << 2),
 197        PCI_MASTER_EMPTY        = (1 << 3),
 198        GLOB_SFT_RST            = (1 << 4),
 199
 200        MV_PCI_MODE             = 0xd00,
 201        MV_PCI_MODE_MASK        = 0x30,
 202
 203        MV_PCI_EXP_ROM_BAR_CTL  = 0xd2c,
 204        MV_PCI_DISC_TIMER       = 0xd04,
 205        MV_PCI_MSI_TRIGGER      = 0xc38,
 206        MV_PCI_SERR_MASK        = 0xc28,
 207        MV_PCI_XBAR_TMOUT       = 0x1d04,
 208        MV_PCI_ERR_LOW_ADDRESS  = 0x1d40,
 209        MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
 210        MV_PCI_ERR_ATTRIBUTE    = 0x1d48,
 211        MV_PCI_ERR_COMMAND      = 0x1d50,
 212
 213        PCI_IRQ_CAUSE           = 0x1d58,
 214        PCI_IRQ_MASK            = 0x1d5c,
 215        PCI_UNMASK_ALL_IRQS     = 0x7fffff,     /* bits 22-0 */
 216
 217        PCIE_IRQ_CAUSE          = 0x1900,
 218        PCIE_IRQ_MASK           = 0x1910,
 219        PCIE_UNMASK_ALL_IRQS    = 0x40a,        /* assorted bits */
 220
 221        /* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
 222        PCI_HC_MAIN_IRQ_CAUSE   = 0x1d60,
 223        PCI_HC_MAIN_IRQ_MASK    = 0x1d64,
 224        SOC_HC_MAIN_IRQ_CAUSE   = 0x20020,
 225        SOC_HC_MAIN_IRQ_MASK    = 0x20024,
 226        ERR_IRQ                 = (1 << 0),     /* shift by (2 * port #) */
 227        DONE_IRQ                = (1 << 1),     /* shift by (2 * port #) */
 228        HC0_IRQ_PEND            = 0x1ff,        /* bits 0-8 = HC0's ports */
 229        HC_SHIFT                = 9,            /* bits 9-17 = HC1's ports */
 230        DONE_IRQ_0_3            = 0x000000aa,   /* DONE_IRQ ports 0,1,2,3 */
 231        DONE_IRQ_4_7            = (DONE_IRQ_0_3 << HC_SHIFT),  /* 4,5,6,7 */
 232        PCI_ERR                 = (1 << 18),
 233        TRAN_COAL_LO_DONE       = (1 << 19),    /* transaction coalescing */
 234        TRAN_COAL_HI_DONE       = (1 << 20),    /* transaction coalescing */
 235        PORTS_0_3_COAL_DONE     = (1 << 8),     /* HC0 IRQ coalescing */
 236        PORTS_4_7_COAL_DONE     = (1 << 17),    /* HC1 IRQ coalescing */
 237        ALL_PORTS_COAL_DONE     = (1 << 21),    /* GEN_II(E) IRQ coalescing */
 238        GPIO_INT                = (1 << 22),
 239        SELF_INT                = (1 << 23),
 240        TWSI_INT                = (1 << 24),
 241        HC_MAIN_RSVD            = (0x7f << 25), /* bits 31-25 */
 242        HC_MAIN_RSVD_5          = (0x1fff << 19), /* bits 31-19 */
 243        HC_MAIN_RSVD_SOC        = (0x3fffffb << 6),     /* bits 31-9, 7-6 */
 244
 245        /* SATAHC registers */
 246        HC_CFG                  = 0x00,
 247
 248        HC_IRQ_CAUSE            = 0x14,
 249        DMA_IRQ                 = (1 << 0),     /* shift by port # */
 250        HC_COAL_IRQ             = (1 << 4),     /* IRQ coalescing */
 251        DEV_IRQ                 = (1 << 8),     /* shift by port # */
 252
 253        /*
 254         * Per-HC (Host-Controller) interrupt coalescing feature.
 255         * This is present on all chip generations.
 256         *
 257         * Coalescing defers the interrupt until either the IO_THRESHOLD
 258         * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
 259         */
 260        HC_IRQ_COAL_IO_THRESHOLD        = 0x000c,
 261        HC_IRQ_COAL_TIME_THRESHOLD      = 0x0010,
 262
 263        SOC_LED_CTRL            = 0x2c,
 264        SOC_LED_CTRL_BLINK      = (1 << 0),     /* Active LED blink */
 265        SOC_LED_CTRL_ACT_PRESENCE = (1 << 2),   /* Multiplex dev presence */
 266                                                /*  with dev activity LED */
 267
 268        /* Shadow block registers */
 269        SHD_BLK                 = 0x100,
 270        SHD_CTL_AST             = 0x20,         /* ofs from SHD_BLK */
 271
 272        /* SATA registers */
 273        SATA_STATUS             = 0x300,  /* ctrl, err regs follow status */
 274        SATA_ACTIVE             = 0x350,
 275        FIS_IRQ_CAUSE           = 0x364,
 276        FIS_IRQ_CAUSE_AN        = (1 << 9),     /* async notification */
 277
 278        LTMODE                  = 0x30c,        /* requires read-after-write */
 279        LTMODE_BIT8             = (1 << 8),     /* unknown, but necessary */
 280
 281        PHY_MODE2               = 0x330,
 282        PHY_MODE3               = 0x310,
 283
 284        PHY_MODE4               = 0x314,        /* requires read-after-write */
 285        PHY_MODE4_CFG_MASK      = 0x00000003,   /* phy internal config field */
 286        PHY_MODE4_CFG_VALUE     = 0x00000001,   /* phy internal config field */
 287        PHY_MODE4_RSVD_ZEROS    = 0x5de3fffa,   /* Gen2e always write zeros */
 288        PHY_MODE4_RSVD_ONES     = 0x00000005,   /* Gen2e always write ones */
 289
 290        SATA_IFCTL              = 0x344,
 291        SATA_TESTCTL            = 0x348,
 292        SATA_IFSTAT             = 0x34c,
 293        VENDOR_UNIQUE_FIS       = 0x35c,
 294
 295        FISCFG                  = 0x360,
 296        FISCFG_WAIT_DEV_ERR     = (1 << 8),     /* wait for host on DevErr */
 297        FISCFG_SINGLE_SYNC      = (1 << 16),    /* SYNC on DMA activation */
 298
 299        PHY_MODE9_GEN2          = 0x398,
 300        PHY_MODE9_GEN1          = 0x39c,
 301        PHYCFG_OFS              = 0x3a0,        /* only in 65n devices */
 302
 303        MV5_PHY_MODE            = 0x74,
 304        MV5_LTMODE              = 0x30,
 305        MV5_PHY_CTL             = 0x0C,
 306        SATA_IFCFG              = 0x050,
 307
 308        MV_M2_PREAMP_MASK       = 0x7e0,
 309
 310        /* Port registers */
 311        EDMA_CFG                = 0,
 312        EDMA_CFG_Q_DEPTH        = 0x1f,         /* max device queue depth */
 313        EDMA_CFG_NCQ            = (1 << 5),     /* for R/W FPDMA queued */
 314        EDMA_CFG_NCQ_GO_ON_ERR  = (1 << 14),    /* continue on error */
 315        EDMA_CFG_RD_BRST_EXT    = (1 << 11),    /* read burst 512B */
 316        EDMA_CFG_WR_BUFF_LEN    = (1 << 13),    /* write buffer 512B */
 317        EDMA_CFG_EDMA_FBS       = (1 << 16),    /* EDMA FIS-Based Switching */
 318        EDMA_CFG_FBS            = (1 << 26),    /* FIS-Based Switching */
 319
 320        EDMA_ERR_IRQ_CAUSE      = 0x8,
 321        EDMA_ERR_IRQ_MASK       = 0xc,
 322        EDMA_ERR_D_PAR          = (1 << 0),     /* UDMA data parity err */
 323        EDMA_ERR_PRD_PAR        = (1 << 1),     /* UDMA PRD parity err */
 324        EDMA_ERR_DEV            = (1 << 2),     /* device error */
 325        EDMA_ERR_DEV_DCON       = (1 << 3),     /* device disconnect */
 326        EDMA_ERR_DEV_CON        = (1 << 4),     /* device connected */
 327        EDMA_ERR_SERR           = (1 << 5),     /* SError bits [WBDST] raised */
 328        EDMA_ERR_SELF_DIS       = (1 << 7),     /* Gen II/IIE self-disable */
 329        EDMA_ERR_SELF_DIS_5     = (1 << 8),     /* Gen I self-disable */
 330        EDMA_ERR_BIST_ASYNC     = (1 << 8),     /* BIST FIS or Async Notify */
 331        EDMA_ERR_TRANS_IRQ_7    = (1 << 8),     /* Gen IIE transport layer irq */
 332        EDMA_ERR_CRQB_PAR       = (1 << 9),     /* CRQB parity error */
 333        EDMA_ERR_CRPB_PAR       = (1 << 10),    /* CRPB parity error */
 334        EDMA_ERR_INTRL_PAR      = (1 << 11),    /* internal parity error */
 335        EDMA_ERR_IORDY          = (1 << 12),    /* IORdy timeout */
 336
 337        EDMA_ERR_LNK_CTRL_RX    = (0xf << 13),  /* link ctrl rx error */
 338        EDMA_ERR_LNK_CTRL_RX_0  = (1 << 13),    /* transient: CRC err */
 339        EDMA_ERR_LNK_CTRL_RX_1  = (1 << 14),    /* transient: FIFO err */
 340        EDMA_ERR_LNK_CTRL_RX_2  = (1 << 15),    /* fatal: caught SYNC */
 341        EDMA_ERR_LNK_CTRL_RX_3  = (1 << 16),    /* transient: FIS rx err */
 342
 343        EDMA_ERR_LNK_DATA_RX    = (0xf << 17),  /* link data rx error */
 344
 345        EDMA_ERR_LNK_CTRL_TX    = (0x1f << 21), /* link ctrl tx error */
 346        EDMA_ERR_LNK_CTRL_TX_0  = (1 << 21),    /* transient: CRC err */
 347        EDMA_ERR_LNK_CTRL_TX_1  = (1 << 22),    /* transient: FIFO err */
 348        EDMA_ERR_LNK_CTRL_TX_2  = (1 << 23),    /* transient: caught SYNC */
 349        EDMA_ERR_LNK_CTRL_TX_3  = (1 << 24),    /* transient: caught DMAT */
 350        EDMA_ERR_LNK_CTRL_TX_4  = (1 << 25),    /* transient: FIS collision */
 351
 352        EDMA_ERR_LNK_DATA_TX    = (0x1f << 26), /* link data tx error */
 353
 354        EDMA_ERR_TRANS_PROTO    = (1 << 31),    /* transport protocol error */
 355        EDMA_ERR_OVERRUN_5      = (1 << 5),
 356        EDMA_ERR_UNDERRUN_5     = (1 << 6),
 357
 358        EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
 359                                  EDMA_ERR_LNK_CTRL_RX_1 |
 360                                  EDMA_ERR_LNK_CTRL_RX_3 |
 361                                  EDMA_ERR_LNK_CTRL_TX,
 362
 363        EDMA_EH_FREEZE          = EDMA_ERR_D_PAR |
 364                                  EDMA_ERR_PRD_PAR |
 365                                  EDMA_ERR_DEV_DCON |
 366                                  EDMA_ERR_DEV_CON |
 367                                  EDMA_ERR_SERR |
 368                                  EDMA_ERR_SELF_DIS |
 369                                  EDMA_ERR_CRQB_PAR |
 370                                  EDMA_ERR_CRPB_PAR |
 371                                  EDMA_ERR_INTRL_PAR |
 372                                  EDMA_ERR_IORDY |
 373                                  EDMA_ERR_LNK_CTRL_RX_2 |
 374                                  EDMA_ERR_LNK_DATA_RX |
 375                                  EDMA_ERR_LNK_DATA_TX |
 376                                  EDMA_ERR_TRANS_PROTO,
 377
 378        EDMA_EH_FREEZE_5        = EDMA_ERR_D_PAR |
 379                                  EDMA_ERR_PRD_PAR |
 380                                  EDMA_ERR_DEV_DCON |
 381                                  EDMA_ERR_DEV_CON |
 382                                  EDMA_ERR_OVERRUN_5 |
 383                                  EDMA_ERR_UNDERRUN_5 |
 384                                  EDMA_ERR_SELF_DIS_5 |
 385                                  EDMA_ERR_CRQB_PAR |
 386                                  EDMA_ERR_CRPB_PAR |
 387                                  EDMA_ERR_INTRL_PAR |
 388                                  EDMA_ERR_IORDY,
 389
 390        EDMA_REQ_Q_BASE_HI      = 0x10,
 391        EDMA_REQ_Q_IN_PTR       = 0x14,         /* also contains BASE_LO */
 392
 393        EDMA_REQ_Q_OUT_PTR      = 0x18,
 394        EDMA_REQ_Q_PTR_SHIFT    = 5,
 395
 396        EDMA_RSP_Q_BASE_HI      = 0x1c,
 397        EDMA_RSP_Q_IN_PTR       = 0x20,
 398        EDMA_RSP_Q_OUT_PTR      = 0x24,         /* also contains BASE_LO */
 399        EDMA_RSP_Q_PTR_SHIFT    = 3,
 400
 401        EDMA_CMD                = 0x28,         /* EDMA command register */
 402        EDMA_EN                 = (1 << 0),     /* enable EDMA */
 403        EDMA_DS                 = (1 << 1),     /* disable EDMA; self-negated */
 404        EDMA_RESET              = (1 << 2),     /* reset eng/trans/link/phy */
 405
 406        EDMA_STATUS             = 0x30,         /* EDMA engine status */
 407        EDMA_STATUS_CACHE_EMPTY = (1 << 6),     /* GenIIe command cache empty */
 408        EDMA_STATUS_IDLE        = (1 << 7),     /* GenIIe EDMA enabled/idle */
 409
 410        EDMA_IORDY_TMOUT        = 0x34,
 411        EDMA_ARB_CFG            = 0x38,
 412
 413        EDMA_HALTCOND           = 0x60,         /* GenIIe halt conditions */
 414        EDMA_UNKNOWN_RSVD       = 0x6C,         /* GenIIe unknown/reserved */
 415
 416        BMDMA_CMD               = 0x224,        /* bmdma command register */
 417        BMDMA_STATUS            = 0x228,        /* bmdma status register */
 418        BMDMA_PRD_LOW           = 0x22c,        /* bmdma PRD addr 31:0 */
 419        BMDMA_PRD_HIGH          = 0x230,        /* bmdma PRD addr 63:32 */
 420
 421        /* Host private flags (hp_flags) */
 422        MV_HP_FLAG_MSI          = (1 << 0),
 423        MV_HP_ERRATA_50XXB0     = (1 << 1),
 424        MV_HP_ERRATA_50XXB2     = (1 << 2),
 425        MV_HP_ERRATA_60X1B2     = (1 << 3),
 426        MV_HP_ERRATA_60X1C0     = (1 << 4),
 427        MV_HP_GEN_I             = (1 << 6),     /* Generation I: 50xx */
 428        MV_HP_GEN_II            = (1 << 7),     /* Generation II: 60xx */
 429        MV_HP_GEN_IIE           = (1 << 8),     /* Generation IIE: 6042/7042 */
 430        MV_HP_PCIE              = (1 << 9),     /* PCIe bus/regs: 7042 */
 431        MV_HP_CUT_THROUGH       = (1 << 10),    /* can use EDMA cut-through */
 432        MV_HP_FLAG_SOC          = (1 << 11),    /* SystemOnChip, no PCI */
 433        MV_HP_QUIRK_LED_BLINK_EN = (1 << 12),   /* is led blinking enabled? */
 434
 435        /* Port private flags (pp_flags) */
 436        MV_PP_FLAG_EDMA_EN      = (1 << 0),     /* is EDMA engine enabled? */
 437        MV_PP_FLAG_NCQ_EN       = (1 << 1),     /* is EDMA set up for NCQ? */
 438        MV_PP_FLAG_FBS_EN       = (1 << 2),     /* is EDMA set up for FBS? */
 439        MV_PP_FLAG_DELAYED_EH   = (1 << 3),     /* delayed dev err handling */
 440        MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4),    /* ignore initial ATA_DRDY */
 441};
 442
 443#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
 444#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
 445#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
 446#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
 447#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)
 448
 449#define WINDOW_CTRL(i)          (0x20030 + ((i) << 4))
 450#define WINDOW_BASE(i)          (0x20034 + ((i) << 4))
 451
 452enum {
 453        /* DMA boundary 0xffff is required by the s/g splitting
 454         * we need on /length/ in mv_fill_sg().
 455         */
 456        MV_DMA_BOUNDARY         = 0xffffU,
 457
 458        /* mask of register bits containing lower 32 bits
 459         * of EDMA request queue DMA address
 460         */
 461        EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
 462
 463        /* ditto, for response queue */
 464        EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
 465};
 466
 467enum chip_type {
 468        chip_504x,
 469        chip_508x,
 470        chip_5080,
 471        chip_604x,
 472        chip_608x,
 473        chip_6042,
 474        chip_7042,
 475        chip_soc,
 476};
 477
 478/* Command ReQuest Block: 32B */
 479struct mv_crqb {
 480        __le32                  sg_addr;
 481        __le32                  sg_addr_hi;
 482        __le16                  ctrl_flags;
 483        __le16                  ata_cmd[11];
 484};
 485
 486struct mv_crqb_iie {
 487        __le32                  addr;
 488        __le32                  addr_hi;
 489        __le32                  flags;
 490        __le32                  len;
 491        __le32                  ata_cmd[4];
 492};
 493
 494/* Command ResPonse Block: 8B */
 495struct mv_crpb {
 496        __le16                  id;
 497        __le16                  flags;
 498        __le32                  tmstmp;
 499};
 500
 501/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
 502struct mv_sg {
 503        __le32                  addr;
 504        __le32                  flags_size;
 505        __le32                  addr_hi;
 506        __le32                  reserved;
 507};
 508
 509/*
 510 * We keep a local cache of a few frequently accessed port
 511 * registers here, to avoid having to read them (very slow)
 512 * when switching between EDMA and non-EDMA modes.
 513 */
 514struct mv_cached_regs {
 515        u32                     fiscfg;
 516        u32                     ltmode;
 517        u32                     haltcond;
 518        u32                     unknown_rsvd;
 519};
 520
 521struct mv_port_priv {
 522        struct mv_crqb          *crqb;
 523        dma_addr_t              crqb_dma;
 524        struct mv_crpb          *crpb;
 525        dma_addr_t              crpb_dma;
 526        struct mv_sg            *sg_tbl[MV_MAX_Q_DEPTH];
 527        dma_addr_t              sg_tbl_dma[MV_MAX_Q_DEPTH];
 528
 529        unsigned int            req_idx;
 530        unsigned int            resp_idx;
 531
 532        u32                     pp_flags;
 533        struct mv_cached_regs   cached;
 534        unsigned int            delayed_eh_pmp_map;
 535};
 536
 537struct mv_port_signal {
 538        u32                     amps;
 539        u32                     pre;
 540};
 541
 542struct mv_host_priv {
 543        u32                     hp_flags;
 544        unsigned int            board_idx;
 545        u32                     main_irq_mask;
 546        struct mv_port_signal   signal[8];
 547        const struct mv_hw_ops  *ops;
 548        int                     n_ports;
 549        void __iomem            *base;
 550        void __iomem            *main_irq_cause_addr;
 551        void __iomem            *main_irq_mask_addr;
 552        u32                     irq_cause_offset;
 553        u32                     irq_mask_offset;
 554        u32                     unmask_all_irqs;
 555
 556        /*
 557         * Needed on some devices that require their clocks to be enabled.
 558         * These are optional: if the platform device does not have any
 559         * clocks, they won't be used.  Also, if the underlying hardware
 560         * does not support the common clock framework (CONFIG_HAVE_CLK=n),
 561         * all the clock operations become no-ops (see clk.h).
 562         */
 563        struct clk              *clk;
 564        struct clk              **port_clks;
 565        /*
 566         * These consistent DMA memory pools give us guaranteed
 567         * alignment for hardware-accessed data structures,
 568         * and less memory waste in accomplishing the alignment.
 569         */
 570        struct dma_pool         *crqb_pool;
 571        struct dma_pool         *crpb_pool;
 572        struct dma_pool         *sg_tbl_pool;
 573};
 574
 575struct mv_hw_ops {
 576        void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
 577                           unsigned int port);
 578        void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
 579        void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
 580                           void __iomem *mmio);
 581        int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
 582                        unsigned int n_hc);
 583        void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
 584        void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
 585};
 586
 587static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
 588static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
 589static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
 590static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
 591static int mv_port_start(struct ata_port *ap);
 592static void mv_port_stop(struct ata_port *ap);
 593static int mv_qc_defer(struct ata_queued_cmd *qc);
 594static void mv_qc_prep(struct ata_queued_cmd *qc);
 595static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
 596static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
 597static int mv_hardreset(struct ata_link *link, unsigned int *class,
 598                        unsigned long deadline);
 599static void mv_eh_freeze(struct ata_port *ap);
 600static void mv_eh_thaw(struct ata_port *ap);
 601static void mv6_dev_config(struct ata_device *dev);
 602
 603static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
 604                           unsigned int port);
 605static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
 606static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
 607                           void __iomem *mmio);
 608static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
 609                        unsigned int n_hc);
 610static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
 611static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
 612
 613static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
 614                           unsigned int port);
 615static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
 616static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
 617                           void __iomem *mmio);
 618static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
 619                        unsigned int n_hc);
 620static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
 621static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
 622                                      void __iomem *mmio);
 623static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
 624                                      void __iomem *mmio);
 625static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
 626                                  void __iomem *mmio, unsigned int n_hc);
 627static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
 628                                      void __iomem *mmio);
 629static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
 630static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
 631                                  void __iomem *mmio, unsigned int port);
 632static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
 633static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
 634                             unsigned int port_no);
 635static int mv_stop_edma(struct ata_port *ap);
 636static int mv_stop_edma_engine(void __iomem *port_mmio);
 637static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);
 638
 639static void mv_pmp_select(struct ata_port *ap, int pmp);
 640static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
 641                                unsigned long deadline);
 642static int  mv_softreset(struct ata_link *link, unsigned int *class,
 643                                unsigned long deadline);
 644static void mv_pmp_error_handler(struct ata_port *ap);
 645static void mv_process_crpb_entries(struct ata_port *ap,
 646                                        struct mv_port_priv *pp);
 647
 648static void mv_sff_irq_clear(struct ata_port *ap);
 649static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
 650static void mv_bmdma_setup(struct ata_queued_cmd *qc);
 651static void mv_bmdma_start(struct ata_queued_cmd *qc);
 652static void mv_bmdma_stop(struct ata_queued_cmd *qc);
 653static u8   mv_bmdma_status(struct ata_port *ap);
 654static u8 mv_sff_check_status(struct ata_port *ap);
 655
 656/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 657 * because we have to allow room for worst case splitting of
 658 * PRDs for 64K boundaries in mv_fill_sg().
 659 */
 660#ifdef CONFIG_PCI
 661static struct scsi_host_template mv5_sht = {
 662        ATA_BASE_SHT(DRV_NAME),
 663        .sg_tablesize           = MV_MAX_SG_CT / 2,
 664        .dma_boundary           = MV_DMA_BOUNDARY,
 665};
 666#endif
 667static struct scsi_host_template mv6_sht = {
 668        ATA_NCQ_SHT(DRV_NAME),
 669        .can_queue              = MV_MAX_Q_DEPTH - 1,
 670        .sg_tablesize           = MV_MAX_SG_CT / 2,
 671        .dma_boundary           = MV_DMA_BOUNDARY,
 672};
 673
 674static struct ata_port_operations mv5_ops = {
 675        .inherits               = &ata_sff_port_ops,
 676
 677        .lost_interrupt         = ATA_OP_NULL,
 678
 679        .qc_defer               = mv_qc_defer,
 680        .qc_prep                = mv_qc_prep,
 681        .qc_issue               = mv_qc_issue,
 682
 683        .freeze                 = mv_eh_freeze,
 684        .thaw                   = mv_eh_thaw,
 685        .hardreset              = mv_hardreset,
 686
 687        .scr_read               = mv5_scr_read,
 688        .scr_write              = mv5_scr_write,
 689
 690        .port_start             = mv_port_start,
 691        .port_stop              = mv_port_stop,
 692};
 693
 694static struct ata_port_operations mv6_ops = {
 695        .inherits               = &ata_bmdma_port_ops,
 696
 697        .lost_interrupt         = ATA_OP_NULL,
 698
 699        .qc_defer               = mv_qc_defer,
 700        .qc_prep                = mv_qc_prep,
 701        .qc_issue               = mv_qc_issue,
 702
 703        .dev_config             = mv6_dev_config,
 704
 705        .freeze                 = mv_eh_freeze,
 706        .thaw                   = mv_eh_thaw,
 707        .hardreset              = mv_hardreset,
 708        .softreset              = mv_softreset,
 709        .pmp_hardreset          = mv_pmp_hardreset,
 710        .pmp_softreset          = mv_softreset,
 711        .error_handler          = mv_pmp_error_handler,
 712
 713        .scr_read               = mv_scr_read,
 714        .scr_write              = mv_scr_write,
 715
 716        .sff_check_status       = mv_sff_check_status,
 717        .sff_irq_clear          = mv_sff_irq_clear,
 718        .check_atapi_dma        = mv_check_atapi_dma,
 719        .bmdma_setup            = mv_bmdma_setup,
 720        .bmdma_start            = mv_bmdma_start,
 721        .bmdma_stop             = mv_bmdma_stop,
 722        .bmdma_status           = mv_bmdma_status,
 723
 724        .port_start             = mv_port_start,
 725        .port_stop              = mv_port_stop,
 726};
 727
 728static struct ata_port_operations mv_iie_ops = {
 729        .inherits               = &mv6_ops,
 730        .dev_config             = ATA_OP_NULL,
 731        .qc_prep                = mv_qc_prep_iie,
 732};
 733
 734static const struct ata_port_info mv_port_info[] = {
 735        {  /* chip_504x */
 736                .flags          = MV_GEN_I_FLAGS,
 737                .pio_mask       = ATA_PIO4,
 738                .udma_mask      = ATA_UDMA6,
 739                .port_ops       = &mv5_ops,
 740        },
 741        {  /* chip_508x */
 742                .flags          = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
 743                .pio_mask       = ATA_PIO4,
 744                .udma_mask      = ATA_UDMA6,
 745                .port_ops       = &mv5_ops,
 746        },
 747        {  /* chip_5080 */
 748                .flags          = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
 749                .pio_mask       = ATA_PIO4,
 750                .udma_mask      = ATA_UDMA6,
 751                .port_ops       = &mv5_ops,
 752        },
 753        {  /* chip_604x */
 754                .flags          = MV_GEN_II_FLAGS,
 755                .pio_mask       = ATA_PIO4,
 756                .udma_mask      = ATA_UDMA6,
 757                .port_ops       = &mv6_ops,
 758        },
 759        {  /* chip_608x */
 760                .flags          = MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
 761                .pio_mask       = ATA_PIO4,
 762                .udma_mask      = ATA_UDMA6,
 763                .port_ops       = &mv6_ops,
 764        },
 765        {  /* chip_6042 */
 766                .flags          = MV_GEN_IIE_FLAGS,
 767                .pio_mask       = ATA_PIO4,
 768                .udma_mask      = ATA_UDMA6,
 769                .port_ops       = &mv_iie_ops,
 770        },
 771        {  /* chip_7042 */
 772                .flags          = MV_GEN_IIE_FLAGS,
 773                .pio_mask       = ATA_PIO4,
 774                .udma_mask      = ATA_UDMA6,
 775                .port_ops       = &mv_iie_ops,
 776        },
 777        {  /* chip_soc */
 778                .flags          = MV_GEN_IIE_FLAGS,
 779                .pio_mask       = ATA_PIO4,
 780                .udma_mask      = ATA_UDMA6,
 781                .port_ops       = &mv_iie_ops,
 782        },
 783};
 784
 785static const struct pci_device_id mv_pci_tbl[] = {
 786        { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
 787        { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
 788        { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
 789        { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
 790        /* RocketRAID 1720/174x have different identifiers */
 791        { PCI_VDEVICE(TTI, 0x1720), chip_6042 },
 792        { PCI_VDEVICE(TTI, 0x1740), chip_6042 },
 793        { PCI_VDEVICE(TTI, 0x1742), chip_6042 },
 794
 795        { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
 796        { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
 797        { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
 798        { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
 799        { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
 800
 801        { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
 802
 803        /* Adaptec 1430SA */
 804        { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
 805
 806        /* Marvell 7042 support */
 807        { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
 808
 809        /* Highpoint RocketRAID PCIe series */
 810        { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
 811        { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
 812
 813        { }                     /* terminate list */
 814};
 815
 816static const struct mv_hw_ops mv5xxx_ops = {
 817        .phy_errata             = mv5_phy_errata,
 818        .enable_leds            = mv5_enable_leds,
 819        .read_preamp            = mv5_read_preamp,
 820        .reset_hc               = mv5_reset_hc,
 821        .reset_flash            = mv5_reset_flash,
 822        .reset_bus              = mv5_reset_bus,
 823};
 824
 825static const struct mv_hw_ops mv6xxx_ops = {
 826        .phy_errata             = mv6_phy_errata,
 827        .enable_leds            = mv6_enable_leds,
 828        .read_preamp            = mv6_read_preamp,
 829        .reset_hc               = mv6_reset_hc,
 830        .reset_flash            = mv6_reset_flash,
 831        .reset_bus              = mv_reset_pci_bus,
 832};
 833
 834static const struct mv_hw_ops mv_soc_ops = {
 835        .phy_errata             = mv6_phy_errata,
 836        .enable_leds            = mv_soc_enable_leds,
 837        .read_preamp            = mv_soc_read_preamp,
 838        .reset_hc               = mv_soc_reset_hc,
 839        .reset_flash            = mv_soc_reset_flash,
 840        .reset_bus              = mv_soc_reset_bus,
 841};
 842
 843static const struct mv_hw_ops mv_soc_65n_ops = {
 844        .phy_errata             = mv_soc_65n_phy_errata,
 845        .enable_leds            = mv_soc_enable_leds,
 846        .reset_hc               = mv_soc_reset_hc,
 847        .reset_flash            = mv_soc_reset_flash,
 848        .reset_bus              = mv_soc_reset_bus,
 849};
 850
 851/*
 852 * Functions
 853 */
 854
 855static inline void writelfl(unsigned long data, void __iomem *addr)
 856{
 857        writel(data, addr);
 858        (void) readl(addr);     /* flush to avoid PCI posted write */
 859}
 860
 861static inline unsigned int mv_hc_from_port(unsigned int port)
 862{
 863        return port >> MV_PORT_HC_SHIFT;
 864}
 865
 866static inline unsigned int mv_hardport_from_port(unsigned int port)
 867{
 868        return port & MV_PORT_MASK;
 869}
 870
 871/*
 872 * Consolidate some rather tricky bit shift calculations.
 873 * This is hot-path stuff, so not a function.
 874 * Simple code, with two return values, so macro rather than inline.
 875 *
 876 * port is the sole input, in range 0..7.
 877 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
 878 * hardport is the other output, in range 0..3.
 879 *
 880 * Note that port and hardport may be the same variable in some cases.
 881 */
 882#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)    \
 883{                                                               \
 884        shift    = mv_hc_from_port(port) * HC_SHIFT;            \
 885        hardport = mv_hardport_from_port(port);                 \
 886        shift   += hardport * 2;                                \
 887}
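
/*
 * Worked example (derived from the definitions above): for port 6,
 * mv_hc_from_port() gives HC 1 and mv_hardport_from_port() gives
 * hardport 2, so shift = 1 * HC_SHIFT + 2 * 2 = 13.  That places the
 * port's ERR_IRQ at bit 13 and its DONE_IRQ at bit 14 of main_irq_cause,
 * consistent with DONE_IRQ_4_7 (0xaa << 9) having bit 14 set.
 */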
 888
 889static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
 890{
 891        return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
 892}
 893
 894static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
 895                                                 unsigned int port)
 896{
 897        return mv_hc_base(base, mv_hc_from_port(port));
 898}
 899
 900static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
 901{
 902        return  mv_hc_base_from_port(base, port) +
 903                MV_SATAHC_ARBTR_REG_SZ +
 904                (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
 905}
 906
 907static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
 908{
 909        void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
 910        unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
 911
 912        return hc_mmio + ofs;
 913}
 914
 915static inline void __iomem *mv_host_base(struct ata_host *host)
 916{
 917        struct mv_host_priv *hpriv = host->private_data;
 918        return hpriv->base;
 919}
 920
 921static inline void __iomem *mv_ap_base(struct ata_port *ap)
 922{
 923        return mv_port_base(mv_host_base(ap->host), ap->port_no);
 924}
 925
 926static inline int mv_get_hc_count(unsigned long port_flags)
 927{
 928        return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
 929}
 930
 931/**
 932 *      mv_save_cached_regs - (re-)initialize cached port registers
 933 *      @ap: the port whose registers we are caching
 934 *
 935 *      Initialize the local cache of port registers,
 936 *      so that reading them over and over again can
 937 *      be avoided on the hotter paths of this driver.
 938 *      This saves a few microseconds each time we switch
  939 *      to/from EDMA mode to perform, e.g., a drive cache flush.
 940 */
 941static void mv_save_cached_regs(struct ata_port *ap)
 942{
 943        void __iomem *port_mmio = mv_ap_base(ap);
 944        struct mv_port_priv *pp = ap->private_data;
 945
 946        pp->cached.fiscfg = readl(port_mmio + FISCFG);
 947        pp->cached.ltmode = readl(port_mmio + LTMODE);
 948        pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND);
 949        pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD);
 950}
 951
 952/**
 953 *      mv_write_cached_reg - write to a cached port register
 954 *      @addr: hardware address of the register
 955 *      @old: pointer to cached value of the register
 956 *      @new: new value for the register
 957 *
 958 *      Write a new value to a cached register,
 959 *      but only if the value is different from before.
 960 */
 961static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
 962{
 963        if (new != *old) {
 964                unsigned long laddr;
 965                *old = new;
 966                /*
 967                 * Workaround for 88SX60x1-B2 FEr SATA#13:
 968                 * Read-after-write is needed to prevent generating 64-bit
 969                 * write cycles on the PCI bus for SATA interface registers
 970                 * at offsets ending in 0x4 or 0xc.
 971                 *
 972                 * Looks like a lot of fuss, but it avoids an unnecessary
 973                 * +1 usec read-after-write delay for unaffected registers.
 974                 */
 975                laddr = (long)addr & 0xffff;
 976                if (laddr >= 0x300 && laddr <= 0x33c) {
 977                        laddr &= 0x000f;
 978                        if (laddr == 0x4 || laddr == 0xc) {
 979                                writelfl(new, addr); /* read after write */
 980                                return;
 981                        }
 982                }
 983                writel(new, addr); /* unaffected by the errata */
 984        }
 985}
 986
 987static void mv_set_edma_ptrs(void __iomem *port_mmio,
 988                             struct mv_host_priv *hpriv,
 989                             struct mv_port_priv *pp)
 990{
 991        u32 index;
 992
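        /*
         * Layout note (derived from the masks and shifts defined above):
         * the request-queue IN pointer and the response-queue OUT pointer
         * also carry BASE_LO, so the writes below pack a 1KB-aligned request
         * base into bits 31:10 with the index in bits 9:5, and a 256B-aligned
         * response base into bits 31:8 with the index in bits 7:3.
         */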
 993        /*
 994         * initialize request queue
 995         */
 996        pp->req_idx &= MV_MAX_Q_DEPTH_MASK;     /* paranoia */
 997        index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
 998
 999        WARN_ON(pp->crqb_dma & 0x3ff);
1000        writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI);
1001        writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
1002                 port_mmio + EDMA_REQ_Q_IN_PTR);
1003        writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR);
1004
1005        /*
1006         * initialize response queue
1007         */
1008        pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;    /* paranoia */
1009        index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;
1010
1011        WARN_ON(pp->crpb_dma & 0xff);
1012        writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI);
1013        writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR);
1014        writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
1015                 port_mmio + EDMA_RSP_Q_OUT_PTR);
1016}
1017
1018static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
1019{
1020        /*
1021         * When writing to the main_irq_mask in hardware,
1022         * we must ensure exclusivity between the interrupt coalescing bits
1023         * and the corresponding individual port DONE_IRQ bits.
1024         *
1025         * Note that this register is really an "IRQ enable" register,
1026         * not an "IRQ mask" register as Marvell's naming might suggest.
1027         */
1028        if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE))
1029                mask &= ~DONE_IRQ_0_3;
1030        if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE))
1031                mask &= ~DONE_IRQ_4_7;
1032        writelfl(mask, hpriv->main_irq_mask_addr);
1033}
1034
1035static void mv_set_main_irq_mask(struct ata_host *host,
1036                                 u32 disable_bits, u32 enable_bits)
1037{
1038        struct mv_host_priv *hpriv = host->private_data;
1039        u32 old_mask, new_mask;
1040
1041        old_mask = hpriv->main_irq_mask;
1042        new_mask = (old_mask & ~disable_bits) | enable_bits;
1043        if (new_mask != old_mask) {
1044                hpriv->main_irq_mask = new_mask;
1045                mv_write_main_irq_mask(new_mask, hpriv);
1046        }
1047}
1048
1049static void mv_enable_port_irqs(struct ata_port *ap,
1050                                     unsigned int port_bits)
1051{
1052        unsigned int shift, hardport, port = ap->port_no;
1053        u32 disable_bits, enable_bits;
1054
1055        MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
1056
1057        disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
1058        enable_bits  = port_bits << shift;
1059        mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
1060}
1061
1062static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
1063                                          void __iomem *port_mmio,
1064                                          unsigned int port_irqs)
1065{
1066        struct mv_host_priv *hpriv = ap->host->private_data;
1067        int hardport = mv_hardport_from_port(ap->port_no);
1068        void __iomem *hc_mmio = mv_hc_base_from_port(
1069                                mv_host_base(ap->host), ap->port_no);
1070        u32 hc_irq_cause;
1071
1072        /* clear EDMA event indicators, if any */
1073        writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
1074
1075        /* clear pending irq events */
1076        hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
1077        writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
1078
1079        /* clear FIS IRQ Cause */
1080        if (IS_GEN_IIE(hpriv))
1081                writelfl(0, port_mmio + FIS_IRQ_CAUSE);
1082
1083        mv_enable_port_irqs(ap, port_irqs);
1084}
1085
1086static void mv_set_irq_coalescing(struct ata_host *host,
1087                                  unsigned int count, unsigned int usecs)
1088{
1089        struct mv_host_priv *hpriv = host->private_data;
1090        void __iomem *mmio = hpriv->base, *hc_mmio;
1091        u32 coal_enable = 0;
1092        unsigned long flags;
1093        unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC;
1094        const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
1095                                                        ALL_PORTS_COAL_DONE;
1096
1097        /* Disable IRQ coalescing if either threshold is zero */
1098        if (!usecs || !count) {
1099                clks = count = 0;
1100        } else {
1101                /* Respect maximum limits of the hardware */
1102                clks = usecs * COAL_CLOCKS_PER_USEC;
1103                if (clks > MAX_COAL_TIME_THRESHOLD)
1104                        clks = MAX_COAL_TIME_THRESHOLD;
1105                if (count > MAX_COAL_IO_COUNT)
1106                        count = MAX_COAL_IO_COUNT;
1107        }
1108
1109        spin_lock_irqsave(&host->lock, flags);
1110        mv_set_main_irq_mask(host, coal_disable, 0);
1111
1112        if (is_dual_hc && !IS_GEN_I(hpriv)) {
1113                /*
1114                 * GEN_II/GEN_IIE with dual host controllers:
1115                 * one set of global thresholds for the entire chip.
1116                 */
1117                writel(clks,  mmio + IRQ_COAL_TIME_THRESHOLD);
1118                writel(count, mmio + IRQ_COAL_IO_THRESHOLD);
1119                /* clear leftover coal IRQ bit */
1120                writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
1121                if (count)
1122                        coal_enable = ALL_PORTS_COAL_DONE;
1123                clks = count = 0; /* force clearing of regular regs below */
1124        }
1125
1126        /*
1127         * All chips: independent thresholds for each HC on the chip.
1128         */
1129        hc_mmio = mv_hc_base_from_port(mmio, 0);
1130        writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
1131        writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
1132        writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
1133        if (count)
1134                coal_enable |= PORTS_0_3_COAL_DONE;
1135        if (is_dual_hc) {
1136                hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
1137                writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
1138                writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
1139                writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
1140                if (count)
1141                        coal_enable |= PORTS_4_7_COAL_DONE;
1142        }
1143
1144        mv_set_main_irq_mask(host, 0, coal_enable);
1145        spin_unlock_irqrestore(&host->lock, flags);
1146}
1147
1148/**
1149 *      mv_start_edma - Enable eDMA engine
 1150 *      @port_mmio: port base address
1151 *      @pp: port private data
1152 *
 1153 *      Stop the engine first if it is already running in the wrong NCQ
 1154 *      mode, then configure and (re)enable eDMA for this command's protocol.
1155 *
1156 *      LOCKING:
1157 *      Inherited from caller.
1158 */
1159static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
1160                         struct mv_port_priv *pp, u8 protocol)
1161{
1162        int want_ncq = (protocol == ATA_PROT_NCQ);
1163
1164        if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1165                int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
1166                if (want_ncq != using_ncq)
1167                        mv_stop_edma(ap);
1168        }
1169        if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
1170                struct mv_host_priv *hpriv = ap->host->private_data;
1171
1172                mv_edma_cfg(ap, want_ncq, 1);
1173
1174                mv_set_edma_ptrs(port_mmio, hpriv, pp);
1175                mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);
1176
1177                writelfl(EDMA_EN, port_mmio + EDMA_CMD);
1178                pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
1179        }
1180}
1181
1182static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
1183{
1184        void __iomem *port_mmio = mv_ap_base(ap);
1185        const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
1186        const int per_loop = 5, timeout = (15 * 1000 / per_loop);
1187        int i;
1188
1189        /*
1190         * Wait for the EDMA engine to finish transactions in progress.
1191         * No idea what a good "timeout" value might be, but measurements
1192         * indicate that it often requires hundreds of microseconds
1193         * with two drives in-use.  So we use the 15msec value above
1194         * as a rough guess at what even more drives might require.
1195         */
1196        for (i = 0; i < timeout; ++i) {
1197                u32 edma_stat = readl(port_mmio + EDMA_STATUS);
1198                if ((edma_stat & empty_idle) == empty_idle)
1199                        break;
1200                udelay(per_loop);
1201        }
1202        /* ata_port_info(ap, "%s: %u+ usecs\n", __func__, i); */
1203}
1204
1205/**
1206 *      mv_stop_edma_engine - Disable eDMA engine
1207 *      @port_mmio: io base address
1208 *
1209 *      LOCKING:
1210 *      Inherited from caller.
1211 */
1212static int mv_stop_edma_engine(void __iomem *port_mmio)
1213{
1214        int i;
1215
1216        /* Disable eDMA.  The disable bit auto clears. */
1217        writelfl(EDMA_DS, port_mmio + EDMA_CMD);
1218
1219        /* Wait for the chip to confirm eDMA is off. */
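        /* (Polls every 10 us, giving up after roughly 100 ms total.) */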
1220        for (i = 10000; i > 0; i--) {
1221                u32 reg = readl(port_mmio + EDMA_CMD);
1222                if (!(reg & EDMA_EN))
1223                        return 0;
1224                udelay(10);
1225        }
1226        return -EIO;
1227}
1228
1229static int mv_stop_edma(struct ata_port *ap)
1230{
1231        void __iomem *port_mmio = mv_ap_base(ap);
1232        struct mv_port_priv *pp = ap->private_data;
1233        int err = 0;
1234
1235        if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
1236                return 0;
1237        pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1238        mv_wait_for_edma_empty_idle(ap);
1239        if (mv_stop_edma_engine(port_mmio)) {
1240                ata_port_err(ap, "Unable to stop eDMA\n");
1241                err = -EIO;
1242        }
1243        mv_edma_cfg(ap, 0, 0);
1244        return err;
1245}
1246
1247#ifdef ATA_DEBUG
1248static void mv_dump_mem(void __iomem *start, unsigned bytes)
1249{
1250        int b, w;
1251        for (b = 0; b < bytes; ) {
1252                DPRINTK("%p: ", start + b);
1253                for (w = 0; b < bytes && w < 4; w++) {
1254                        printk("%08x ", readl(start + b));
1255                        b += sizeof(u32);
1256                }
1257                printk("\n");
1258        }
1259}
1260#endif
1261#if defined(ATA_DEBUG) || defined(CONFIG_PCI)
1262static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
1263{
1264#ifdef ATA_DEBUG
1265        int b, w;
1266        u32 dw;
1267        for (b = 0; b < bytes; ) {
1268                DPRINTK("%02x: ", b);
1269                for (w = 0; b < bytes && w < 4; w++) {
1270                        (void) pci_read_config_dword(pdev, b, &dw);
1271                        printk("%08x ", dw);
1272                        b += sizeof(u32);
1273                }
1274                printk("\n");
1275        }
1276#endif
1277}
1278#endif
1279static void mv_dump_all_regs(void __iomem *mmio_base, int port,
1280                             struct pci_dev *pdev)
1281{
1282#ifdef ATA_DEBUG
1283        void __iomem *hc_base = mv_hc_base(mmio_base,
1284                                           port >> MV_PORT_HC_SHIFT);
1285        void __iomem *port_base;
1286        int start_port, num_ports, p, start_hc, num_hcs, hc;
1287
1288        if (0 > port) {
1289                start_hc = start_port = 0;
 1290                num_ports = 8;          /* should be benign for 4-port devices */
1291                num_hcs = 2;
1292        } else {
1293                start_hc = port >> MV_PORT_HC_SHIFT;
1294                start_port = port;
1295                num_ports = num_hcs = 1;
1296        }
1297        DPRINTK("All registers for port(s) %u-%u:\n", start_port,
1298                num_ports > 1 ? num_ports - 1 : start_port);
1299
1300        if (NULL != pdev) {
1301                DPRINTK("PCI config space regs:\n");
1302                mv_dump_pci_cfg(pdev, 0x68);
1303        }
1304        DPRINTK("PCI regs:\n");
1305        mv_dump_mem(mmio_base+0xc00, 0x3c);
1306        mv_dump_mem(mmio_base+0xd00, 0x34);
1307        mv_dump_mem(mmio_base+0xf00, 0x4);
1308        mv_dump_mem(mmio_base+0x1d00, 0x6c);
1309        for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
1310                hc_base = mv_hc_base(mmio_base, hc);
1311                DPRINTK("HC regs (HC %i):\n", hc);
1312                mv_dump_mem(hc_base, 0x1c);
1313        }
1314        for (p = start_port; p < start_port + num_ports; p++) {
1315                port_base = mv_port_base(mmio_base, p);
1316                DPRINTK("EDMA regs (port %i):\n", p);
1317                mv_dump_mem(port_base, 0x54);
1318                DPRINTK("SATA regs (port %i):\n", p);
1319                mv_dump_mem(port_base+0x300, 0x60);
1320        }
1321#endif
1322}
1323
1324static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1325{
1326        unsigned int ofs;
1327
1328        switch (sc_reg_in) {
1329        case SCR_STATUS:
1330        case SCR_CONTROL:
1331        case SCR_ERROR:
1332                ofs = SATA_STATUS + (sc_reg_in * sizeof(u32));
1333                break;
1334        case SCR_ACTIVE:
1335                ofs = SATA_ACTIVE;   /* active is not with the others */
1336                break;
1337        default:
1338                ofs = 0xffffffffU;
1339                break;
1340        }
1341        return ofs;
1342}
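
/*
 * For illustration: with libata's SCR numbering (SCR_STATUS=0, SCR_ERROR=1,
 * SCR_CONTROL=2, SCR_ACTIVE=3), the mapping above works out to:
 *
 *      mv_scr_offset(SCR_STATUS)  == SATA_STATUS + 0x0
 *      mv_scr_offset(SCR_ERROR)   == SATA_STATUS + 0x4
 *      mv_scr_offset(SCR_CONTROL) == SATA_STATUS + 0x8
 *      mv_scr_offset(SCR_ACTIVE)  == SATA_ACTIVE   (kept apart from the rest)
 */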
1343
1344static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
1345{
1346        unsigned int ofs = mv_scr_offset(sc_reg_in);
1347
1348        if (ofs != 0xffffffffU) {
1349                *val = readl(mv_ap_base(link->ap) + ofs);
1350                return 0;
1351        } else
1352                return -EINVAL;
1353}
1354
1355static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
1356{
1357        unsigned int ofs = mv_scr_offset(sc_reg_in);
1358
1359        if (ofs != 0xffffffffU) {
1360                void __iomem *addr = mv_ap_base(link->ap) + ofs;
1361                if (sc_reg_in == SCR_CONTROL) {
1362                        /*
1363                         * Workaround for 88SX60x1 FEr SATA#26:
1364                         *
1365                         * COMRESETs have to take care not to accidentally
1366                         * put the drive to sleep when writing SCR_CONTROL.
1367                         * Setting bits 12..15 prevents this problem.
1368                         *
1369                         * So if we see an outbound COMRESET, set those bits.
1370                         * Ditto for the followup write that clears the reset.
1371                         *
1372                         * The proprietary driver does this for
1373                         * all chip versions, and so do we.
1374                         */
1375                        if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
1376                                val |= 0xf000;
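                        /* e.g. a typical COMRESET write of 0x301 goes out as 0xf301 */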
1377                }
1378                writelfl(val, addr);
1379                return 0;
1380        } else
1381                return -EINVAL;
1382}
1383
1384static void mv6_dev_config(struct ata_device *adev)
1385{
1386        /*
1387         * Deal with Gen-II ("mv6") hardware quirks/restrictions:
1388         *
1389         * Gen-II does not support NCQ over a port multiplier
1390         *  (no FIS-based switching).
1391         */
1392        if (adev->flags & ATA_DFLAG_NCQ) {
1393                if (sata_pmp_attached(adev->link->ap)) {
1394                        adev->flags &= ~ATA_DFLAG_NCQ;
1395                        ata_dev_info(adev,
1396                                "NCQ disabled for command-based switching\n");
1397                }
1398        }
1399}
1400
1401static int mv_qc_defer(struct ata_queued_cmd *qc)
1402{
1403        struct ata_link *link = qc->dev->link;
1404        struct ata_port *ap = link->ap;
1405        struct mv_port_priv *pp = ap->private_data;
1406
1407        /*
1408         * Don't allow new commands if we're in a delayed EH state
1409         * for NCQ and/or FIS-based switching.
1410         */
1411        if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
1412                return ATA_DEFER_PORT;
1413
1414        /* PIO commands need an exclusive link: no other commands [DMA or PIO]
1415         * can run concurrently.
1416         * Set excl_link when we want to send a PIO command in DMA mode,
1417         * or a non-NCQ command in NCQ mode.
1418         * When we receive a command from that link and there are no
1419         * outstanding commands, mark a flag to clear excl_link and let
1420         * the command go through.
1421         */
1422        if (unlikely(ap->excl_link)) {
1423                if (link == ap->excl_link) {
1424                        if (ap->nr_active_links)
1425                                return ATA_DEFER_PORT;
1426                        qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
1427                        return 0;
1428                } else
1429                        return ATA_DEFER_PORT;
1430        }
1431
1432        /*
1433         * If the port is completely idle, then allow the new qc.
1434         */
1435        if (ap->nr_active_links == 0)
1436                return 0;
1437
1438        /*
1439         * The port is operating in host queuing mode (EDMA) with NCQ
1440         * enabled, allow multiple NCQ commands.  EDMA also allows
1441         * queueing multiple DMA commands but libata core currently
1442         * doesn't allow it.
1443         */
1444        if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
1445            (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
1446                if (ata_is_ncq(qc->tf.protocol))
1447                        return 0;
1448                else {
1449                        ap->excl_link = link;
1450                        return ATA_DEFER_PORT;
1451                }
1452        }
1453
1454        return ATA_DEFER_PORT;
1455}
1456
1457static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
1458{
1459        struct mv_port_priv *pp = ap->private_data;
1460        void __iomem *port_mmio;
1461
1462        u32 fiscfg,   *old_fiscfg   = &pp->cached.fiscfg;
1463        u32 ltmode,   *old_ltmode   = &pp->cached.ltmode;
1464        u32 haltcond, *old_haltcond = &pp->cached.haltcond;
1465
1466        ltmode   = *old_ltmode & ~LTMODE_BIT8;
1467        haltcond = *old_haltcond | EDMA_ERR_DEV;
1468
1469        if (want_fbs) {
1470                fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
1471                ltmode = *old_ltmode | LTMODE_BIT8;
1472                if (want_ncq)
1473                        haltcond &= ~EDMA_ERR_DEV;
1474                else
1475                        fiscfg |=  FISCFG_WAIT_DEV_ERR;
1476        } else {
1477                fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
1478        }
1479
1480        port_mmio = mv_ap_base(ap);
1481        mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg);
1482        mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode);
1483        mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond);
1484}
1485
1486static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
1487{
1488        struct mv_host_priv *hpriv = ap->host->private_data;
1489        u32 old, new;
1490
1491        /* workaround for 88SX60x1 FEr SATA#25 (part 1) */
1492        old = readl(hpriv->base + GPIO_PORT_CTL);
1493        if (want_ncq)
1494                new = old | (1 << 22);
1495        else
1496                new = old & ~(1 << 22);
1497        if (new != old)
1498                writel(new, hpriv->base + GPIO_PORT_CTL);
1499}
1500
1501/**
1502 *      mv_bmdma_enable_iie - set a magic bit on GEN_IIE to allow bmdma
1503 *      @ap: Port being initialized
 *      @enable_bmdma: set (1) or clear (0) the basic DMA enable bit
1504 *
1505 *      There are two DMA modes on these chips:  basic DMA, and EDMA.
1506 *
1507 *      Bit-0 of the "EDMA RESERVED" register enables/disables use
1508 *      of basic DMA on the GEN_IIE versions of the chips.
1509 *
1510 *      This bit survives EDMA resets, must be set for basic DMA
1511 *      to function, and should be cleared when EDMA is active.
1512 */
1513static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
1514{
1515        struct mv_port_priv *pp = ap->private_data;
1516        u32 new, *old = &pp->cached.unknown_rsvd;
1517
1518        if (enable_bmdma)
1519                new = *old | 1;
1520        else
1521                new = *old & ~1;
1522        mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new);
1523}
1524
1525/*
1526 * SOC chips have an issue whereby the HDD LEDs don't always blink
1527 * during I/O when NCQ is enabled. Enabling a special "LED blink" mode
1528 * of the SOC takes care of it, generating a steady blink rate when
1529 * any drive on the chip is active.
1530 *
1531 * Unfortunately, the blink mode is a global hardware setting for the SOC,
1532 * so we must use it whenever at least one port on the SOC has NCQ enabled.
1533 *
1534 * We turn "LED blink" off when NCQ is not in use anywhere, because the normal
1535 * LED operation works then, and provides better (more accurate) feedback.
1536 *
1537 * Note that this code assumes that an SOC never has more than one HC onboard.
1538 */
1539static void mv_soc_led_blink_enable(struct ata_port *ap)
1540{
1541        struct ata_host *host = ap->host;
1542        struct mv_host_priv *hpriv = host->private_data;
1543        void __iomem *hc_mmio;
1544        u32 led_ctrl;
1545
1546        if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)
1547                return;
1548        hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN;
1549        hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
1550        led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
1551        writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
1552}
1553
1554static void mv_soc_led_blink_disable(struct ata_port *ap)
1555{
1556        struct ata_host *host = ap->host;
1557        struct mv_host_priv *hpriv = host->private_data;
1558        void __iomem *hc_mmio;
1559        u32 led_ctrl;
1560        unsigned int port;
1561
1562        if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN))
1563                return;
1564
1565        /* disable led-blink only if no ports are using NCQ */
1566        for (port = 0; port < hpriv->n_ports; port++) {
1567                struct ata_port *this_ap = host->ports[port];
1568                struct mv_port_priv *pp = this_ap->private_data;
1569
1570                if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
1571                        return;
1572        }
1573
1574        hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN;
1575        hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
1576        led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
1577        writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
1578}
1579
1580static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
1581{
1582        u32 cfg;
1583        struct mv_port_priv *pp    = ap->private_data;
1584        struct mv_host_priv *hpriv = ap->host->private_data;
1585        void __iomem *port_mmio    = mv_ap_base(ap);
1586
1587        /* set up non-NCQ EDMA configuration */
1588        cfg = EDMA_CFG_Q_DEPTH;         /* always 0x1f for *all* chips */
1589        pp->pp_flags &=
1590          ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
1591
1592        if (IS_GEN_I(hpriv))
1593                cfg |= (1 << 8);        /* enab config burst size mask */
1594
1595        else if (IS_GEN_II(hpriv)) {
1596                cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1597                mv_60x1_errata_sata25(ap, want_ncq);
1598
1599        } else if (IS_GEN_IIE(hpriv)) {
1600                int want_fbs = sata_pmp_attached(ap);
1601                /*
1602                 * Possible future enhancement:
1603                 *
1604                 * The chip can use FBS with non-NCQ, if we allow it,
1605                 * But first we need to have the error handling in place
1606                 * for this mode (datasheet section 7.3.15.4.2.3).
1607                 * So disallow non-NCQ FBS for now.
1608                 */
1609                want_fbs &= want_ncq;
1610
1611                mv_config_fbs(ap, want_ncq, want_fbs);
1612
1613                if (want_fbs) {
1614                        pp->pp_flags |= MV_PP_FLAG_FBS_EN;
1615                        cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
1616                }
1617
1618                cfg |= (1 << 23);       /* do not mask PM field in rx'd FIS */
1619                if (want_edma) {
1620                        cfg |= (1 << 22); /* enab 4-entry host queue cache */
1621                        if (!IS_SOC(hpriv))
1622                                cfg |= (1 << 18); /* enab early completion */
1623                }
1624                if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
1625                        cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
1626                mv_bmdma_enable_iie(ap, !want_edma);
1627
1628                if (IS_SOC(hpriv)) {
1629                        if (want_ncq)
1630                                mv_soc_led_blink_enable(ap);
1631                        else
1632                                mv_soc_led_blink_disable(ap);
1633                }
1634        }
1635
1636        if (want_ncq) {
1637                cfg |= EDMA_CFG_NCQ;
1638                pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
1639        }
1640
1641        writelfl(cfg, port_mmio + EDMA_CFG);
1642}
1643
1644static void mv_port_free_dma_mem(struct ata_port *ap)
1645{
1646        struct mv_host_priv *hpriv = ap->host->private_data;
1647        struct mv_port_priv *pp = ap->private_data;
1648        int tag;
1649
1650        if (pp->crqb) {
1651                dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1652                pp->crqb = NULL;
1653        }
1654        if (pp->crpb) {
1655                dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1656                pp->crpb = NULL;
1657        }
1658        /*
1659         * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1660         * For later hardware, we have one unique sg_tbl per NCQ tag.
1661         */
1662        for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1663                if (pp->sg_tbl[tag]) {
1664                        if (tag == 0 || !IS_GEN_I(hpriv))
1665                                dma_pool_free(hpriv->sg_tbl_pool,
1666                                              pp->sg_tbl[tag],
1667                                              pp->sg_tbl_dma[tag]);
1668                        pp->sg_tbl[tag] = NULL;
1669                }
1670        }
1671}
1672
1673/**
1674 *      mv_port_start - Port specific init/start routine.
1675 *      @ap: ATA channel to manipulate
1676 *
1677 *      Allocate and point to DMA memory, init port private memory,
1678 *      zero indices.
1679 *
1680 *      LOCKING:
1681 *      Inherited from caller.
1682 */
1683static int mv_port_start(struct ata_port *ap)
1684{
1685        struct device *dev = ap->host->dev;
1686        struct mv_host_priv *hpriv = ap->host->private_data;
1687        struct mv_port_priv *pp;
1688        unsigned long flags;
1689        int tag;
1690
1691        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1692        if (!pp)
1693                return -ENOMEM;
1694        ap->private_data = pp;
1695
1696        pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1697        if (!pp->crqb)
1698                return -ENOMEM;
1699        memset(pp->crqb, 0, MV_CRQB_Q_SZ);
1700
1701        pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1702        if (!pp->crpb)
1703                goto out_port_free_dma_mem;
1704        memset(pp->crpb, 0, MV_CRPB_Q_SZ);
1705
1706        /* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
1707        if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
1708                ap->flags |= ATA_FLAG_AN;
1709        /*
1710         * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1711         * For later hardware, we need one unique sg_tbl per NCQ tag.
1712         */
1713        for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1714                if (tag == 0 || !IS_GEN_I(hpriv)) {
1715                        pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1716                                              GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1717                        if (!pp->sg_tbl[tag])
1718                                goto out_port_free_dma_mem;
1719                } else {
1720                        pp->sg_tbl[tag]     = pp->sg_tbl[0];
1721                        pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1722                }
1723        }
1724
1725        spin_lock_irqsave(ap->lock, flags);
1726        mv_save_cached_regs(ap);
1727        mv_edma_cfg(ap, 0, 0);
1728        spin_unlock_irqrestore(ap->lock, flags);
1729
1730        return 0;
1731
1732out_port_free_dma_mem:
1733        mv_port_free_dma_mem(ap);
1734        return -ENOMEM;
1735}
1736
1737/**
1738 *      mv_port_stop - Port specific cleanup/stop routine.
1739 *      @ap: ATA channel to manipulate
1740 *
1741 *      Stop DMA, cleanup port memory.
1742 *
1743 *      LOCKING:
1744 *      This routine uses the host lock to protect the DMA stop.
1745 */
1746static void mv_port_stop(struct ata_port *ap)
1747{
1748        unsigned long flags;
1749
1750        spin_lock_irqsave(ap->lock, flags);
1751        mv_stop_edma(ap);
1752        mv_enable_port_irqs(ap, 0);
1753        spin_unlock_irqrestore(ap->lock, flags);
1754        mv_port_free_dma_mem(ap);
1755}
1756
1757/**
1758 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1759 *      @qc: queued command whose SG list to source from
1760 *
1761 *      Populate the SG list and mark the last entry.
1762 *
1763 *      LOCKING:
1764 *      Inherited from caller.
1765 */
1766static void mv_fill_sg(struct ata_queued_cmd *qc)
1767{
1768        struct mv_port_priv *pp = qc->ap->private_data;
1769        struct scatterlist *sg;
1770        struct mv_sg *mv_sg, *last_sg = NULL;
1771        unsigned int si;
1772
1773        mv_sg = pp->sg_tbl[qc->tag];
1774        for_each_sg(qc->sg, sg, qc->n_elem, si) {
1775                dma_addr_t addr = sg_dma_address(sg);
1776                u32 sg_len = sg_dma_len(sg);
1777
1778                while (sg_len) {
1779                        u32 offset = addr & 0xffff;
1780                        u32 len = sg_len;
1781
1782                        if (offset + len > 0x10000)
1783                                len = 0x10000 - offset;
1784
1785                        mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1786                        mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1787                        mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1788                        mv_sg->reserved = 0;
1789
1790                        sg_len -= len;
1791                        addr += len;
1792
1793                        last_sg = mv_sg;
1794                        mv_sg++;
1795                }
1796        }
1797
1798        if (likely(last_sg))
1799                last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1800        mb(); /* ensure data structure is visible to the chipset */
1801}
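
/*
 * Worked example of the 64 KiB clipping above (illustrative numbers only):
 * a 24 KiB segment at bus address 0x1fe000 sits at offset 0xe000 within its
 * 64 KiB window, so the first ePRD is clipped to 0x10000 - 0xe000 = 0x2000
 * bytes and a second ePRD covers the remaining 0x4000 bytes from 0x200000.
 * As a result, no single ePRD entry ever crosses a 64 KiB boundary.
 */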
1802
1803static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1804{
1805        u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1806                (last ? CRQB_CMD_LAST : 0);
1807        *cmdw = cpu_to_le16(tmp);
1808}
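
/*
 * Each halfword packed above describes one shadow-register write for the
 * EDMA engine: the data byte, the target register address, CRQB_CMD_CS,
 * and (for the final entry) CRQB_CMD_LAST.  For example, mv_qc_prep()
 * below ends its sequence with
 *
 *      mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);
 *
 * so the Command register write is marked as the last entry.
 */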
1809
1810/**
1811 *      mv_sff_irq_clear - Clear hardware interrupt after DMA.
1812 *      @ap: Port associated with this ATA transaction.
1813 *
1814 *      We need this only for ATAPI bmdma transactions,
1815 *      as otherwise we experience spurious interrupts
1816 *      after libata-sff handles the bmdma interrupts.
1817 */
1818static void mv_sff_irq_clear(struct ata_port *ap)
1819{
1820        mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);
1821}
1822
1823/**
1824 *      mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA.
1825 *      @qc: queued command to check for chipset/DMA compatibility.
1826 *
1827 *      The bmdma engines cannot handle speculative data sizes
1828 *      (bytecount under/over flow).  So only allow DMA for
1829 *      data transfer commands with known data sizes.
1830 *
1831 *      LOCKING:
1832 *      Inherited from caller.
1833 */
1834static int mv_check_atapi_dma(struct ata_queued_cmd *qc)
1835{
1836        struct scsi_cmnd *scmd = qc->scsicmd;
1837
1838        if (scmd) {
1839                switch (scmd->cmnd[0]) {
1840                case READ_6:
1841                case READ_10:
1842                case READ_12:
1843                case WRITE_6:
1844                case WRITE_10:
1845                case WRITE_12:
1846                case GPCMD_READ_CD:
1847                case GPCMD_SEND_DVD_STRUCTURE:
1848                case GPCMD_SEND_CUE_SHEET:
1849                        return 0; /* DMA is safe */
1850                }
1851        }
1852        return -EOPNOTSUPP; /* use PIO instead */
1853}
1854
1855/**
1856 *      mv_bmdma_setup - Set up BMDMA transaction
1857 *      @qc: queued command to prepare DMA for.
1858 *
1859 *      LOCKING:
1860 *      Inherited from caller.
1861 */
1862static void mv_bmdma_setup(struct ata_queued_cmd *qc)
1863{
1864        struct ata_port *ap = qc->ap;
1865        void __iomem *port_mmio = mv_ap_base(ap);
1866        struct mv_port_priv *pp = ap->private_data;
1867
1868        mv_fill_sg(qc);
1869
1870        /* clear all DMA cmd bits */
1871        writel(0, port_mmio + BMDMA_CMD);
1872
1873        /* load PRD table addr. */
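        /* (two 16-bit shifts keep the high half well-defined when dma_addr_t is 32 bits) */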
1874        writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16,
1875                port_mmio + BMDMA_PRD_HIGH);
1876        writelfl(pp->sg_tbl_dma[qc->tag],
1877                port_mmio + BMDMA_PRD_LOW);
1878
1879        /* issue r/w command */
1880        ap->ops->sff_exec_command(ap, &qc->tf);
1881}
1882
1883/**
1884 *      mv_bmdma_start - Start a BMDMA transaction
1885 *      @qc: queued command to start DMA on.
1886 *
1887 *      LOCKING:
1888 *      Inherited from caller.
1889 */
1890static void mv_bmdma_start(struct ata_queued_cmd *qc)
1891{
1892        struct ata_port *ap = qc->ap;
1893        void __iomem *port_mmio = mv_ap_base(ap);
1894        unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
1895        u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START;
1896
1897        /* start host DMA transaction */
1898        writelfl(cmd, port_mmio + BMDMA_CMD);
1899}
1900
1901/**
1902 *      mv_bmdma_stop_ap - Stop BMDMA transfer
1903 *      @ap: port on which to stop DMA
1904 *
1905 *      Clears the ATA_DMA_START flag in the bmdma control register
1906 *
1907 *      LOCKING:
1908 *      Inherited from caller.
1909 */
1910static void mv_bmdma_stop_ap(struct ata_port *ap)
1911{
1912        void __iomem *port_mmio = mv_ap_base(ap);
1913        u32 cmd;
1914
1915        /* clear start/stop bit */
1916        cmd = readl(port_mmio + BMDMA_CMD);
1917        if (cmd & ATA_DMA_START) {
1918                cmd &= ~ATA_DMA_START;
1919                writelfl(cmd, port_mmio + BMDMA_CMD);
1920
1921                /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
1922                ata_sff_dma_pause(ap);
1923        }
1924}
1925
1926static void mv_bmdma_stop(struct ata_queued_cmd *qc)
1927{
1928        mv_bmdma_stop_ap(qc->ap);
1929}
1930
1931/**
1932 *      mv_bmdma_status - Read BMDMA status
1933 *      @ap: port for which to retrieve DMA status.
1934 *
1935 *      Read and return equivalent of the sff BMDMA status register.
1936 *
1937 *      LOCKING:
1938 *      Inherited from caller.
1939 */
1940static u8 mv_bmdma_status(struct ata_port *ap)
1941{
1942        void __iomem *port_mmio = mv_ap_base(ap);
1943        u32 reg, status;
1944
1945        /*
1946         * Other bits are valid only if ATA_DMA_ACTIVE==0,
1947         * and the ATA_DMA_INTR bit doesn't exist.
1948         */
1949        reg = readl(port_mmio + BMDMA_STATUS);
1950        if (reg & ATA_DMA_ACTIVE)
1951                status = ATA_DMA_ACTIVE;
1952        else if (reg & ATA_DMA_ERR)
1953                status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
1954        else {
1955                /*
1956                 * Just because DMA_ACTIVE is 0 (DMA completed),
1957                 * this does _not_ mean the device is "done".
1958                 * So we should not yet be signalling ATA_DMA_INTR
1959                 * in some cases, e.g. DSM/TRIM, and perhaps others.
1960                 */
1961                mv_bmdma_stop_ap(ap);
1962                if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
1963                        status = 0;
1964                else
1965                        status = ATA_DMA_INTR;
1966        }
1967        return status;
1968}
1969
1970static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
1971{
1972        struct ata_taskfile *tf = &qc->tf;
1973        /*
1974         * Workaround for 88SX60x1 FEr SATA#24.
1975         *
1976         * Chip may corrupt WRITEs if multi_count >= 4kB.
1977         * Note that READs are unaffected.
1978         *
1979         * It's not clear if this errata really means "4K bytes",
1980         * or if it always happens for multi_count > 7
1981         * regardless of device sector_size.
1982         *
1983         * So, for safety, any write with multi_count > 7
1984         * gets converted here into a regular PIO write instead:
1985         */
1986        if ((tf->flags & ATA_TFLAG_WRITE) && is_multi_taskfile(tf)) {
1987                if (qc->dev->multi_count > 7) {
1988                        switch (tf->command) {
1989                        case ATA_CMD_WRITE_MULTI:
1990                                tf->command = ATA_CMD_PIO_WRITE;
1991                                break;
1992                        case ATA_CMD_WRITE_MULTI_FUA_EXT:
1993                                tf->flags &= ~ATA_TFLAG_FUA; /* ugh */
1994                                /* fall through */
1995                        case ATA_CMD_WRITE_MULTI_EXT:
1996                                tf->command = ATA_CMD_PIO_WRITE_EXT;
1997                                break;
1998                        }
1999                }
2000        }
2001}
2002
2003/**
2004 *      mv_qc_prep - Host specific command preparation.
2005 *      @qc: queued command to prepare
2006 *
2007 *      This routine simply redirects to the general purpose routine
2008 *      if command is not DMA.  Else, it handles prep of the CRQB
2009 *      (command request block), does some sanity checking, and calls
2010 *      the SG load routine.
2011 *
2012 *      LOCKING:
2013 *      Inherited from caller.
2014 */
2015static void mv_qc_prep(struct ata_queued_cmd *qc)
2016{
2017        struct ata_port *ap = qc->ap;
2018        struct mv_port_priv *pp = ap->private_data;
2019        __le16 *cw;
2020        struct ata_taskfile *tf = &qc->tf;
2021        u16 flags = 0;
2022        unsigned in_index;
2023
2024        switch (tf->protocol) {
2025        case ATA_PROT_DMA:
2026                if (tf->command == ATA_CMD_DSM)
2027                        return;
2028                /* fall-thru */
2029        case ATA_PROT_NCQ:
2030                break;  /* continue below */
2031        case ATA_PROT_PIO:
2032                mv_rw_multi_errata_sata24(qc);
2033                return;
2034        default:
2035                return;
2036        }
2037
2038        /* Fill in command request block */
2040        if (!(tf->flags & ATA_TFLAG_WRITE))
2041                flags |= CRQB_FLAG_READ;
2042        WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
2043        flags |= qc->tag << CRQB_TAG_SHIFT;
2044        flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
2045
2046        /* get current queue index from software */
2047        in_index = pp->req_idx;
2048
2049        pp->crqb[in_index].sg_addr =
2050                cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
2051        pp->crqb[in_index].sg_addr_hi =
2052                cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
2053        pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
2054
2055        cw = &pp->crqb[in_index].ata_cmd[0];
2056
2057        /* Sadly, the CRQB cannot accommodate all registers--there are
2058         * only 11 bytes...so we must pick and choose required
2059         * registers based on the command.  So, we drop feature and
2060         * hob_feature for [RW] DMA commands, but they are needed for
2061         * NCQ.  NCQ will drop hob_nsect, which is not needed there
2062         * (nsect is used only for the tag; feat/hob_feat hold true nsect).
2063         */
2064        switch (tf->command) {
2065        case ATA_CMD_READ:
2066        case ATA_CMD_READ_EXT:
2067        case ATA_CMD_WRITE:
2068        case ATA_CMD_WRITE_EXT:
2069        case ATA_CMD_WRITE_FUA_EXT:
2070                mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
2071                break;
2072        case ATA_CMD_FPDMA_READ:
2073        case ATA_CMD_FPDMA_WRITE:
2074                mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
2075                mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
2076                break;
2077        default:
2078                /* The only other commands EDMA supports in non-queued and
2079                 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
2080                 * of which are defined/used by Linux.  If we get here, this
2081                 * driver needs work.
2082                 *
2083                 * FIXME: modify libata to give qc_prep a return value and
2084                 * return error here.
2085                 */
2086                BUG_ON(tf->command);
2087                break;
2088        }
2089        mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
2090        mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
2091        mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
2092        mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
2093        mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
2094        mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
2095        mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
2096        mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
2097        mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);    /* last */
2098
2099        if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2100                return;
2101        mv_fill_sg(qc);
2102}
2103
2104/**
2105 *      mv_qc_prep_iie - Host specific command preparation.
2106 *      @qc: queued command to prepare
2107 *
2108 *      This routine simply redirects to the general purpose routine
2109 *      if command is not DMA.  Else, it handles prep of the CRQB
2110 *      (command request block), does some sanity checking, and calls
2111 *      the SG load routine.
2112 *
2113 *      LOCKING:
2114 *      Inherited from caller.
2115 */
2116static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
2117{
2118        struct ata_port *ap = qc->ap;
2119        struct mv_port_priv *pp = ap->private_data;
2120        struct mv_crqb_iie *crqb;
2121        struct ata_taskfile *tf = &qc->tf;
2122        unsigned in_index;
2123        u32 flags = 0;
2124
2125        if ((tf->protocol != ATA_PROT_DMA) &&
2126            (tf->protocol != ATA_PROT_NCQ))
2127                return;
2128        if (tf->command == ATA_CMD_DSM)
2129                return;  /* use bmdma for this */
2130
2131        /* Fill in Gen IIE command request block */
2132        if (!(tf->flags & ATA_TFLAG_WRITE))
2133                flags |= CRQB_FLAG_READ;
2134
2135        WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
2136        flags |= qc->tag << CRQB_TAG_SHIFT;
2137        flags |= qc->tag << CRQB_HOSTQ_SHIFT;
2138        flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
2139
2140        /* get current queue index from software */
2141        in_index = pp->req_idx;
2142
2143        crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
2144        crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
2145        crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
2146        crqb->flags = cpu_to_le32(flags);
2147
2148        crqb->ata_cmd[0] = cpu_to_le32(
2149                        (tf->command << 16) |
2150                        (tf->feature << 24)
2151                );
2152        crqb->ata_cmd[1] = cpu_to_le32(
2153                        (tf->lbal << 0) |
2154                        (tf->lbam << 8) |
2155                        (tf->lbah << 16) |
2156                        (tf->device << 24)
2157                );
2158        crqb->ata_cmd[2] = cpu_to_le32(
2159                        (tf->hob_lbal << 0) |
2160                        (tf->hob_lbam << 8) |
2161                        (tf->hob_lbah << 16) |
2162                        (tf->hob_feature << 24)
2163                );
2164        crqb->ata_cmd[3] = cpu_to_le32(
2165                        (tf->nsect << 0) |
2166                        (tf->hob_nsect << 8)
2167                );
2168
2169        if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2170                return;
2171        mv_fill_sg(qc);
2172}
2173
2174/**
2175 *      mv_sff_check_status - fetch device status, if valid
2176 *      @ap: ATA port to fetch status from
2177 *
2178 *      When using command issue via mv_qc_issue_fis(),
2179 *      the initial ATA_BUSY state does not show up in the
2180 *      ATA status (shadow) register.  This can confuse libata!
2181 *
2182 *      So we have a hook here to fake ATA_BUSY for that situation,
2183 *      until the first time a BUSY, DRQ, or ERR bit is seen.
2184 *
2185 *      The rest of the time, it simply returns the ATA status register.
2186 */
2187static u8 mv_sff_check_status(struct ata_port *ap)
2188{
2189        u8 stat = ioread8(ap->ioaddr.status_addr);
2190        struct mv_port_priv *pp = ap->private_data;
2191
2192        if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) {
2193                if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR))
2194                        pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY;
2195                else
2196                        stat = ATA_BUSY;
2197        }
2198        return stat;
2199}
2200
2201/**
2202 *      mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register
2203 *      @ap: port on which to send the FIS
 *      @fis: fis to be sent
2204 *      @nwords: number of 32-bit words in the fis
2205 */
2206static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
2207{
2208        void __iomem *port_mmio = mv_ap_base(ap);
2209        u32 ifctl, old_ifctl, ifstat;
2210        int i, timeout = 200, final_word = nwords - 1;
2211
2212        /* Initiate FIS transmission mode */
2213        old_ifctl = readl(port_mmio + SATA_IFCTL);
2214        ifctl = 0x100 | (old_ifctl & 0xf);
2215        writelfl(ifctl, port_mmio + SATA_IFCTL);
2216
2217        /* Send all words of the FIS except for the final word */
2218        for (i = 0; i < final_word; ++i)
2219                writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS);
2220
2221        /* Flag end-of-transmission, and then send the final word */
2222        writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL);
2223        writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS);
2224
2225        /*
2226         * Wait for FIS transmission to complete.
2227         * This typically takes just a single iteration.
2228         */
2229        do {
2230                ifstat = readl(port_mmio + SATA_IFSTAT);
2231        } while (!(ifstat & 0x1000) && --timeout);
2232
2233        /* Restore original port configuration */
2234        writelfl(old_ifctl, port_mmio + SATA_IFCTL);
2235
2236        /* See if it worked */
2237        if ((ifstat & 0x3000) != 0x1000) {
2238                ata_port_warn(ap, "%s transmission error, ifstat=%08x\n",
2239                              __func__, ifstat);
2240                return AC_ERR_OTHER;
2241        }
2242        return 0;
2243}
2244
2245/**
2246 *      mv_qc_issue_fis - Issue a command directly as a FIS
2247 *      @qc: queued command to start
2248 *
2249 *      Note that the ATA shadow registers are not updated
2250 *      after command issue, so the device will appear "READY"
2251 *      if polled, even while it is BUSY processing the command.
2252 *
2253 *      So we use a status hook to fake ATA_BUSY until the drive changes state.
2254 *
2255 *      Note: we don't get updated shadow regs on *completion*
2256 *      of non-data commands. So avoid sending them via this function,
2257 *      as they will appear to have completed immediately.
2258 *
2259 *      GEN_IIE has special registers that we could get the result tf from,
2260 *      but earlier chipsets do not.  For now, we ignore those registers.
2261 */
2262static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
2263{
2264        struct ata_port *ap = qc->ap;
2265        struct mv_port_priv *pp = ap->private_data;
2266        struct ata_link *link = qc->dev->link;
2267        u32 fis[5];
2268        int err = 0;
2269
2270        ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis);
2271        err = mv_send_fis(ap, fis, ARRAY_SIZE(fis));
2272        if (err)
2273                return err;
2274
2275        switch (qc->tf.protocol) {
2276        case ATAPI_PROT_PIO:
2277                pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
2278                /* fall through */
2279        case ATAPI_PROT_NODATA:
2280                ap->hsm_task_state = HSM_ST_FIRST;
2281                break;
2282        case ATA_PROT_PIO:
2283                pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
2284                if (qc->tf.flags & ATA_TFLAG_WRITE)
2285                        ap->hsm_task_state = HSM_ST_FIRST;
2286                else
2287                        ap->hsm_task_state = HSM_ST;
2288                break;
2289        default:
2290                ap->hsm_task_state = HSM_ST_LAST;
2291                break;
2292        }
2293
2294        if (qc->tf.flags & ATA_TFLAG_POLLING)
2295                ata_sff_queue_pio_task(link, 0);
2296        return 0;
2297}
2298
2299/**
2300 *      mv_qc_issue - Initiate a command to the host
2301 *      @qc: queued command to start
2302 *
2303 *      This routine simply redirects to the general purpose routine
2304 *      if command is not DMA.  Else, it sanity checks our local
2305 *      caches of the request producer/consumer indices then enables
2306 *      DMA and bumps the request producer index.
2307 *
2308 *      LOCKING:
2309 *      Inherited from caller.
2310 */
2311static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
2312{
2313        static int limit_warnings = 10;
2314        struct ata_port *ap = qc->ap;
2315        void __iomem *port_mmio = mv_ap_base(ap);
2316        struct mv_port_priv *pp = ap->private_data;
2317        u32 in_index;
2318        unsigned int port_irqs;
2319
2320        pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */
2321
2322        switch (qc->tf.protocol) {
2323        case ATA_PROT_DMA:
2324                if (qc->tf.command == ATA_CMD_DSM) {
2325                        if (!ap->ops->bmdma_setup)  /* no bmdma on GEN_I */
2326                                return AC_ERR_OTHER;
2327                        break;  /* use bmdma for this */
2328                }
2329                /* fall thru */
2330        case ATA_PROT_NCQ:
2331                mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
2332                pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
2333                in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
2334
2335                /* Write the request in pointer to kick the EDMA to life */
2336                writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
2337                                        port_mmio + EDMA_REQ_Q_IN_PTR);
2338                return 0;
2339
2340        case ATA_PROT_PIO:
2341                /*
2342                 * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
2343                 *
2344                 * Someday, we might implement special polling workarounds
2345                 * for these, but it all seems rather unnecessary since we
2346                 * normally use only DMA for commands which transfer more
2347                 * than a single block of data.
2348                 *
2349                 * Much of the time, this could just work regardless.
2350                 * So for now, just log the incident, and allow the attempt.
2351                 */
2352                if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
2353                        --limit_warnings;
2354                        ata_link_warn(qc->dev->link, DRV_NAME
2355                                      ": attempting PIO w/multiple DRQ: "
2356                                      "this may fail due to h/w errata\n");
2357                }
2358                /* drop through */
2359        case ATA_PROT_NODATA:
2360        case ATAPI_PROT_PIO:
2361        case ATAPI_PROT_NODATA:
2362                if (ap->flags & ATA_FLAG_PIO_POLLING)
2363                        qc->tf.flags |= ATA_TFLAG_POLLING;
2364                break;
2365        }
2366
2367        if (qc->tf.flags & ATA_TFLAG_POLLING)
2368                port_irqs = ERR_IRQ;    /* mask device interrupt when polling */
2369        else
2370                port_irqs = ERR_IRQ | DONE_IRQ; /* unmask all interrupts */
2371
2372        /*
2373         * We're about to send a non-EDMA capable command to the
2374         * port.  Turn off EDMA so there won't be problems accessing
2375         * the shadow block and other registers.
2376         */
2377        mv_stop_edma(ap);
2378        mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
2379        mv_pmp_select(ap, qc->dev->link->pmp);
2380
2381        if (qc->tf.command == ATA_CMD_READ_LOG_EXT) {
2382                struct mv_host_priv *hpriv = ap->host->private_data;
2383                /*
2384                 * Workaround for 88SX60x1 FEr SATA#25 (part 2).
2385                 *
2386                 * After any NCQ error, the READ_LOG_EXT command
2387                 * from libata-eh *must* use mv_qc_issue_fis().
2388                 * Otherwise it might fail, due to chip errata.
2389                 *
2390                 * Rather than special-case it, we'll just *always*
2391                 * use this method here for READ_LOG_EXT, making for
2392                 * easier testing.
2393                 */
2394                if (IS_GEN_II(hpriv))
2395                        return mv_qc_issue_fis(qc);
2396        }
2397        return ata_bmdma_qc_issue(qc);
2398}
2399
2400static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
2401{
2402        struct mv_port_priv *pp = ap->private_data;
2403        struct ata_queued_cmd *qc;
2404
2405        if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
2406                return NULL;
2407        qc = ata_qc_from_tag(ap, ap->link.active_tag);
2408        if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
2409                return qc;
2410        return NULL;
2411}
2412
2413static void mv_pmp_error_handler(struct ata_port *ap)
2414{
2415        unsigned int pmp, pmp_map;
2416        struct mv_port_priv *pp = ap->private_data;
2417
2418        if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
2419                /*
2420                 * Perform NCQ error analysis on failed PMPs
2421                 * before we freeze the port entirely.
2422                 *
2423                 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
2424                 */
2425                pmp_map = pp->delayed_eh_pmp_map;
2426                pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
2427                for (pmp = 0; pmp_map != 0; pmp++) {
2428                        unsigned int this_pmp = (1 << pmp);
2429                        if (pmp_map & this_pmp) {
2430                                struct ata_link *link = &ap->pmp_link[pmp];
2431                                pmp_map &= ~this_pmp;
2432                                ata_eh_analyze_ncq_error(link);
2433                        }
2434                }
2435                ata_port_freeze(ap);
2436        }
2437        sata_pmp_error_handler(ap);
2438}
2439
2440static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
2441{
2442        void __iomem *port_mmio = mv_ap_base(ap);
2443
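        /*
         * The upper 16 bits of SATA_TESTCTL appear to hold one "device
         * error" bit per PMP link; callers treat the result as a pmp_map.
         */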
2444        return readl(port_mmio + SATA_TESTCTL) >> 16;
2445}
2446
2447static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
2448{
2449        struct ata_eh_info *ehi;
2450        unsigned int pmp;
2451
2452        /*
2453         * Initialize EH info for PMPs which saw device errors
2454         */
2455        ehi = &ap->link.eh_info;
2456        for (pmp = 0; pmp_map != 0; pmp++) {
2457                unsigned int this_pmp = (1 << pmp);
2458                if (pmp_map & this_pmp) {
2459                        struct ata_link *link = &ap->pmp_link[pmp];
2460
2461                        pmp_map &= ~this_pmp;
2462                        ehi = &link->eh_info;
2463                        ata_ehi_clear_desc(ehi);
2464                        ata_ehi_push_desc(ehi, "dev err");
2465                        ehi->err_mask |= AC_ERR_DEV;
2466                        ehi->action |= ATA_EH_RESET;
2467                        ata_link_abort(link);
2468                }
2469        }
2470}
2471
2472static int mv_req_q_empty(struct ata_port *ap)
2473{
2474        void __iomem *port_mmio = mv_ap_base(ap);
2475        u32 in_ptr, out_ptr;
2476
2477        in_ptr  = (readl(port_mmio + EDMA_REQ_Q_IN_PTR)
2478                        >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2479        out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR)
2480                        >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2481        return (in_ptr == out_ptr);     /* 1 == queue_is_empty */
2482}
2483
2484static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
2485{
2486        struct mv_port_priv *pp = ap->private_data;
2487        int failed_links;
2488        unsigned int old_map, new_map;
2489
2490        /*
2491         * Device error during FBS+NCQ operation:
2492         *
2493         * Set a port flag to prevent further I/O being enqueued.
2494         * Leave the EDMA running to drain outstanding commands from this port.
2495         * Perform the post-mortem/EH only when all responses are complete.
2496         * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
2497         */
2498        if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
2499                pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
2500                pp->delayed_eh_pmp_map = 0;
2501        }
2502        old_map = pp->delayed_eh_pmp_map;
2503        new_map = old_map | mv_get_err_pmp_map(ap);
2504
2505        if (old_map != new_map) {
2506                pp->delayed_eh_pmp_map = new_map;
2507                mv_pmp_eh_prep(ap, new_map & ~old_map);
2508        }
2509        failed_links = hweight16(new_map);
2510
2511        ata_port_info(ap,
2512                      "%s: pmp_map=%04x qc_map=%04x failed_links=%d nr_active_links=%d\n",
2513                      __func__, pp->delayed_eh_pmp_map,
2514                      ap->qc_active, failed_links,
2515                      ap->nr_active_links);
2516
2517        if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
2518                mv_process_crpb_entries(ap, pp);
2519                mv_stop_edma(ap);
2520                mv_eh_freeze(ap);
2521                ata_port_info(ap, "%s: done\n", __func__);
2522                return 1;       /* handled */
2523        }
2524        ata_port_info(ap, "%s: waiting\n", __func__);
2525        return 1;       /* handled */
2526}
2527
2528static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
2529{
2530        /*
2531         * Possible future enhancement:
2532         *
2533         * FBS+non-NCQ operation is not yet implemented.
2534         * See related notes in mv_edma_cfg().
2535         *
2536         * Device error during FBS+non-NCQ operation:
2537         *
2538         * We need to snapshot the shadow registers for each failed command.
2539         * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
2540         */
2541        return 0;       /* not handled */
2542}
2543
2544static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
2545{
2546        struct mv_port_priv *pp = ap->private_data;
2547
2548        if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
2549                return 0;       /* EDMA was not active: not handled */
2550        if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
2551                return 0;       /* FBS was not active: not handled */
2552
2553        if (!(edma_err_cause & EDMA_ERR_DEV))
2554                return 0;       /* non DEV error: not handled */
2555        edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
2556        if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
2557                return 0;       /* other problems: not handled */
2558
2559        if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
2560                /*
2561                 * EDMA should NOT have self-disabled for this case.
2562                 * If it did, then something is wrong elsewhere,
2563                 * and we cannot handle it here.
2564                 */
2565                if (edma_err_cause & EDMA_ERR_SELF_DIS) {
2566                        ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
2567                                      __func__, edma_err_cause, pp->pp_flags);
2568                        return 0; /* not handled */
2569                }
2570                return mv_handle_fbs_ncq_dev_err(ap);
2571        } else {
2572                /*
2573                 * EDMA should have self-disabled for this case.
2574                 * If it did not, then something is wrong elsewhere,
2575                 * and we cannot handle it here.
2576                 */
2577                if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
2578                        ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
2579                                      __func__, edma_err_cause, pp->pp_flags);
2580                        return 0; /* not handled */
2581                }
2582                return mv_handle_fbs_non_ncq_dev_err(ap);
2583        }
2584        return 0;       /* not handled */
2585}
2586
2587static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
2588{
2589        struct ata_eh_info *ehi = &ap->link.eh_info;
2590        char *when = "idle";
2591
2592        ata_ehi_clear_desc(ehi);
2593        if (edma_was_enabled) {
2594                when = "EDMA enabled";
2595        } else {
2596                struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
2597                if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
2598                        when = "polling";
2599        }
2600        ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
2601        ehi->err_mask |= AC_ERR_OTHER;
2602        ehi->action   |= ATA_EH_RESET;
2603        ata_port_freeze(ap);
2604}
2605
2606/**
2607 *      mv_err_intr - Handle error interrupts on the port
2608 *      @ap: ATA channel to manipulate
2609 *
2610 *      Most cases require a full reset of the chip's state machine,
2611 *      which also performs a COMRESET.
2612 *      Also, if the port disabled DMA, update our cached copy to match.
2613 *
2614 *      LOCKING:
2615 *      Inherited from caller.
2616 */
2617static void mv_err_intr(struct ata_port *ap)
2618{
2619        void __iomem *port_mmio = mv_ap_base(ap);
2620        u32 edma_err_cause, eh_freeze_mask, serr = 0;
2621        u32 fis_cause = 0;
2622        struct mv_port_priv *pp = ap->private_data;
2623        struct mv_host_priv *hpriv = ap->host->private_data;
2624        unsigned int action = 0, err_mask = 0;
2625        struct ata_eh_info *ehi = &ap->link.eh_info;
2626        struct ata_queued_cmd *qc;
2627        int abort = 0;
2628
2629        /*
2630         * Read and clear the SError and err_cause bits.
2631         * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
2632         * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
2633         */
2634        sata_scr_read(&ap->link, SCR_ERROR, &serr);
2635        sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
2636
2637        edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE);
2638        if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
2639                fis_cause = readl(port_mmio + FIS_IRQ_CAUSE);
2640                writelfl(~fis_cause, port_mmio + FIS_IRQ_CAUSE);
2641        }
2642        writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE);
2643
2644        if (edma_err_cause & EDMA_ERR_DEV) {
2645                /*
2646                 * Device errors during FIS-based switching operation
2647                 * require special handling.
2648                 */
2649                if (mv_handle_dev_err(ap, edma_err_cause))
2650                        return;
2651        }
2652
2653        qc = mv_get_active_qc(ap);
2654        ata_ehi_clear_desc(ehi);
2655        ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
2656                          edma_err_cause, pp->pp_flags);
2657
2658        if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
2659                ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
2660                if (fis_cause & FIS_IRQ_CAUSE_AN) {
2661                        u32 ec = edma_err_cause &
2662                               ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
2663                        sata_async_notification(ap);
2664                        if (!ec)
2665                                return; /* Just an AN; no need for the nukes */
2666                        ata_ehi_push_desc(ehi, "SDB notify");
2667                }
2668        }
2669        /*
2670         * All generations share these EDMA error cause bits:
2671         */
2672        if (edma_err_cause & EDMA_ERR_DEV) {
2673                err_mask |= AC_ERR_DEV;
2674                action |= ATA_EH_RESET;
2675                ata_ehi_push_desc(ehi, "dev error");
2676        }
2677        if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
2678                        EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
2679                        EDMA_ERR_INTRL_PAR)) {
2680                err_mask |= AC_ERR_ATA_BUS;
2681                action |= ATA_EH_RESET;
2682                ata_ehi_push_desc(ehi, "parity error");
2683        }
2684        if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
2685                ata_ehi_hotplugged(ehi);
2686                ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
2687                        "dev disconnect" : "dev connect");
2688                action |= ATA_EH_RESET;
2689        }
2690
2691        /*
2692         * Gen-I has a different SELF_DIS bit,
2693         * different FREEZE bits, and no SERR bit:
2694         */
2695        if (IS_GEN_I(hpriv)) {
2696                eh_freeze_mask = EDMA_EH_FREEZE_5;
2697                if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
2698                        pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2699                        ata_ehi_push_desc(ehi, "EDMA self-disable");
2700                }
2701        } else {
2702                eh_freeze_mask = EDMA_EH_FREEZE;
2703                if (edma_err_cause & EDMA_ERR_SELF_DIS) {
2704                        pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2705                        ata_ehi_push_desc(ehi, "EDMA self-disable");
2706                }
2707                if (edma_err_cause & EDMA_ERR_SERR) {
2708                        ata_ehi_push_desc(ehi, "SError=%08x", serr);
2709                        err_mask |= AC_ERR_ATA_BUS;
2710                        action |= ATA_EH_RESET;
2711                }
2712        }
2713
2714        if (!err_mask) {
2715                err_mask = AC_ERR_OTHER;
2716                action |= ATA_EH_RESET;
2717        }
2718
2719        ehi->serror |= serr;
2720        ehi->action |= action;
2721
2722        if (qc)
2723                qc->err_mask |= err_mask;
2724        else
2725                ehi->err_mask |= err_mask;
2726
2727        if (err_mask == AC_ERR_DEV) {
2728                /*
2729                 * Cannot do ata_port_freeze() here,
2730                 * because it would kill PIO access,
2731                 * which is needed for further diagnosis.
2732                 */
2733                mv_eh_freeze(ap);
2734                abort = 1;
2735        } else if (edma_err_cause & eh_freeze_mask) {
2736                /*
2737                 * Note to self: ata_port_freeze() calls ata_port_abort()
2738                 */
2739                ata_port_freeze(ap);
2740        } else {
2741                abort = 1;
2742        }
2743
2744        if (abort) {
2745                if (qc)
2746                        ata_link_abort(qc->dev->link);
2747                else
2748                        ata_port_abort(ap);
2749        }
2750}
2751
2752static bool mv_process_crpb_response(struct ata_port *ap,
2753                struct mv_crpb *response, unsigned int tag, int ncq_enabled)
2754{
2755        u8 ata_status;
2756        u16 edma_status = le16_to_cpu(response->flags);
2757
2758        /*
2759         * edma_status from a response queue entry:
2760         *   LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only).
2761         *   MSB is saved ATA status from command completion.
2762         */
2763        if (!ncq_enabled) {
2764                u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
2765                if (err_cause) {
2766                        /*
2767                         * Error will be seen/handled by
2768                         * mv_err_intr().  So do nothing at all here.
2769                         */
2770                        return false;
2771                }
2772        }
2773        ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
2774        if (!ac_err_mask(ata_status))
2775                return true;
2776        /* else: leave it for mv_err_intr() */
2777        return false;
2778}
2779
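/*
 * Response queue handling overview (descriptive note, derived from the
 * code below): the EDMA engine advances the hardware in-pointer
 * (EDMA_RSP_Q_IN_PTR) as it posts CRPB entries, while software keeps its
 * own out-index in pp->resp_idx.  mv_process_crpb_entries() walks the
 * ring from resp_idx up to the hardware index, collects the completed
 * tags into done_mask, completes them in one go via
 * ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask), and finally
 * writes the new out-pointer back to EDMA_RSP_Q_OUT_PTR so the hardware
 * can reuse those slots.
 */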
2780static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
2781{
2782        void __iomem *port_mmio = mv_ap_base(ap);
2783        struct mv_host_priv *hpriv = ap->host->private_data;
2784        u32 in_index;
2785        bool work_done = false;
2786        u32 done_mask = 0;
2787        int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
2788
2789        /* Get the hardware queue position index */
2790        in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR)
2791                        >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2792
2793        /* Process new responses since the last time we looked */
2794        while (in_index != pp->resp_idx) {
2795                unsigned int tag;
2796                struct mv_crpb *response = &pp->crpb[pp->resp_idx];
2797
2798                pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
2799
2800                if (IS_GEN_I(hpriv)) {
2801                        /* 50xx: no NCQ, only one command active at a time */
2802                        tag = ap->link.active_tag;
2803                } else {
2804                        /* Gen II/IIE: get command tag from CRPB entry */
2805                        tag = le16_to_cpu(response->id) & 0x1f;
2806                }
2807                if (mv_process_crpb_response(ap, response, tag, ncq_enabled))
2808                        done_mask |= 1 << tag;
2809                work_done = true;
2810        }
2811
2812        if (work_done) {
2813                ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
2814
2815                /* Update the software queue position index in hardware */
2816                writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
2817                         (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
2818                         port_mmio + EDMA_RSP_Q_OUT_PTR);
2819        }
2820}
2821
2822static void mv_port_intr(struct ata_port *ap, u32 port_cause)
2823{
2824        struct mv_port_priv *pp;
2825        int edma_was_enabled;
2826
2827        /*
2828         * Grab a snapshot of the EDMA_EN flag setting,
2829         * so that we have a consistent view for this port,
2830         * even if one of the routines we call changes it.
2831         */
2832        pp = ap->private_data;
2833        edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2834        /*
2835         * Process completed CRPB response(s) before other events.
2836         */
2837        if (edma_was_enabled && (port_cause & DONE_IRQ)) {
2838                mv_process_crpb_entries(ap, pp);
2839                if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
2840                        mv_handle_fbs_ncq_dev_err(ap);
2841        }
2842        /*
2843         * Handle chip-reported errors, or continue on to handle PIO.
2844         */
2845        if (unlikely(port_cause & ERR_IRQ)) {
2846                mv_err_intr(ap);
2847        } else if (!edma_was_enabled) {
2848                struct ata_queued_cmd *qc = mv_get_active_qc(ap);
2849                if (qc)
2850                        ata_bmdma_port_intr(ap, qc);
2851                else
2852                        mv_unexpected_intr(ap, edma_was_enabled);
2853        }
2854}
2855
2856/**
2857 *      mv_host_intr - Handle all interrupts on the given host controller
2858 *      @host: host specific structure
2859 *      @main_irq_cause: Main interrupt cause register for the chip.
2860 *
2861 *      LOCKING:
2862 *      Inherited from caller.
2863 */
2864static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
2865{
2866        struct mv_host_priv *hpriv = host->private_data;
2867        void __iomem *mmio = hpriv->base, *hc_mmio;
2868        unsigned int handled = 0, port;
2869
2870        /* If asserted, clear the "all ports" IRQ coalescing bit */
2871        if (main_irq_cause & ALL_PORTS_COAL_DONE)
2872                writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
2873
2874        for (port = 0; port < hpriv->n_ports; port++) {
2875                struct ata_port *ap = host->ports[port];
2876                unsigned int p, shift, hardport, port_cause;
2877
2878                MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
2879                /*
2880                 * Each hc within the host has its own hc_irq_cause register,
2881                 * where the interrupting ports' bits get ack'd.
2882                 */
2883                if (hardport == 0) {    /* first port on this hc ? */
2884                        u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
2885                        u32 port_mask, ack_irqs;
2886                        /*
2887                         * Skip this entire hc if nothing pending for any ports
2888                         */
2889                        if (!hc_cause) {
2890                                port += MV_PORTS_PER_HC - 1;
2891                                continue;
2892                        }
2893                        /*
2894                         * We don't need/want to read the hc_irq_cause register,
2895                         * because doing so hurts performance, and
2896                         * main_irq_cause already gives us everything we need.
2897                         *
2898                         * But we do have to *write* to the hc_irq_cause to ack
2899                         * the ports that we are handling this time through.
2900                         *
2901                         * This requires that we create a bitmap for those
2902                         * ports which interrupted us, and use that bitmap
2903                         * to ack (only) those ports via hc_irq_cause.
2904                         */
2905                        ack_irqs = 0;
2906                        if (hc_cause & PORTS_0_3_COAL_DONE)
2907                                ack_irqs = HC_COAL_IRQ;
2908                        for (p = 0; p < MV_PORTS_PER_HC; ++p) {
2909                                if ((port + p) >= hpriv->n_ports)
2910                                        break;
2911                                port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
2912                                if (hc_cause & port_mask)
2913                                        ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
2914                        }
2915                        hc_mmio = mv_hc_base_from_port(mmio, port);
2916                        writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE);
2917                        handled = 1;
2918                }
2919                /*
2920                 * Handle interrupts signalled for this port:
2921                 */
2922                port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
2923                if (port_cause)
2924                        mv_port_intr(ap, port_cause);
2925        }
2926        return handled;
2927}
2928
2929static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
2930{
2931        struct mv_host_priv *hpriv = host->private_data;
2932        struct ata_port *ap;
2933        struct ata_queued_cmd *qc;
2934        struct ata_eh_info *ehi;
2935        unsigned int i, err_mask, printed = 0;
2936        u32 err_cause;
2937
2938        err_cause = readl(mmio + hpriv->irq_cause_offset);
2939
2940        dev_err(host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", err_cause);
2941
2942        DPRINTK("All regs @ PCI error\n");
2943        mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
2944
2945        writelfl(0, mmio + hpriv->irq_cause_offset);
2946
2947        for (i = 0; i < host->n_ports; i++) {
2948                ap = host->ports[i];
2949                if (!ata_link_offline(&ap->link)) {
2950                        ehi = &ap->link.eh_info;
2951                        ata_ehi_clear_desc(ehi);
2952                        if (!printed++)
2953                                ata_ehi_push_desc(ehi,
2954                                        "PCI err cause 0x%08x", err_cause);
2955                        err_mask = AC_ERR_HOST_BUS;
2956                        ehi->action = ATA_EH_RESET;
2957                        qc = ata_qc_from_tag(ap, ap->link.active_tag);
2958                        if (qc)
2959                                qc->err_mask |= err_mask;
2960                        else
2961                                ehi->err_mask |= err_mask;
2962
2963                        ata_port_freeze(ap);
2964                }
2965        }
2966        return 1;       /* handled */
2967}
2968
2969/**
2970 *      mv_interrupt - Main interrupt event handler
2971 *      @irq: unused
2972 *      @dev_instance: private data; in this case the host structure
2973 *
2974 *      Read the read-only main interrupt cause register to determine if any host
2975 *      controllers have pending interrupts.  If so, call lower level
2976 *      routine to handle.  Also check for PCI errors which are only
2977 *      reported here.
2978 *
2979 *      LOCKING:
2980 *      This routine holds the host lock while processing pending
2981 *      interrupts.
2982 */
2983static irqreturn_t mv_interrupt(int irq, void *dev_instance)
2984{
2985        struct ata_host *host = dev_instance;
2986        struct mv_host_priv *hpriv = host->private_data;
2987        unsigned int handled = 0;
2988        int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
2989        u32 main_irq_cause, pending_irqs;
2990
2991        spin_lock(&host->lock);
2992
2993        /* for MSI:  block new interrupts while in here */
2994        if (using_msi)
2995                mv_write_main_irq_mask(0, hpriv);
2996
2997        main_irq_cause = readl(hpriv->main_irq_cause_addr);
2998        pending_irqs   = main_irq_cause & hpriv->main_irq_mask;
2999        /*
3000         * Deal with cases where we either have nothing pending, or have read
3001         * a bogus register value which can indicate HW removal or PCI fault.
3002         */
3003        if (pending_irqs && main_irq_cause != 0xffffffffU) {
3004                if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
3005                        handled = mv_pci_error(host, hpriv->base);
3006                else
3007                        handled = mv_host_intr(host, pending_irqs);
3008        }
3009
3010        /* for MSI: unmask; interrupt cause bits will retrigger now */
3011        if (using_msi)
3012                mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv);
3013
3014        spin_unlock(&host->lock);
3015
3016        return IRQ_RETVAL(handled);
3017}
3018
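/*
 * Gen-I keeps its SCR registers in the per-port PHY block at 4-byte
 * strides.  With the standard libata numbering (SCR_STATUS = 0,
 * SCR_ERROR = 1, SCR_CONTROL = 2) the mapping below works out to
 * offsets 0x0, 0x4 and 0x8 respectively; any other SCR is rejected and
 * the accessors return -EINVAL.
 */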
3019static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
3020{
3021        unsigned int ofs;
3022
3023        switch (sc_reg_in) {
3024        case SCR_STATUS:
3025        case SCR_ERROR:
3026        case SCR_CONTROL:
3027                ofs = sc_reg_in * sizeof(u32);
3028                break;
3029        default:
3030                ofs = 0xffffffffU;
3031                break;
3032        }
3033        return ofs;
3034}
3035
3036static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
3037{
3038        struct mv_host_priv *hpriv = link->ap->host->private_data;
3039        void __iomem *mmio = hpriv->base;
3040        void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
3041        unsigned int ofs = mv5_scr_offset(sc_reg_in);
3042
3043        if (ofs != 0xffffffffU) {
3044                *val = readl(addr + ofs);
3045                return 0;
3046        } else
3047                return -EINVAL;
3048}
3049
3050static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
3051{
3052        struct mv_host_priv *hpriv = link->ap->host->private_data;
3053        void __iomem *mmio = hpriv->base;
3054        void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
3055        unsigned int ofs = mv5_scr_offset(sc_reg_in);
3056
3057        if (ofs != 0xffffffffU) {
3058                writelfl(val, addr + ofs);
3059                return 0;
3060        } else
3061                return -EINVAL;
3062}
3063
3064static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
3065{
3066        struct pci_dev *pdev = to_pci_dev(host->dev);
3067        int early_5080;
3068
3069        early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
3070
3071        if (!early_5080) {
3072                u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
3073                tmp |= (1 << 0);
3074                writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
3075        }
3076
3077        mv_reset_pci_bus(host, mmio);
3078}
3079
3080static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3081{
3082        writel(0x0fcfffff, mmio + FLASH_CTL);
3083}
3084
3085static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
3086                           void __iomem *mmio)
3087{
3088        void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
3089        u32 tmp;
3090
3091        tmp = readl(phy_mmio + MV5_PHY_MODE);
3092
3093        hpriv->signal[idx].pre = tmp & 0x1800;  /* bits 12:11 */
3094        hpriv->signal[idx].amps = tmp & 0xe0;   /* bits 7:5 */
3095}
3096
3097static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
3098{
3099        u32 tmp;
3100
3101        writel(0, mmio + GPIO_PORT_CTL);
3102
3103        /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
3104
3105        tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
3106        tmp |= ~(1 << 0);
3107        writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
3108}
3109
3110static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
3111                           unsigned int port)
3112{
3113        void __iomem *phy_mmio = mv5_phy_base(mmio, port);
3114        const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
3115        u32 tmp;
3116        int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
3117
3118        if (fix_apm_sq) {
3119                tmp = readl(phy_mmio + MV5_LTMODE);
3120                tmp |= (1 << 19);
3121                writel(tmp, phy_mmio + MV5_LTMODE);
3122
3123                tmp = readl(phy_mmio + MV5_PHY_CTL);
3124                tmp &= ~0x3;
3125                tmp |= 0x1;
3126                writel(tmp, phy_mmio + MV5_PHY_CTL);
3127        }
3128
3129        tmp = readl(phy_mmio + MV5_PHY_MODE);
3130        tmp &= ~mask;
3131        tmp |= hpriv->signal[port].pre;
3132        tmp |= hpriv->signal[port].amps;
3133        writel(tmp, phy_mmio + MV5_PHY_MODE);
3134}
3135
3136
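/*
 * The ZERO() helper below is redefined (and #undef'd) around each of the
 * reset helpers so the same shorthand can clear registers relative to
 * whichever base is in scope: port_mmio, hc_mmio, or the chip-wide mmio.
 */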
3137#undef ZERO
3138#define ZERO(reg) writel(0, port_mmio + (reg))
3139static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
3140                             unsigned int port)
3141{
3142        void __iomem *port_mmio = mv_port_base(mmio, port);
3143
3144        mv_reset_channel(hpriv, mmio, port);
3145
3146        ZERO(0x028);    /* command */
3147        writel(0x11f, port_mmio + EDMA_CFG);
3148        ZERO(0x004);    /* timer */
3149        ZERO(0x008);    /* irq err cause */
3150        ZERO(0x00c);    /* irq err mask */
3151        ZERO(0x010);    /* rq bah */
3152        ZERO(0x014);    /* rq inp */
3153        ZERO(0x018);    /* rq outp */
3154        ZERO(0x01c);    /* respq bah */
3155        ZERO(0x024);    /* respq outp */
3156        ZERO(0x020);    /* respq inp */
3157        ZERO(0x02c);    /* test control */
3158        writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
3159}
3160#undef ZERO
3161
3162#define ZERO(reg) writel(0, hc_mmio + (reg))
3163static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3164                        unsigned int hc)
3165{
3166        void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3167        u32 tmp;
3168
3169        ZERO(0x00c);
3170        ZERO(0x010);
3171        ZERO(0x014);
3172        ZERO(0x018);
3173
3174        tmp = readl(hc_mmio + 0x20);
3175        tmp &= 0x1c1c1c1c;
3176        tmp |= 0x03030303;
3177        writel(tmp, hc_mmio + 0x20);
3178}
3179#undef ZERO
3180
3181static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3182                        unsigned int n_hc)
3183{
3184        unsigned int hc, port;
3185
3186        for (hc = 0; hc < n_hc; hc++) {
3187                for (port = 0; port < MV_PORTS_PER_HC; port++)
3188                        mv5_reset_hc_port(hpriv, mmio,
3189                                          (hc * MV_PORTS_PER_HC) + port);
3190
3191                mv5_reset_one_hc(hpriv, mmio, hc);
3192        }
3193
3194        return 0;
3195}
3196
3197#undef ZERO
3198#define ZERO(reg) writel(0, mmio + (reg))
3199static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
3200{
3201        struct mv_host_priv *hpriv = host->private_data;
3202        u32 tmp;
3203
3204        tmp = readl(mmio + MV_PCI_MODE);
3205        tmp &= 0xff00ffff;
3206        writel(tmp, mmio + MV_PCI_MODE);
3207
3208        ZERO(MV_PCI_DISC_TIMER);
3209        ZERO(MV_PCI_MSI_TRIGGER);
3210        writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
3211        ZERO(MV_PCI_SERR_MASK);
3212        ZERO(hpriv->irq_cause_offset);
3213        ZERO(hpriv->irq_mask_offset);
3214        ZERO(MV_PCI_ERR_LOW_ADDRESS);
3215        ZERO(MV_PCI_ERR_HIGH_ADDRESS);
3216        ZERO(MV_PCI_ERR_ATTRIBUTE);
3217        ZERO(MV_PCI_ERR_COMMAND);
3218}
3219#undef ZERO
3220
3221static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3222{
3223        u32 tmp;
3224
3225        mv5_reset_flash(hpriv, mmio);
3226
3227        tmp = readl(mmio + GPIO_PORT_CTL);
3228        tmp &= 0x3;
3229        tmp |= (1 << 5) | (1 << 6);
3230        writel(tmp, mmio + GPIO_PORT_CTL);
3231}
3232
3233/**
3234 *      mv6_reset_hc - Perform the 6xxx global soft reset
3235 *      @mmio: base address of the HBA
3236 *
3237 *      This routine only applies to 6xxx parts.
3238 *
3239 *      LOCKING:
3240 *      Inherited from caller.
3241 */
3242static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3243                        unsigned int n_hc)
3244{
3245        void __iomem *reg = mmio + PCI_MAIN_CMD_STS;
3246        int i, rc = 0;
3247        u32 t;
3248
3249        /* Follow the procedure defined in the PCI "main command and
3250         * status register" table.
3251         */
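        /*
         * In outline: (1) stop the PCI master and wait for it to drain,
         * (2) assert GLOB_SFT_RST, then (3) de-assert it again while
         * re-enabling the PCI master.  Each step below polls briefly for
         * the corresponding status bit before giving up.
         */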
3252        t = readl(reg);
3253        writel(t | STOP_PCI_MASTER, reg);
3254
3255        for (i = 0; i < 1000; i++) {
3256                udelay(1);
3257                t = readl(reg);
3258                if (PCI_MASTER_EMPTY & t)
3259                        break;
3260        }
3261        if (!(PCI_MASTER_EMPTY & t)) {
3262                printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
3263                rc = 1;
3264                goto done;
3265        }
3266
3267        /* set reset */
3268        i = 5;
3269        do {
3270                writel(t | GLOB_SFT_RST, reg);
3271                t = readl(reg);
3272                udelay(1);
3273        } while (!(GLOB_SFT_RST & t) && (i-- > 0));
3274
3275        if (!(GLOB_SFT_RST & t)) {
3276                printk(KERN_ERR DRV_NAME ": can't set global reset\n");
3277                rc = 1;
3278                goto done;
3279        }
3280
3281        /* clear reset and *reenable the PCI master* (not mentioned in spec) */
3282        i = 5;
3283        do {
3284                writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
3285                t = readl(reg);
3286                udelay(1);
3287        } while ((GLOB_SFT_RST & t) && (i-- > 0));
3288
3289        if (GLOB_SFT_RST & t) {
3290                printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
3291                rc = 1;
3292        }
3293done:
3294        return rc;
3295}
3296
3297static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
3298                           void __iomem *mmio)
3299{
3300        void __iomem *port_mmio;
3301        u32 tmp;
3302
3303        tmp = readl(mmio + RESET_CFG);
3304        if ((tmp & (1 << 0)) == 0) {
3305                hpriv->signal[idx].amps = 0x7 << 8;
3306                hpriv->signal[idx].pre = 0x1 << 5;
3307                return;
3308        }
3309
3310        port_mmio = mv_port_base(mmio, idx);
3311        tmp = readl(port_mmio + PHY_MODE2);
3312
3313        hpriv->signal[idx].amps = tmp & 0x700;  /* bits 10:8 */
3314        hpriv->signal[idx].pre = tmp & 0xe0;    /* bits 7:5 */
3315}
3316
3317static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
3318{
3319        writel(0x00000060, mmio + GPIO_PORT_CTL);
3320}
3321
3322static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
3323                           unsigned int port)
3324{
3325        void __iomem *port_mmio = mv_port_base(mmio, port);
3326
3327        u32 hp_flags = hpriv->hp_flags;
3328        int fix_phy_mode2 =
3329                hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
3330        int fix_phy_mode4 =
3331                hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
3332        u32 m2, m3;
3333
3334        if (fix_phy_mode2) {
3335                m2 = readl(port_mmio + PHY_MODE2);
3336                m2 &= ~(1 << 16);
3337                m2 |= (1 << 31);
3338                writel(m2, port_mmio + PHY_MODE2);
3339
3340                udelay(200);
3341
3342                m2 = readl(port_mmio + PHY_MODE2);
3343                m2 &= ~((1 << 16) | (1 << 31));
3344                writel(m2, port_mmio + PHY_MODE2);
3345
3346                udelay(200);
3347        }
3348
3349        /*
3350         * Gen-II/IIe PHY_MODE3 errata RM#2:
3351         * Achieves better receiver noise performance than the h/w default:
3352         */
3353        m3 = readl(port_mmio + PHY_MODE3);
3354        m3 = (m3 & 0x1f) | (0x5555601 << 5);
3355
3356        /* Guideline 88F5182 (GL# SATA-S11) */
3357        if (IS_SOC(hpriv))
3358                m3 &= ~0x1c;
3359
3360        if (fix_phy_mode4) {
3361                u32 m4 = readl(port_mmio + PHY_MODE4);
3362                /*
3363                 * Enforce reserved-bit restrictions on GenIIe devices only.
3364                 * For earlier chipsets, force only the internal config field
3365                 *  (workaround for errata FEr SATA#10 part 1).
3366                 */
3367                if (IS_GEN_IIE(hpriv))
3368                        m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
3369                else
3370                        m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
3371                writel(m4, port_mmio + PHY_MODE4);
3372        }
3373        /*
3374         * Workaround for 60x1-B2 errata SATA#13:
3375         * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
3376         * so we must always rewrite PHY_MODE3 after PHY_MODE4.
3377         * Or ensure we use writelfl() when writing PHY_MODE4.
3378         */
3379        writel(m3, port_mmio + PHY_MODE3);
3380
3381        /* Revert values of pre-emphasis and signal amps to the saved ones */
3382        m2 = readl(port_mmio + PHY_MODE2);
3383
3384        m2 &= ~MV_M2_PREAMP_MASK;
3385        m2 |= hpriv->signal[port].amps;
3386        m2 |= hpriv->signal[port].pre;
3387        m2 &= ~(1 << 16);
3388
3389        /* according to mvSata 3.6.1, some IIE values are fixed */
3390        if (IS_GEN_IIE(hpriv)) {
3391                m2 &= ~0xC30FF01F;
3392                m2 |= 0x0000900F;
3393        }
3394
3395        writel(m2, port_mmio + PHY_MODE2);
3396}
3397
3398/* TODO: use the generic LED interface to configure the SATA Presence */
3399/* & Activity LEDs on the board */
3400static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
3401                                      void __iomem *mmio)
3402{
3403        return;
3404}
3405
3406static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
3407                           void __iomem *mmio)
3408{
3409        void __iomem *port_mmio;
3410        u32 tmp;
3411
3412        port_mmio = mv_port_base(mmio, idx);
3413        tmp = readl(port_mmio + PHY_MODE2);
3414
3415        hpriv->signal[idx].amps = tmp & 0x700;  /* bits 10:8 */
3416        hpriv->signal[idx].pre = tmp & 0xe0;    /* bits 7:5 */
3417}
3418
3419#undef ZERO
3420#define ZERO(reg) writel(0, port_mmio + (reg))
3421static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
3422                                        void __iomem *mmio, unsigned int port)
3423{
3424        void __iomem *port_mmio = mv_port_base(mmio, port);
3425
3426        mv_reset_channel(hpriv, mmio, port);
3427
3428        ZERO(0x028);            /* command */
3429        writel(0x101f, port_mmio + EDMA_CFG);
3430        ZERO(0x004);            /* timer */
3431        ZERO(0x008);            /* irq err cause */
3432        ZERO(0x00c);            /* irq err mask */
3433        ZERO(0x010);            /* rq bah */
3434        ZERO(0x014);            /* rq inp */
3435        ZERO(0x018);            /* rq outp */
3436        ZERO(0x01c);            /* respq bah */
3437        ZERO(0x024);            /* respq outp */
3438        ZERO(0x020);            /* respq inp */
3439        ZERO(0x02c);            /* test control */
3440        writel(0x800, port_mmio + EDMA_IORDY_TMOUT);
3441}
3442
3443#undef ZERO
3444
3445#define ZERO(reg) writel(0, hc_mmio + (reg))
3446static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
3447                                       void __iomem *mmio)
3448{
3449        void __iomem *hc_mmio = mv_hc_base(mmio, 0);
3450
3451        ZERO(0x00c);
3452        ZERO(0x010);
3453        ZERO(0x014);
3454
3455}
3456
3457#undef ZERO
3458
3459static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
3460                                  void __iomem *mmio, unsigned int n_hc)
3461{
3462        unsigned int port;
3463
3464        for (port = 0; port < hpriv->n_ports; port++)
3465                mv_soc_reset_hc_port(hpriv, mmio, port);
3466
3467        mv_soc_reset_one_hc(hpriv, mmio);
3468
3469        return 0;
3470}
3471
3472static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
3473                                      void __iomem *mmio)
3474{
3475        return;
3476}
3477
3478static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
3479{
3480        return;
3481}
3482
3483static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
3484                                  void __iomem *mmio, unsigned int port)
3485{
3486        void __iomem *port_mmio = mv_port_base(mmio, port);
3487        u32     reg;
3488
3489        reg = readl(port_mmio + PHY_MODE3);
3490        reg &= ~(0x3 << 27);    /* SELMUPF (bits 28:27) to 1 */
3491        reg |= (0x1 << 27);
3492        reg &= ~(0x3 << 29);    /* SELMUPI (bits 30:29) to 1 */
3493        reg |= (0x1 << 29);
3494        writel(reg, port_mmio + PHY_MODE3);
3495
3496        reg = readl(port_mmio + PHY_MODE4);
3497        reg &= ~0x1;    /* SATU_OD8 (bit 0) to 0, reserved bit 16 must be set */
3498        reg |= (0x1 << 16);
3499        writel(reg, port_mmio + PHY_MODE4);
3500
3501        reg = readl(port_mmio + PHY_MODE9_GEN2);
3502        reg &= ~0xf;    /* TXAMP[3:0] (bits 3:0) to 8 */
3503        reg |= 0x8;
3504        reg &= ~(0x1 << 14);    /* TXAMP[4] (bit 14) to 0 */
3505        writel(reg, port_mmio + PHY_MODE9_GEN2);
3506
3507        reg = readl(port_mmio + PHY_MODE9_GEN1);
3508        reg &= ~0xf;    /* TXAMP[3:0] (bits 3:0) to 8 */
3509        reg |= 0x8;
3510        reg &= ~(0x1 << 14);    /* TXAMP[4] (bit 14) to 0 */
3511        writel(reg, port_mmio + PHY_MODE9_GEN1);
3512}
3513
3514/**
3515 *      soc_is_65n - check if the SoC is a 65 nm device
3516 *
3517 *      Detect the type of the SoC by reading the PHYCFG_OFS register,
3518 *      which exists only on the 65 nm devices and holds a non-zero value
3519 *      there; reading it on older devices returns 0.
3520 */
3521static bool soc_is_65n(struct mv_host_priv *hpriv)
3522{
3523        void __iomem *port0_mmio = mv_port_base(hpriv->base, 0);
3524
3525        if (readl(port0_mmio + PHYCFG_OFS))
3526                return true;
3527        return false;
3528}
3529
3530static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
3531{
3532        u32 ifcfg = readl(port_mmio + SATA_IFCFG);
3533
3534        ifcfg = (ifcfg & 0xf7f) | 0x9b1000;     /* from chip spec */
3535        if (want_gen2i)
3536                ifcfg |= (1 << 7);              /* enable gen2i speed */
3537        writelfl(ifcfg, port_mmio + SATA_IFCFG);
3538}
3539
3540static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
3541                             unsigned int port_no)
3542{
3543        void __iomem *port_mmio = mv_port_base(mmio, port_no);
3544
3545        /*
3546         * The datasheet warns against setting EDMA_RESET when EDMA is active
3547         * (but doesn't say what the problem might be).  So we first try
3548         * to disable the EDMA engine before doing the EDMA_RESET operation.
3549         */
3550        mv_stop_edma_engine(port_mmio);
3551        writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
3552
3553        if (!IS_GEN_I(hpriv)) {
3554                /* Enable 3.0gb/s link speed: this survives EDMA_RESET */
3555                mv_setup_ifcfg(port_mmio, 1);
3556        }
3557        /*
3558         * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
3559         * link, and physical layers.  It resets all SATA interface registers
3560         * (except for SATA_IFCFG), and issues a COMRESET to the dev.
3561         */
3562        writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
3563        udelay(25);     /* allow reset propagation */
3564        writelfl(0, port_mmio + EDMA_CMD);
3565
3566        hpriv->ops->phy_errata(hpriv, mmio, port_no);
3567
3568        if (IS_GEN_I(hpriv))
3569                mdelay(1);
3570}
3571
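/*
 * The low nibble of SATA_IFCTL holds the currently selected
 * port-multiplier target; mv_pmp_select() rewrites it only when the
 * target actually changes, and only on chips with PMP support.
 */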
3572static void mv_pmp_select(struct ata_port *ap, int pmp)
3573{
3574        if (sata_pmp_supported(ap)) {
3575                void __iomem *port_mmio = mv_ap_base(ap);
3576                u32 reg = readl(port_mmio + SATA_IFCTL);
3577                int old = reg & 0xf;
3578
3579                if (old != pmp) {
3580                        reg = (reg & ~0xf) | pmp;
3581                        writelfl(reg, port_mmio + SATA_IFCTL);
3582                }
3583        }
3584}
3585
3586static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
3587                                unsigned long deadline)
3588{
3589        mv_pmp_select(link->ap, sata_srst_pmp(link));
3590        return sata_std_hardreset(link, class, deadline);
3591}
3592
3593static int mv_softreset(struct ata_link *link, unsigned int *class,
3594                                unsigned long deadline)
3595{
3596        mv_pmp_select(link->ap, sata_srst_pmp(link));
3597        return ata_sff_softreset(link, class, deadline);
3598}
3599
3600static int mv_hardreset(struct ata_link *link, unsigned int *class,
3601                        unsigned long deadline)
3602{
3603        struct ata_port *ap = link->ap;
3604        struct mv_host_priv *hpriv = ap->host->private_data;
3605        struct mv_port_priv *pp = ap->private_data;
3606        void __iomem *mmio = hpriv->base;
3607        int rc, attempts = 0, extra = 0;
3608        u32 sstatus;
3609        bool online;
3610
3611        mv_reset_channel(hpriv, mmio, ap->port_no);
3612        pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
3613        pp->pp_flags &=
3614          ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
3615
3616        /* Workaround for errata FEr SATA#10 (part 2) */
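        /*
         * The loop below retries the hardreset until SStatus settles on
         * 0x0 (nothing attached), or 0x113 / 0x123 (link up at 1.5 or
         * 3 Gbps, per the standard SStatus DET/SPD/IPM encoding).  If,
         * after several attempts on non-Gen-I chips, SStatus stays stuck
         * at 0x121 (device sensed but no PHY communication), the link
         * speed is forced down to 1.5 Gbps via mv_setup_ifcfg() before
         * retrying.
         */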
3617        do {
3618                const unsigned long *timing =
3619                                sata_ehc_deb_timing(&link->eh_context);
3620
3621                rc = sata_link_hardreset(link, timing, deadline + extra,
3622                                         &online, NULL);
3623                rc = online ? -EAGAIN : rc;
3624                if (rc)
3625                        return rc;
3626                sata_scr_read(link, SCR_STATUS, &sstatus);
3627                if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
3628                        /* Force 1.5gb/s link speed and try again */
3629                        mv_setup_ifcfg(mv_ap_base(ap), 0);
3630                        if (time_after(jiffies + HZ, deadline))
3631                                extra = HZ; /* only extend it once, max */
3632                }
3633        } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
3634        mv_save_cached_regs(ap);
3635        mv_edma_cfg(ap, 0, 0);
3636
3637        return rc;
3638}
3639
3640static void mv_eh_freeze(struct ata_port *ap)
3641{
3642        mv_stop_edma(ap);
3643        mv_enable_port_irqs(ap, 0);
3644}
3645
3646static void mv_eh_thaw(struct ata_port *ap)
3647{
3648        struct mv_host_priv *hpriv = ap->host->private_data;
3649        unsigned int port = ap->port_no;
3650        unsigned int hardport = mv_hardport_from_port(port);
3651        void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
3652        void __iomem *port_mmio = mv_ap_base(ap);
3653        u32 hc_irq_cause;
3654
3655        /* clear EDMA errors on this port */
3656        writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
3657
3658        /* clear pending irq events */
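        /*
         * HC_IRQ_CAUSE bits are acked by writing 0 to them (the same
         * convention mv_host_intr() uses), so the complemented mask below
         * clears only this port's DEV/DMA bits and leaves the other
         * ports' bits untouched.
         */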
3659        hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
3660        writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
3661
3662        mv_enable_port_irqs(ap, ERR_IRQ);
3663}
3664
3665/**
3666 *      mv_port_init - Perform some early initialization on a single port.
3667 *      @port: libata data structure storing shadow register addresses
3668 *      @port_mmio: base address of the port
3669 *
3670 *      Initialize shadow register mmio addresses, clear outstanding
3671 *      interrupts on the port, and unmask interrupts for the future
3672 *      start of the port.
3673 *
3674 *      LOCKING:
3675 *      Inherited from caller.
3676 */
3677static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
3678{
3679        void __iomem *serr, *shd_base = port_mmio + SHD_BLK;
3680
3681        /* PIO related setup
3682         */
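        /*
         * The ATA taskfile "shadow" registers live in the SHD_BLK window
         * as consecutive 32-bit slots, so each address below is simply
         * shd_base + 4 * ATA_REG_<x>; only control/altstatus sits outside
         * that pattern, at SHD_CTL_AST.
         */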
3683        port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
3684        port->error_addr =
3685                port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
3686        port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
3687        port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
3688        port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
3689        port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
3690        port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
3691        port->status_addr =
3692                port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
3693        /* special case: control/altstatus doesn't have ATA_REG_ address */
3694        port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST;
3695
3696        /* Clear any currently outstanding port interrupt conditions */
3697        serr = port_mmio + mv_scr_offset(SCR_ERROR);
3698        writelfl(readl(serr), serr);
3699        writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
3700
3701        /* unmask all non-transient EDMA error interrupts */
3702        writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK);
3703
3704        VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
3705                readl(port_mmio + EDMA_CFG),
3706                readl(port_mmio + EDMA_ERR_IRQ_CAUSE),
3707                readl(port_mmio + EDMA_ERR_IRQ_MASK));
3708}
3709
3710static unsigned int mv_in_pcix_mode(struct ata_host *host)
3711{
3712        struct mv_host_priv *hpriv = host->private_data;
3713        void __iomem *mmio = hpriv->base;
3714        u32 reg;
3715
3716        if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
3717                return 0;       /* not PCI-X capable */
3718        reg = readl(mmio + MV_PCI_MODE);
3719        if ((reg & MV_PCI_MODE_MASK) == 0)
3720                return 0;       /* conventional PCI mode */
3721        return 1;       /* chip is in PCI-X mode */
3722}
3723
3724static int mv_pci_cut_through_okay(struct ata_host *host)
3725{
3726        struct mv_host_priv *hpriv = host->private_data;
3727        void __iomem *mmio = hpriv->base;
3728        u32 reg;
3729
3730        if (!mv_in_pcix_mode(host)) {
3731                reg = readl(mmio + MV_PCI_COMMAND);
3732                if (reg & MV_PCI_COMMAND_MRDTRIG)
3733                        return 0; /* not okay */
3734        }
3735        return 1; /* okay */
3736}
3737
3738static void mv_60x1b2_errata_pci7(struct ata_host *host)
3739{
3740        struct mv_host_priv *hpriv = host->private_data;
3741        void __iomem *mmio = hpriv->base;
3742
3743        /* workaround for 60x1-B2 errata PCI#7 */
3744        if (mv_in_pcix_mode(host)) {
3745                u32 reg = readl(mmio + MV_PCI_COMMAND);
3746                writelfl(reg & ~MV_PCI_COMMAND_MWRCOM, mmio + MV_PCI_COMMAND);
3747        }
3748}
3749
3750static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
3751{
3752        struct pci_dev *pdev = to_pci_dev(host->dev);
3753        struct mv_host_priv *hpriv = host->private_data;
3754        u32 hp_flags = hpriv->hp_flags;
3755
3756        switch (board_idx) {
3757        case chip_5080:
3758                hpriv->ops = &mv5xxx_ops;
3759                hp_flags |= MV_HP_GEN_I;
3760
3761                switch (pdev->revision) {
3762                case 0x1:
3763                        hp_flags |= MV_HP_ERRATA_50XXB0;
3764                        break;
3765                case 0x3:
3766                        hp_flags |= MV_HP_ERRATA_50XXB2;
3767                        break;
3768                default:
3769                        dev_warn(&pdev->dev,
3770                                 "Applying 50XXB2 workarounds to unknown rev\n");
3771                        hp_flags |= MV_HP_ERRATA_50XXB2;
3772                        break;
3773                }
3774                break;
3775
3776        case chip_504x:
3777        case chip_508x:
3778                hpriv->ops = &mv5xxx_ops;
3779                hp_flags |= MV_HP_GEN_I;
3780
3781                switch (pdev->revision) {
3782                case 0x0:
3783                        hp_flags |= MV_HP_ERRATA_50XXB0;
3784                        break;
3785                case 0x3:
3786                        hp_flags |= MV_HP_ERRATA_50XXB2;
3787                        break;
3788                default:
3789                        dev_warn(&pdev->dev,
3790                                 "Applying B2 workarounds to unknown rev\n");
3791                        hp_flags |= MV_HP_ERRATA_50XXB2;
3792                        break;
3793                }
3794                break;
3795
3796        case chip_604x:
3797        case chip_608x:
3798                hpriv->ops = &mv6xxx_ops;
3799                hp_flags |= MV_HP_GEN_II;
3800
3801                switch (pdev->revision) {
3802                case 0x7:
3803                        mv_60x1b2_errata_pci7(host);
3804                        hp_flags |= MV_HP_ERRATA_60X1B2;
3805                        break;
3806                case 0x9:
3807                        hp_flags |= MV_HP_ERRATA_60X1C0;
3808                        break;
3809                default:
3810                        dev_warn(&pdev->dev,
3811                                 "Applying B2 workarounds to unknown rev\n");
3812                        hp_flags |= MV_HP_ERRATA_60X1B2;
3813                        break;
3814                }
3815                break;
3816
3817        case chip_7042:
3818                hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
3819                if (pdev->vendor == PCI_VENDOR_ID_TTI &&
3820                    (pdev->device == 0x2300 || pdev->device == 0x2310))
3821                {
3822                        /*
3823                         * Highpoint RocketRAID PCIe 23xx series cards:
3824                         *
3825                         * Unconfigured drives are treated as "Legacy"
3826                         * by the BIOS, and it overwrites sector 8 with
3827                         * a "Lgcy" metadata block prior to Linux boot.
3828                         *
3829                         * Configured drives (RAID or JBOD) leave sector 8
3830                         * alone, but instead overwrite a high numbered
3831                         * sector for the RAID metadata.  This sector can
3832                         * be determined exactly, by truncating the physical
3833                         * drive capacity to a nice even GB value.
3834                         *
3835                         * RAID metadata is at: (dev->n_sectors & ~0xfffff)
3836                         *
3837                         * Warn the user, lest they think we're just buggy.
3838                         */
3839                        printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
3840                                " BIOS CORRUPTS DATA on all attached drives,"
3841                                " regardless of if/how they are configured."
3842                                " BEWARE!\n");
3843                        printk(KERN_WARNING DRV_NAME ": For data safety, do not"
3844                                " use sectors 8-9 on \"Legacy\" drives,"
3845                                " and avoid the final two gigabytes on"
3846                                " all RocketRAID BIOS initialized drives.\n");
3847                }
3848                /* fall through */
3849        case chip_6042:
3850                hpriv->ops = &mv6xxx_ops;
3851                hp_flags |= MV_HP_GEN_IIE;
3852                if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
3853                        hp_flags |= MV_HP_CUT_THROUGH;
3854
3855                switch (pdev->revision) {
3856                case 0x2: /* Rev.B0: the first/only public release */
3857                        hp_flags |= MV_HP_ERRATA_60X1C0;
3858                        break;
3859                default:
3860                        dev_warn(&pdev->dev,
3861                                 "Applying 60X1C0 workarounds to unknown rev\n");
3862                        hp_flags |= MV_HP_ERRATA_60X1C0;
3863                        break;
3864                }
3865                break;
3866        case chip_soc:
3867                if (soc_is_65n(hpriv))
3868                        hpriv->ops = &mv_soc_65n_ops;
3869                else
3870                        hpriv->ops = &mv_soc_ops;
3871                hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
3872                        MV_HP_ERRATA_60X1C0;
3873                break;
3874
3875        default:
3876                dev_err(host->dev, "BUG: invalid board index %u\n", board_idx);
3877                return 1;
3878        }
3879
3880        hpriv->hp_flags = hp_flags;
3881        if (hp_flags & MV_HP_PCIE) {
3882                hpriv->irq_cause_offset = PCIE_IRQ_CAUSE;
3883                hpriv->irq_mask_offset  = PCIE_IRQ_MASK;
3884                hpriv->unmask_all_irqs  = PCIE_UNMASK_ALL_IRQS;
3885        } else {
3886                hpriv->irq_cause_offset = PCI_IRQ_CAUSE;
3887                hpriv->irq_mask_offset  = PCI_IRQ_MASK;
3888                hpriv->unmask_all_irqs  = PCI_UNMASK_ALL_IRQS;
3889        }
3890
3891        return 0;
3892}
3893
3894/**
3895 *      mv_init_host - Perform some early initialization of the host.
3896 *      @host: ATA host to initialize
3897 *
3898 *      If possible, do an early global reset of the host.  Then do
3899 *      our port init and clear/unmask all/relevant host interrupts.
3900 *
3901 *      LOCKING:
3902 *      Inherited from caller.
3903 */
3904static int mv_init_host(struct ata_host *host)
3905{
3906        int rc = 0, n_hc, port, hc;
3907        struct mv_host_priv *hpriv = host->private_data;
3908        void __iomem *mmio = hpriv->base;
3909
3910        rc = mv_chip_id(host, hpriv->board_idx);
3911        if (rc)
3912                goto done;
3913
3914        if (IS_SOC(hpriv)) {
3915                hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE;
3916                hpriv->main_irq_mask_addr  = mmio + SOC_HC_MAIN_IRQ_MASK;
3917        } else {
3918                hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE;
3919                hpriv->main_irq_mask_addr  = mmio + PCI_HC_MAIN_IRQ_MASK;
3920        }
3921
3922        /* initialize shadow irq mask with register's value */
3923        hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);
3924
3925        /* global interrupt mask: 0 == mask everything */
3926        mv_set_main_irq_mask(host, ~0, 0);
3927
3928        n_hc = mv_get_hc_count(host->ports[0]->flags);
3929
3930        for (port = 0; port < host->n_ports; port++)
3931                if (hpriv->ops->read_preamp)
3932                        hpriv->ops->read_preamp(hpriv, port, mmio);
3933
3934        rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
3935        if (rc)
3936                goto done;
3937
3938        hpriv->ops->reset_flash(hpriv, mmio);
3939        hpriv->ops->reset_bus(host, mmio);
3940        hpriv->ops->enable_leds(hpriv, mmio);
3941
3942        for (port = 0; port < host->n_ports; port++) {
3943                struct ata_port *ap = host->ports[port];
3944                void __iomem *port_mmio = mv_port_base(mmio, port);
3945
3946                mv_port_init(&ap->ioaddr, port_mmio);
3947        }
3948
3949        for (hc = 0; hc < n_hc; hc++) {
3950                void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3951
3952                VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
3953                        "(before clear)=0x%08x\n", hc,
3954                        readl(hc_mmio + HC_CFG),
3955                        readl(hc_mmio + HC_IRQ_CAUSE));
3956
3957                /* Clear any currently outstanding hc interrupt conditions */
3958                writelfl(0, hc_mmio + HC_IRQ_CAUSE);
3959        }
3960
3961        if (!IS_SOC(hpriv)) {
3962                /* Clear any currently outstanding host interrupt conditions */
3963                writelfl(0, mmio + hpriv->irq_cause_offset);
3964
3965                /* and unmask interrupt generation for host regs */
3966                writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset);
3967        }
3968
3969        /*
3970         * enable only global host interrupts for now.
3971         * The per-port interrupts get done later as ports are set up.
3972         */
3973        mv_set_main_irq_mask(host, 0, PCI_ERR);
3974        mv_set_irq_coalescing(host, irq_coalescing_io_count,
3975                                    irq_coalescing_usecs);
3976done:
3977        return rc;
3978}
3979
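/*
 * The pools below are created with the device-managed (devres) variant,
 * dmam_pool_create(), so they are released automatically when the device
 * is detached; that is why the early -ENOMEM returns need no explicit
 * cleanup of pools that were already created.
 */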
3980static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
3981{
3982        hpriv->crqb_pool   = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
3983                                                             MV_CRQB_Q_SZ, 0);
3984        if (!hpriv->crqb_pool)
3985                return -ENOMEM;
3986
3987        hpriv->crpb_pool   = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
3988                                                             MV_CRPB_Q_SZ, 0);
3989        if (!hpriv->crpb_pool)
3990                return -ENOMEM;
3991
3992        hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
3993                                                             MV_SG_TBL_SZ, 0);
3994        if (!hpriv->sg_tbl_pool)
3995                return -ENOMEM;
3996
3997        return 0;
3998}
3999
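/*
 * Each of the four SATA MBUS address decode windows is programmed from
 * the mbus_dram_target_info: WINDOW_CTRL takes the (size - 1) mask in
 * bits 31:16, the chip-select attribute in bits 15:8, the DRAM target id
 * in bits 7:4 and the enable bit in bit 0, while WINDOW_BASE holds the
 * window base address.  Unused windows are first disabled by zeroing
 * both registers.
 */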
4000static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
4001                                 const struct mbus_dram_target_info *dram)
4002{
4003        int i;
4004
4005        for (i = 0; i < 4; i++) {
4006                writel(0, hpriv->base + WINDOW_CTRL(i));
4007                writel(0, hpriv->base + WINDOW_BASE(i));
4008        }
4009
4010        for (i = 0; i < dram->num_cs; i++) {
4011                const struct mbus_dram_window *cs = dram->cs + i;
4012
4013                writel(((cs->size - 1) & 0xffff0000) |
4014                        (cs->mbus_attr << 8) |
4015                        (dram->mbus_dram_target_id << 4) | 1,
4016                        hpriv->base + WINDOW_CTRL(i));
4017                writel(cs->base, hpriv->base + WINDOW_BASE(i));
4018        }
4019}
4020
4021/**
4022 *      mv_platform_probe - handle a positive probe of an soc Marvell
4023 *      host
4024 *      @pdev: platform device found
4025 *
4026 *      LOCKING:
4027 *      Inherited from caller.
4028 */
4029static int mv_platform_probe(struct platform_device *pdev)
4030{
4031        const struct mv_sata_platform_data *mv_platform_data;
4032        const struct mbus_dram_target_info *dram;
4033        const struct ata_port_info *ppi[] =
4034            { &mv_port_info[chip_soc], NULL };
4035        struct ata_host *host;
4036        struct mv_host_priv *hpriv;
4037        struct resource *res;
4038        int n_ports = 0, irq = 0;
4039        int rc;
4040        int port;
4041
4042        ata_print_version_once(&pdev->dev, DRV_VERSION);
4043
4044        /*
4045         * Simple resource validation.
4046         */
4047        if (unlikely(pdev->num_resources != 2)) {
4048                dev_err(&pdev->dev, "invalid number of resources\n");
4049                return -EINVAL;
4050        }
4051
4052        /*
4053         * Get the register base first
4054         */
4055        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4056        if (res == NULL)
4057                return -EINVAL;
4058
4059        /* allocate host */
4060        if (pdev->dev.of_node) {
4061                of_property_read_u32(pdev->dev.of_node, "nr-ports", &n_ports);
4062                irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
4063        } else {
4064                mv_platform_data = dev_get_platdata(&pdev->dev);
4065                n_ports = mv_platform_data->n_ports;
4066                irq = platform_get_irq(pdev, 0);
4067        }
4068
4069        host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
4070        hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
4071
4072        if (!host || !hpriv)
4073                return -ENOMEM;
4074        hpriv->port_clks = devm_kzalloc(&pdev->dev,
4075                                        sizeof(struct clk *) * n_ports,
4076                                        GFP_KERNEL);
4077        if (!hpriv->port_clks)
4078                return -ENOMEM;
4079        host->private_data = hpriv;
4080        hpriv->n_ports = n_ports;
4081        hpriv->board_idx = chip_soc;
4082
4083        host->iomap = NULL;
4084        hpriv->base = devm_ioremap(&pdev->dev, res->start,
4085                                   resource_size(res));
4086        hpriv->base -= SATAHC0_REG_BASE;
4087
4088        hpriv->clk = clk_get(&pdev->dev, NULL);
4089        if (IS_ERR(hpriv->clk))
4090                dev_notice(&pdev->dev, "cannot get optional clkdev\n");
4091        else
4092                clk_prepare_enable(hpriv->clk);
4093
4094        for (port = 0; port < n_ports; port++) {
4095                char port_number[16];
4096                sprintf(port_number, "%d", port);
4097                hpriv->port_clks[port] = clk_get(&pdev->dev, port_number);
4098                if (!IS_ERR(hpriv->port_clks[port]))
4099                        clk_prepare_enable(hpriv->port_clks[port]);
4100        }
4101
4102        /*
4103         * (Re-)program MBUS remapping windows if we are asked to.
4104         */
4105        dram = mv_mbus_dram_info();
4106        if (dram)
4107                mv_conf_mbus_windows(hpriv, dram);
4108
4109        rc = mv_create_dma_pools(hpriv, &pdev->dev);
4110        if (rc)
4111                goto err;
4112
4113        /* initialize adapter */
4114        rc = mv_init_host(host);
4115        if (rc)
4116                goto err;
4117
4118        dev_info(&pdev->dev, "slots %u ports %d\n",
4119                 (unsigned)MV_MAX_Q_DEPTH, host->n_ports);
4120
4121        rc = ata_host_activate(host, irq, mv_interrupt, IRQF_SHARED, &mv6_sht);
4122        if (!rc)
4123                return 0;
4124
4125err:
4126        if (!IS_ERR(hpriv->clk)) {
4127                clk_disable_unprepare(hpriv->clk);
4128                clk_put(hpriv->clk);
4129        }
4130        for (port = 0; port < n_ports; port++) {
4131                if (!IS_ERR(hpriv->port_clks[port])) {
4132                        clk_disable_unprepare(hpriv->port_clks[port]);
4133                        clk_put(hpriv->port_clks[port]);
4134                }
4135        }
4136
4137        return rc;
4138}
4139
4140/**
4141 *      mv_platform_remove - unplug a platform interface
4143 *      @pdev: platform device
4144 *
4145 *      A platform bus SATA device has been unplugged. Perform the needed
4146 *      cleanup. Also called on module unload for any active devices.
4147 */
4148static int mv_platform_remove(struct platform_device *pdev)
4149{
4150        struct ata_host *host = platform_get_drvdata(pdev);
4151        struct mv_host_priv *hpriv = host->private_data;
4152        int port;
4153        ata_host_detach(host);
4154
4155        if (!IS_ERR(hpriv->clk)) {
4156                clk_disable_unprepare(hpriv->clk);
4157                clk_put(hpriv->clk);
4158        }
4159        for (port = 0; port < host->n_ports; port++) {
4160                if (!IS_ERR(hpriv->port_clks[port])) {
4161                        clk_disable_unprepare(hpriv->port_clks[port]);
4162                        clk_put(hpriv->port_clks[port]);
4163                }
4164        }
4165        return 0;
4166}
4167
4168#ifdef CONFIG_PM
4169static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
4170{
4171        struct ata_host *host = platform_get_drvdata(pdev);
4172        if (host)
4173                return ata_host_suspend(host, state);
4174        else
4175                return 0;
4176}
4177
4178static int mv_platform_resume(struct platform_device *pdev)
4179{
4180        struct ata_host *host = platform_get_drvdata(pdev);
4181        const struct mbus_dram_target_info *dram;
4182        int ret;
4183
4184        if (host) {
4185                struct mv_host_priv *hpriv = host->private_data;
4186
4187                /*
4188                 * (Re-)program MBUS remapping windows if we are asked to.
4189                 */
4190                dram = mv_mbus_dram_info();
4191                if (dram)
4192                        mv_conf_mbus_windows(hpriv, dram);
4193
4194                /* initialize adapter */
4195                ret = mv_init_host(host);
4196                if (ret) {
4197                        printk(KERN_ERR DRV_NAME ": Error during HW init\n");
4198                        return ret;
4199                }
4200                ata_host_resume(host);
4201        }
4202
4203        return 0;
4204}
4205#else
4206#define mv_platform_suspend NULL
4207#define mv_platform_resume NULL
4208#endif
4209
4210#ifdef CONFIG_OF
4211static struct of_device_id mv_sata_dt_ids[] = {
4212        { .compatible = "marvell,orion-sata", },
4213        {},
4214};
4215MODULE_DEVICE_TABLE(of, mv_sata_dt_ids);
4216#endif
4217
4218static struct platform_driver mv_platform_driver = {
4219        .probe          = mv_platform_probe,
4220        .remove         = mv_platform_remove,
4221        .suspend        = mv_platform_suspend,
4222        .resume         = mv_platform_resume,
4223        .driver         = {
4224                .name = DRV_NAME,
4225                .owner = THIS_MODULE,
4226                .of_match_table = of_match_ptr(mv_sata_dt_ids),
4227        },
4228};
4229
4230
4231#ifdef CONFIG_PCI
4232static int mv_pci_init_one(struct pci_dev *pdev,
4233                           const struct pci_device_id *ent);
4234#ifdef CONFIG_PM
4235static int mv_pci_device_resume(struct pci_dev *pdev);
4236#endif
4237
4238
4239static struct pci_driver mv_pci_driver = {
4240        .name                   = DRV_NAME,
4241        .id_table               = mv_pci_tbl,
4242        .probe                  = mv_pci_init_one,
4243        .remove                 = ata_pci_remove_one,
4244#ifdef CONFIG_PM
4245        .suspend                = ata_pci_device_suspend,
4246        .resume                 = mv_pci_device_resume,
4247#endif
4248
4249};
4250
4251/* move to PCI layer or libata core? */
4252static int pci_go_64(struct pci_dev *pdev)
4253{
4254        int rc;
4255
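            /*
             * Prefer 64-bit DMA.  If the 64-bit coherent mask cannot be set,
             * or 64-bit addressing is unsupported altogether, fall back to
             * 32-bit masks before failing the probe.
             */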
4256        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4257                rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4258                if (rc) {
4259                        rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4260                        if (rc) {
4261                                dev_err(&pdev->dev,
4262                                        "64-bit DMA enable failed\n");
4263                                return rc;
4264                        }
4265                }
4266        } else {
4267                rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4268                if (rc) {
4269                        dev_err(&pdev->dev, "32-bit DMA enable failed\n");
4270                        return rc;
4271                }
4272                rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4273                if (rc) {
4274                        dev_err(&pdev->dev,
4275                                "32-bit consistent DMA enable failed\n");
4276                        return rc;
4277                }
4278        }
4279
4280        return rc;
4281}
4282
4283/**
4284 *      mv_print_info - Dump key info to kernel log for perusal.
4285 *      @host: ATA host to print info about
4286 *
4287 *      FIXME: complete this.
4288 *
4289 *      LOCKING:
4290 *      Inherited from caller.
4291 */
4292static void mv_print_info(struct ata_host *host)
4293{
4294        struct pci_dev *pdev = to_pci_dev(host->dev);
4295        struct mv_host_priv *hpriv = host->private_data;
4296        u8 scc;
4297        const char *scc_s, *gen;
4298
4299        /* Read the PCI storage sub-class byte so we can report whether
4300         * the chip presents itself as a SCSI or RAID class device.
4301         */
4302        pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
4303        if (scc == 0)
4304                scc_s = "SCSI";
4305        else if (scc == 0x01)
4306                scc_s = "RAID";
4307        else
4308                scc_s = "?";
4309
4310        if (IS_GEN_I(hpriv))
4311                gen = "I";
4312        else if (IS_GEN_II(hpriv))
4313                gen = "II";
4314        else if (IS_GEN_IIE(hpriv))
4315                gen = "IIE";
4316        else
4317                gen = "?";
4318
4319        dev_info(&pdev->dev, "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
4320                 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
4321                 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
4322}
4323
4324/**
4325 *      mv_pci_init_one - handle a positive probe of a PCI Marvell host
4326 *      @pdev: PCI device found
4327 *      @ent: PCI device ID entry for the matched host
4328 *
4329 *      LOCKING:
4330 *      Inherited from caller.
4331 */
4332static int mv_pci_init_one(struct pci_dev *pdev,
4333                           const struct pci_device_id *ent)
4334{
4335        unsigned int board_idx = (unsigned int)ent->driver_data;
4336        const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
4337        struct ata_host *host;
4338        struct mv_host_priv *hpriv;
4339        int n_ports, port, rc;
4340
4341        ata_print_version_once(&pdev->dev, DRV_VERSION);
4342
4343        /* allocate host */
4344        n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
4345
4346        host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
4347        hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
4348        if (!host || !hpriv)
4349                return -ENOMEM;
4350        host->private_data = hpriv;
4351        hpriv->n_ports = n_ports;
4352        hpriv->board_idx = board_idx;
4353
4354        /* acquire resources */
4355        rc = pcim_enable_device(pdev);
4356        if (rc)
4357                return rc;
4358
4359        rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
4360        if (rc == -EBUSY)
4361                pcim_pin_device(pdev);
4362        if (rc)
4363                return rc;
4364        host->iomap = pcim_iomap_table(pdev);
4365        hpriv->base = host->iomap[MV_PRIMARY_BAR];
4366
4367        rc = pci_go_64(pdev);
4368        if (rc)
4369                return rc;
4370
4371        rc = mv_create_dma_pools(hpriv, &pdev->dev);
4372        if (rc)
4373                return rc;
4374
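            /* Record each port's MMIO offset within the primary BAR so it
             * shows up in the libata port description strings.
             */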
4375        for (port = 0; port < host->n_ports; port++) {
4376                struct ata_port *ap = host->ports[port];
4377                void __iomem *port_mmio = mv_port_base(hpriv->base, port);
4378                unsigned int offset = port_mmio - hpriv->base;
4379
4380                ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
4381                ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
4382        }
4383
4384        /* initialize adapter */
4385        rc = mv_init_host(host);
4386        if (rc)
4387                return rc;
4388
4389        /* Enable Message Signaled Interrupts (MSI), if requested */
4390        if (msi && pci_enable_msi(pdev) == 0)
4391                hpriv->hp_flags |= MV_HP_FLAG_MSI;
4392
4393        mv_dump_pci_cfg(pdev, 0x68);
4394        mv_print_info(host);
4395
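            /* Enable bus mastering (and MWI where supported), then hand the
             * host to libata.  Gen-I chips use the basic mv5 template, while
             * Gen-II/IIE chips use the NCQ-capable mv6 template.
             */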
4396        pci_set_master(pdev);
4397        pci_try_set_mwi(pdev);
4398        return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
4399                                 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
4400}
4401
4402#ifdef CONFIG_PM
4403static int mv_pci_device_resume(struct pci_dev *pdev)
4404{
4405        struct ata_host *host = pci_get_drvdata(pdev);
4406        int rc;
4407
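            /* Restore PCI config space and re-enable the device before
             * reinitializing the controller.
             */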
4408        rc = ata_pci_device_do_resume(pdev);
4409        if (rc)
4410                return rc;
4411
4412        /* initialize adapter */
4413        rc = mv_init_host(host);
4414        if (rc)
4415                return rc;
4416
4417        ata_host_resume(host);
4418
4419        return 0;
4420}
4421#endif
4422#endif
4423
4424static int __init mv_init(void)
4425{
4426        int rc = -ENODEV;
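
            /*
             * Register the PCI driver first (when configured), then the
             * platform driver; if platform registration fails, unwind the
             * PCI registration so that module load fails cleanly.
             */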
4427#ifdef CONFIG_PCI
4428        rc = pci_register_driver(&mv_pci_driver);
4429        if (rc < 0)
4430                return rc;
4431#endif
4432        rc = platform_driver_register(&mv_platform_driver);
4433
4434#ifdef CONFIG_PCI
4435        if (rc < 0)
4436                pci_unregister_driver(&mv_pci_driver);
4437#endif
4438        return rc;
4439}
4440
4441static void __exit mv_exit(void)
4442{
4443#ifdef CONFIG_PCI
4444        pci_unregister_driver(&mv_pci_driver);
4445#endif
4446        platform_driver_unregister(&mv_platform_driver);
4447}
4448
4449MODULE_AUTHOR("Brett Russ");
4450MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
4451MODULE_LICENSE("GPL");
4452MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
4453MODULE_VERSION(DRV_VERSION);
4454MODULE_ALIAS("platform:" DRV_NAME);
4455
4456module_init(mv_init);
4457module_exit(mv_exit);
4458