linux/drivers/ata/sata_nv.c
/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a similar
 *  fashion as with other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME                        "sata_nv"
#define DRV_VERSION                     "3.5"

#define NV_ADMA_DMA_BOUNDARY            0xffffffffUL

enum {
        NV_MMIO_BAR                     = 5,

        NV_PORTS                        = 2,
        NV_PIO_MASK                     = ATA_PIO4,
        NV_MWDMA_MASK                   = ATA_MWDMA2,
        NV_UDMA_MASK                    = ATA_UDMA6,
        NV_PORT0_SCR_REG_OFFSET         = 0x00,
        NV_PORT1_SCR_REG_OFFSET         = 0x40,

        /* INT_STATUS/ENABLE */
        NV_INT_STATUS                   = 0x10,
        NV_INT_ENABLE                   = 0x11,
        NV_INT_STATUS_CK804             = 0x440,
        NV_INT_ENABLE_CK804             = 0x441,

        /* INT_STATUS/ENABLE bits */
        NV_INT_DEV                      = 0x01,
        NV_INT_PM                       = 0x02,
        NV_INT_ADDED                    = 0x04,
        NV_INT_REMOVED                  = 0x08,

        NV_INT_PORT_SHIFT               = 4,    /* each port occupies 4 bits */

        NV_INT_ALL                      = 0x0f,
        NV_INT_MASK                     = NV_INT_DEV |
                                          NV_INT_ADDED | NV_INT_REMOVED,

        /* INT_CONFIG */
        NV_INT_CONFIG                   = 0x12,
        NV_INT_CONFIG_METHD             = 0x01, // 0 = INT, 1 = SMI

        // For PCI config register 20
        NV_MCP_SATA_CFG_20              = 0x50,
        NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
        NV_MCP_SATA_CFG_20_PORT0_EN     = (1 << 17),
        NV_MCP_SATA_CFG_20_PORT1_EN     = (1 << 16),
        NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
        NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),

        NV_ADMA_MAX_CPBS                = 32,
        NV_ADMA_CPB_SZ                  = 128,
        NV_ADMA_APRD_SZ                 = 16,
        NV_ADMA_SGTBL_LEN               = (1024 - NV_ADMA_CPB_SZ) /
                                           NV_ADMA_APRD_SZ,
        NV_ADMA_SGTBL_TOTAL_LEN         = NV_ADMA_SGTBL_LEN + 5,
        NV_ADMA_SGTBL_SZ                = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
        NV_ADMA_PORT_PRIV_DMA_SZ        = NV_ADMA_MAX_CPBS *
                                           (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

        /* BAR5 offset to ADMA general registers */
        NV_ADMA_GEN                     = 0x400,
        NV_ADMA_GEN_CTL                 = 0x00,
        NV_ADMA_NOTIFIER_CLEAR          = 0x30,

        /* BAR5 offset to ADMA ports */
        NV_ADMA_PORT                    = 0x480,

        /* size of ADMA port register space */
        NV_ADMA_PORT_SIZE               = 0x100,

        /* ADMA port registers */
        NV_ADMA_CTL                     = 0x40,
        NV_ADMA_CPB_COUNT               = 0x42,
        NV_ADMA_NEXT_CPB_IDX            = 0x43,
        NV_ADMA_STAT                    = 0x44,
        NV_ADMA_CPB_BASE_LOW            = 0x48,
        NV_ADMA_CPB_BASE_HIGH           = 0x4C,
        NV_ADMA_APPEND                  = 0x50,
        NV_ADMA_NOTIFIER                = 0x68,
        NV_ADMA_NOTIFIER_ERROR          = 0x6C,

        /* NV_ADMA_CTL register bits */
        NV_ADMA_CTL_HOTPLUG_IEN         = (1 << 0),
        NV_ADMA_CTL_CHANNEL_RESET       = (1 << 5),
        NV_ADMA_CTL_GO                  = (1 << 7),
        NV_ADMA_CTL_AIEN                = (1 << 8),
        NV_ADMA_CTL_READ_NON_COHERENT   = (1 << 11),
        NV_ADMA_CTL_WRITE_NON_COHERENT  = (1 << 12),

        /* CPB response flag bits */
        NV_CPB_RESP_DONE                = (1 << 0),
        NV_CPB_RESP_ATA_ERR             = (1 << 3),
        NV_CPB_RESP_CMD_ERR             = (1 << 4),
        NV_CPB_RESP_CPB_ERR             = (1 << 7),

        /* CPB control flag bits */
        NV_CPB_CTL_CPB_VALID            = (1 << 0),
        NV_CPB_CTL_QUEUE                = (1 << 1),
        NV_CPB_CTL_APRD_VALID           = (1 << 2),
        NV_CPB_CTL_IEN                  = (1 << 3),
        NV_CPB_CTL_FPDMA                = (1 << 4),

        /* APRD flags */
        NV_APRD_WRITE                   = (1 << 1),
        NV_APRD_END                     = (1 << 2),
        NV_APRD_CONT                    = (1 << 3),

        /* NV_ADMA_STAT flags */
        NV_ADMA_STAT_TIMEOUT            = (1 << 0),
        NV_ADMA_STAT_HOTUNPLUG          = (1 << 1),
        NV_ADMA_STAT_HOTPLUG            = (1 << 2),
        NV_ADMA_STAT_CPBERR             = (1 << 4),
        NV_ADMA_STAT_SERROR             = (1 << 5),
        NV_ADMA_STAT_CMD_COMPLETE       = (1 << 6),
        NV_ADMA_STAT_IDLE               = (1 << 8),
        NV_ADMA_STAT_LEGACY             = (1 << 9),
        NV_ADMA_STAT_STOPPED            = (1 << 10),
        NV_ADMA_STAT_DONE               = (1 << 12),
        NV_ADMA_STAT_ERR                = NV_ADMA_STAT_CPBERR |
                                          NV_ADMA_STAT_TIMEOUT,

        /* port flags */
        NV_ADMA_PORT_REGISTER_MODE      = (1 << 0),
        NV_ADMA_ATAPI_SETUP_COMPLETE    = (1 << 1),

        /* MCP55 reg offset */
        NV_CTL_MCP55                    = 0x400,
        NV_INT_STATUS_MCP55             = 0x440,
        NV_INT_ENABLE_MCP55             = 0x444,
        NV_NCQ_REG_MCP55                = 0x448,

        /* MCP55 */
        NV_INT_ALL_MCP55                = 0xffff,
        NV_INT_PORT_SHIFT_MCP55         = 16,   /* each port occupies 16 bits */
        NV_INT_MASK_MCP55               = NV_INT_ALL_MCP55 & 0xfffd,

        /* SWNCQ ENABLE BITS */
        NV_CTL_PRI_SWNCQ                = 0x02,
        NV_CTL_SEC_SWNCQ                = 0x04,

        /* SW NCQ status bits */
        NV_SWNCQ_IRQ_DEV                = (1 << 0),
        NV_SWNCQ_IRQ_PM                 = (1 << 1),
        NV_SWNCQ_IRQ_ADDED              = (1 << 2),
        NV_SWNCQ_IRQ_REMOVED            = (1 << 3),

        NV_SWNCQ_IRQ_BACKOUT            = (1 << 4),
        NV_SWNCQ_IRQ_SDBFIS             = (1 << 5),
        NV_SWNCQ_IRQ_DHREGFIS           = (1 << 6),
        NV_SWNCQ_IRQ_DMASETUP           = (1 << 7),

        NV_SWNCQ_IRQ_HOTPLUG            = NV_SWNCQ_IRQ_ADDED |
                                          NV_SWNCQ_IRQ_REMOVED,

};

/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
        __le64                  addr;
        __le32                  len;
        u8                      flags;
        u8                      packet_len;
        __le16                  reserved;
};

enum nv_adma_regbits {
        CMDEND  = (1 << 15),            /* end of command list */
        WNB     = (1 << 14),            /* wait-not-BSY */
        IGN     = (1 << 13),            /* ignore this entry */
        CS1n    = (1 << (4 + 8)),       /* std. PATA signals follow... */
        DA2     = (1 << (2 + 8)),
        DA1     = (1 << (1 + 8)),
        DA0     = (1 << (0 + 8)),
};

/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
        u8                      resp_flags;    /* 0 */
        u8                      reserved1;     /* 1 */
        u8                      ctl_flags;     /* 2 */
        /* len is length of taskfile in 64-bit words */
        u8                      len;           /* 3 */
        u8                      tag;           /* 4 */
        u8                      next_cpb_idx;  /* 5 */
        __le16                  reserved2;     /* 6-7 */
        __le16                  tf[12];        /* 8-31 */
        struct nv_adma_prd      aprd[5];       /* 32-111 */
        __le64                  next_aprd;     /* 112-119 */
        __le64                  reserved3;     /* 120-127 */
};


struct nv_adma_port_priv {
        struct nv_adma_cpb      *cpb;
        dma_addr_t              cpb_dma;
        struct nv_adma_prd      *aprd;
        dma_addr_t              aprd_dma;
        void __iomem            *ctl_block;
        void __iomem            *gen_block;
        void __iomem            *notifier_clear_block;
        u64                     adma_dma_mask;
        u8                      flags;
        int                     last_issue_ncq;
};

struct nv_host_priv {
        unsigned long           type;
};

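/* FIFO of commands deferred by SWNCQ; defer_bits tracks which tags
   are currently parked in the queue */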
struct defer_queue {
        u32             defer_bits;
        unsigned int    head;
        unsigned int    tail;
        unsigned int    tag[ATA_MAX_QUEUE];
};

enum ncq_saw_flag_list {
        ncq_saw_d2h     = (1U << 0),
        ncq_saw_dmas    = (1U << 1),
        ncq_saw_sdb     = (1U << 2),
        ncq_saw_backout = (1U << 3),
};

struct nv_swncq_port_priv {
        struct ata_bmdma_prd *prd;       /* our SG list */
        dma_addr_t      prd_dma; /* and its DMA mapping */
        void __iomem    *sactive_block;
        void __iomem    *irq_block;
        void __iomem    *tag_block;
        u32             qc_active;

        unsigned int    last_issue_tag;

        /* FIFO circular queue to store deferred commands */
        struct defer_queue defer_queue;

        /* for NCQ interrupt analysis */
        u32             dhfis_bits;
        u32             dmafis_bits;
        u32             sdbfis_bits;

        unsigned int    ncq_flags;
};


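/* Per-port ADMA interrupt pending bit in the general control/status
   register: bit 19 for port 0, spaced 12 bits apart per port */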
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))

static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);

static int nv_hardreset(struct ata_link *link, unsigned int *class,
                        unsigned long deadline);
static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
static void nv_adma_freeze(struct ata_port *ap);
static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);

static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
static int nv_swncq_slave_config(struct scsi_device *sdev);
static int nv_swncq_port_start(struct ata_port *ap);
static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_swncq_port_resume(struct ata_port *ap);
#endif

enum nv_host_type
{
        GENERIC,
        NFORCE2,
        NFORCE3 = NFORCE2,      /* NF2 == NF3 as far as sata_nv is concerned */
        CK804,
        ADMA,
        MCP5x,
        SWNCQ,
};

static const struct pci_device_id nv_pci_tbl[] = {
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

        { } /* terminate list */
};

static struct pci_driver nv_pci_driver = {
        .name                   = DRV_NAME,
        .id_table               = nv_pci_tbl,
        .probe                  = nv_init_one,
#ifdef CONFIG_PM
        .suspend                = ata_pci_device_suspend,
        .resume                 = nv_pci_device_resume,
#endif
        .remove                 = ata_pci_remove_one,
};

static struct scsi_host_template nv_sht = {
        ATA_BMDMA_SHT(DRV_NAME),
};

static struct scsi_host_template nv_adma_sht = {
        ATA_NCQ_SHT(DRV_NAME),
        .can_queue              = NV_ADMA_MAX_CPBS,
        .sg_tablesize           = NV_ADMA_SGTBL_TOTAL_LEN,
        .dma_boundary           = NV_ADMA_DMA_BOUNDARY,
        .slave_configure        = nv_adma_slave_config,
};

static struct scsi_host_template nv_swncq_sht = {
        ATA_NCQ_SHT(DRV_NAME),
        .can_queue              = ATA_MAX_QUEUE,
        .sg_tablesize           = LIBATA_MAX_PRD,
        .dma_boundary           = ATA_DMA_BOUNDARY,
        .slave_configure        = nv_swncq_slave_config,
};

/*
 * NV SATA controllers have various different problems with hardreset
 * protocol depending on the specific controller and device.
 *
 * GENERIC:
 *
 *  bko11195 reports that link doesn't come online after hardreset on
 *  generic nv's and there have been several other similar reports on
 *  linux-ide.
 *
 *  bko12351#c23 reports that warmplug on MCP61 doesn't work with
 *  softreset.
 *
 * NF2/3:
 *
 *  bko3352 reports nf2/3 controllers can't determine device signature
 *  reliably after hardreset.  The following thread reports detection
 *  failure on cold boot with the standard debouncing timing.
 *
 *  http://thread.gmane.org/gmane.linux.ide/34098
 *
 *  bko12176 reports that hardreset fails to bring up the link during
 *  boot on nf2.
 *
 * CK804:
 *
 *  For initial probing after boot and hot plugging, hardreset mostly
 *  works fine on CK804 but curiously, reprobing on the initial port
 *  by rescanning or rmmod/insmod fails to acquire the initial D2H Reg
 *  FIS in a somewhat nondeterministic way.
 *
 * SWNCQ:
 *
 *  bko12351 reports that when SWNCQ is enabled, for hotplug to work,
 *  hardreset should be used and hardreset can't report proper
 *  signature, which suggests that mcp5x is closer to nf2 as far as
 *  reset quirkiness is concerned.
 *
 *  bko12703 reports that boot probing fails for Intel SSDs with
 *  hardreset.  Link fails to come online.  Softreset works fine.
 *
 * The failures are varied but the following patterns seem true for
 * all flavors.
 *
 * - Softreset during boot always works.
 *
 * - Hardreset during boot sometimes fails to bring up the link on
 *   certain combinations and device signature acquisition is
 *   unreliable.
 *
 * - Hardreset is often necessary after hotplug.
 *
 * So, preferring softreset for boot probing and error handling (as
 * hardreset might bring down the link) but using hardreset for
 * post-boot probing should work around the above issues in most
 * cases.  Define nv_hardreset() which only kicks in for post-boot
 * probing and use it for all variants.
 */
static struct ata_port_operations nv_generic_ops = {
        .inherits               = &ata_bmdma_port_ops,
        .lost_interrupt         = ATA_OP_NULL,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
        .hardreset              = nv_hardreset,
};

static struct ata_port_operations nv_nf2_ops = {
        .inherits               = &nv_generic_ops,
        .freeze                 = nv_nf2_freeze,
        .thaw                   = nv_nf2_thaw,
};

static struct ata_port_operations nv_ck804_ops = {
        .inherits               = &nv_generic_ops,
        .freeze                 = nv_ck804_freeze,
        .thaw                   = nv_ck804_thaw,
        .host_stop              = nv_ck804_host_stop,
};

static struct ata_port_operations nv_adma_ops = {
        .inherits               = &nv_ck804_ops,

        .check_atapi_dma        = nv_adma_check_atapi_dma,
        .sff_tf_read            = nv_adma_tf_read,
        .qc_defer               = ata_std_qc_defer,
        .qc_prep                = nv_adma_qc_prep,
        .qc_issue               = nv_adma_qc_issue,
        .sff_irq_clear          = nv_adma_irq_clear,

        .freeze                 = nv_adma_freeze,
        .thaw                   = nv_adma_thaw,
        .error_handler          = nv_adma_error_handler,
        .post_internal_cmd      = nv_adma_post_internal_cmd,

        .port_start             = nv_adma_port_start,
        .port_stop              = nv_adma_port_stop,
#ifdef CONFIG_PM
        .port_suspend           = nv_adma_port_suspend,
        .port_resume            = nv_adma_port_resume,
#endif
        .host_stop              = nv_adma_host_stop,
};

static struct ata_port_operations nv_swncq_ops = {
        .inherits               = &nv_generic_ops,

        .qc_defer               = ata_std_qc_defer,
        .qc_prep                = nv_swncq_qc_prep,
        .qc_issue               = nv_swncq_qc_issue,

        .freeze                 = nv_mcp55_freeze,
        .thaw                   = nv_mcp55_thaw,
        .error_handler          = nv_swncq_error_handler,

#ifdef CONFIG_PM
        .port_suspend           = nv_swncq_port_suspend,
        .port_resume            = nv_swncq_port_resume,
#endif
        .port_start             = nv_swncq_port_start,
};

struct nv_pi_priv {
        irq_handler_t                   irq_handler;
        struct scsi_host_template       *sht;
};

#define NV_PI_PRIV(_irq_handler, _sht) \
        &(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }

static const struct ata_port_info nv_port_info[] = {
        /* generic */
        {
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_generic_ops,
                .private_data   = NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
        },
        /* nforce2/3 */
        {
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_nf2_ops,
                .private_data   = NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
        },
        /* ck804 */
        {
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_ck804_ops,
                .private_data   = NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
        },
        /* ADMA */
        {
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                                  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_adma_ops,
                .private_data   = NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
        },
        /* MCP5x */
        {
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_generic_ops,
                .private_data   = NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
        },
        /* SWNCQ */
        {
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                                  ATA_FLAG_NCQ,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_swncq_ops,
                .private_data   = NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
        },
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

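/* module-wide option flags */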
static int adma_enabled;
static int swncq_enabled = 1;
static int msi_enabled;

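/* Put the port back into legacy register mode: wait for the ADMA
   engine to go idle, clear the GO bit, then wait for the LEGACY
   status bit to assert */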
static void nv_adma_register_mode(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp, status;
        int count = 0;

        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
                return;

        status = readw(mmio + NV_ADMA_STAT);
        while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_printk(ap, KERN_WARNING,
                        "timeout waiting for ADMA IDLE, stat=0x%hx\n",
                        status);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

        count = 0;
        status = readw(mmio + NV_ADMA_STAT);
        while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_printk(ap, KERN_WARNING,
                         "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
                         status);

        pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

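/* Switch the port from register mode to ADMA mode: set the GO bit
   and wait for the LEGACY status bit to clear and IDLE to assert */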
static void nv_adma_mode(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp, status;
        int count = 0;

        if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
                return;

        WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

        status = readw(mmio + NV_ADMA_STAT);
        while (((status & NV_ADMA_STAT_LEGACY) ||
              !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_printk(ap, KERN_WARNING,
                        "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
                        status);

        pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}

static int nv_adma_slave_config(struct scsi_device *sdev)
{
        struct ata_port *ap = ata_shost_to_port(sdev->host);
        struct nv_adma_port_priv *pp = ap->private_data;
        struct nv_adma_port_priv *port0, *port1;
        struct scsi_device *sdev0, *sdev1;
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        unsigned long segment_boundary, flags;
        unsigned short sg_tablesize;
        int rc;
        int adma_enable;
        u32 current_reg, new_reg, config_mask;

        rc = ata_scsi_slave_config(sdev);

        if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
                /* Not a proper libata device, ignore */
                return rc;

        spin_lock_irqsave(ap->lock, flags);

        if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
                /*
                 * NVIDIA reports that ADMA mode does not support ATAPI commands.
                 * Therefore ATAPI commands are sent through the legacy interface.
                 * However, the legacy interface only supports 32-bit DMA.
                 * Restrict DMA parameters as required by the legacy interface
                 * when an ATAPI device is connected.
                 */
                segment_boundary = ATA_DMA_BOUNDARY;
                /* Subtract 1 since an extra entry may be needed for padding, see
                   libata-scsi.c */
                sg_tablesize = LIBATA_MAX_PRD - 1;

                /* Since the legacy DMA engine is in use, we need to disable ADMA
                   on the port. */
                adma_enable = 0;
                nv_adma_register_mode(ap);
        } else {
                segment_boundary = NV_ADMA_DMA_BOUNDARY;
                sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
                adma_enable = 1;
        }

        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

        if (ap->port_no == 1)
                config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
                              NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
        else
                config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
                              NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

        if (adma_enable) {
                new_reg = current_reg | config_mask;
                pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
        } else {
                new_reg = current_reg & ~config_mask;
                pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
        }

        if (current_reg != new_reg)
                pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

        port0 = ap->host->ports[0]->private_data;
        port1 = ap->host->ports[1]->private_data;
        sdev0 = ap->host->ports[0]->link.device[0].sdev;
        sdev1 = ap->host->ports[1]->link.device[0].sdev;
        if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
            (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
                /* We have to set the DMA mask to 32-bit if either port is in
                   ATAPI mode, since they are on the same PCI device which is
                   used for DMA mapping.  If we set the mask we also need to set
                   the bounce limit on both ports to ensure that the block
                   layer doesn't feed addresses that cause DMA mapping to
                   choke.  If either SCSI device is not allocated yet, it's OK
                   since that port will discover its correct setting when it
                   does get allocated.
                   Note: Setting 32-bit mask should not fail. */
                if (sdev0)
                        blk_queue_bounce_limit(sdev0->request_queue,
                                               ATA_DMA_MASK);
                if (sdev1)
                        blk_queue_bounce_limit(sdev1->request_queue,
                                               ATA_DMA_MASK);

                pci_set_dma_mask(pdev, ATA_DMA_MASK);
        } else {
                /* This shouldn't fail as it was set to this value before */
                pci_set_dma_mask(pdev, pp->adma_dma_mask);
                if (sdev0)
                        blk_queue_bounce_limit(sdev0->request_queue,
                                               pp->adma_dma_mask);
                if (sdev1)
                        blk_queue_bounce_limit(sdev1->request_queue,
                                               pp->adma_dma_mask);
        }

        blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
        blk_queue_max_segments(sdev->request_queue, sg_tablesize);
        ata_port_printk(ap, KERN_INFO,
                "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
                (unsigned long long)*ap->host->dev->dma_mask,
                segment_boundary, sg_tablesize);

        spin_unlock_irqrestore(ap->lock, flags);

        return rc;
}

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;
        return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
        /* Other than when internal or pass-through commands are executed,
           the only time this function will be called in ADMA mode will be
           if a command fails. In the failure case we don't care about going
           into register mode with ADMA commands pending, as the commands will
           all shortly be aborted anyway. We assume that NCQ commands are not
           issued via passthrough, which is the only way that switching into
           ADMA mode could abort outstanding commands. */
        nv_adma_register_mode(ap);

        ata_sff_tf_read(ap, tf);
}

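/* Encode a taskfile as CPB register-update entries; unused slots are
   padded with IGN and the command entry is terminated with CMDEND.
   Returns the number of entries used */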
static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
        unsigned int idx = 0;

        if (tf->flags & ATA_TFLAG_ISADDR) {
                if (tf->flags & ATA_TFLAG_LBA48) {
                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature | WNB);
                        cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->feature);
                } else
                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->feature | WNB);

                cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->lbal);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->lbam);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->lbah);
        }

        if (tf->flags & ATA_TFLAG_DEVICE)
                cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

        cpb[idx++] = cpu_to_le16((ATA_REG_CMD   << 8) | tf->command | CMDEND);

        while (idx < 12)
                cpb[idx++] = cpu_to_le16(IGN);

        return idx;
}

static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        u8 flags = pp->cpb[cpb_num].resp_flags;

        VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

        if (unlikely((force_err ||
                     flags & (NV_CPB_RESP_ATA_ERR |
                              NV_CPB_RESP_CMD_ERR |
                              NV_CPB_RESP_CPB_ERR)))) {
                struct ata_eh_info *ehi = &ap->link.eh_info;
                int freeze = 0;

                ata_ehi_clear_desc(ehi);
                __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
                if (flags & NV_CPB_RESP_ATA_ERR) {
                        ata_ehi_push_desc(ehi, "ATA error");
                        ehi->err_mask |= AC_ERR_DEV;
                } else if (flags & NV_CPB_RESP_CMD_ERR) {
                        ata_ehi_push_desc(ehi, "CMD error");
                        ehi->err_mask |= AC_ERR_DEV;
                } else if (flags & NV_CPB_RESP_CPB_ERR) {
                        ata_ehi_push_desc(ehi, "CPB error");
                        ehi->err_mask |= AC_ERR_SYSTEM;
                        freeze = 1;
                } else {
                        /* notifier error, but no error in CPB flags? */
                        ata_ehi_push_desc(ehi, "unknown");
                        ehi->err_mask |= AC_ERR_OTHER;
                        freeze = 1;
                }
                /* Kill all commands. EH will determine what actually failed. */
                if (freeze)
                        ata_port_freeze(ap);
                else
                        ata_port_abort(ap);
                return -1;
        }

        if (likely(flags & NV_CPB_RESP_DONE))
                return 1;
        return 0;
}

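/* Handle a legacy-interface interrupt for one port; returns nonzero
   if the interrupt was handled */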
static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
        struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);

        /* freeze if hotplugged */
        if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
                ata_port_freeze(ap);
                return 1;
        }

        /* bail out if not our interrupt */
        if (!(irq_stat & NV_INT_DEV))
                return 0;

        /* DEV interrupt w/ no active qc? */
        if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
                ata_sff_check_status(ap);
                return 1;
        }

        /* handle interrupt */
        return ata_bmdma_port_intr(ap, qc);
}

static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        int i, handled = 0;
        u32 notifier_clears[2];

        spin_lock(&host->lock);

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];
                struct nv_adma_port_priv *pp = ap->private_data;
                void __iomem *mmio = pp->ctl_block;
                u16 status;
                u32 gen_ctl;
                u32 notifier, notifier_error;

                notifier_clears[i] = 0;

                /* if ADMA is disabled, use standard ata interrupt handler */
                if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
                        u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
                                >> (NV_INT_PORT_SHIFT * i);
                        handled += nv_host_intr(ap, irq_stat);
                        continue;
                }

                /* if in ATA register mode, check for standard interrupts */
                if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
                        u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
                                >> (NV_INT_PORT_SHIFT * i);
                        if (ata_tag_valid(ap->link.active_tag))
                                /* NV_INT_DEV indication seems unreliable
                                   at times at least in ADMA mode. Force it
                                   on always when a command is active, to
                                   prevent losing interrupts. */
                                irq_stat |= NV_INT_DEV;
                        handled += nv_host_intr(ap, irq_stat);
                }

                notifier = readl(mmio + NV_ADMA_NOTIFIER);
                notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
                notifier_clears[i] = notifier | notifier_error;

                gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

                if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
                    !notifier_error)
                        /* Nothing to do */
                        continue;

                status = readw(mmio + NV_ADMA_STAT);

                /*
                 * Clear status. Ensure the controller sees the
                 * clearing before we start looking at any of the CPB
                 * statuses, so that any CPB completions after this
                 * point in the handler will raise another interrupt.
                 */
                writew(status, mmio + NV_ADMA_STAT);
                readw(mmio + NV_ADMA_STAT); /* flush posted write */
                rmb();

                handled++; /* irq handled if we got here */

                /* freeze if hotplugged or controller error */
                if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
                                       NV_ADMA_STAT_HOTUNPLUG |
                                       NV_ADMA_STAT_TIMEOUT |
                                       NV_ADMA_STAT_SERROR))) {
                        struct ata_eh_info *ehi = &ap->link.eh_info;

                        ata_ehi_clear_desc(ehi);
                        __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
                        if (status & NV_ADMA_STAT_TIMEOUT) {
                                ehi->err_mask |= AC_ERR_SYSTEM;
                                ata_ehi_push_desc(ehi, "timeout");
                        } else if (status & NV_ADMA_STAT_HOTPLUG) {
                                ata_ehi_hotplugged(ehi);
                                ata_ehi_push_desc(ehi, "hotplug");
                        } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
                                ata_ehi_hotplugged(ehi);
                                ata_ehi_push_desc(ehi, "hot unplug");
                        } else if (status & NV_ADMA_STAT_SERROR) {
                                /* let EH analyze SError and figure out cause */
                                ata_ehi_push_desc(ehi, "SError");
                        } else
                                ata_ehi_push_desc(ehi, "unknown");
                        ata_port_freeze(ap);
                        continue;
                }

                if (status & (NV_ADMA_STAT_DONE |
                              NV_ADMA_STAT_CPBERR |
                              NV_ADMA_STAT_CMD_COMPLETE)) {
                        u32 check_commands = notifier_clears[i];
                        u32 done_mask = 0;
                        int pos, rc;

                        if (status & NV_ADMA_STAT_CPBERR) {
                                /* check all active commands */
                                if (ata_tag_valid(ap->link.active_tag))
                                        check_commands = 1 <<
                                                ap->link.active_tag;
                                else
                                        check_commands = ap->link.sactive;
                        }

                        /* check CPBs for completed commands */
                        while ((pos = ffs(check_commands))) {
                                pos--;
                                rc = nv_adma_check_cpb(ap, pos,
                                                notifier_error & (1 << pos));
                                if (rc > 0)
                                        done_mask |= 1 << pos;
                                else if (unlikely(rc < 0))
                                        check_commands = 0;
                                check_commands &= ~(1 << pos);
                        }
                        ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
                }
        }

        if (notifier_clears[0] || notifier_clears[1]) {
                /* Note: Both notifier clear registers must be written
                   if either is set, even if one is zero, according to NVIDIA. */
                struct nv_adma_port_priv *pp = host->ports[0]->private_data;
                writel(notifier_clears[0], pp->notifier_clear_block);
                pp = host->ports[1]->private_data;
                writel(notifier_clears[1], pp->notifier_clear_block);
        }

        spin_unlock(&host->lock);

        return IRQ_RETVAL(handled);
}

static void nv_adma_freeze(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        nv_ck804_freeze(ap);

        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
                return;

        /* clear any outstanding CK804 notifications */
        writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
                ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

        /* Disable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
                mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
}

static void nv_adma_thaw(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        nv_ck804_thaw(ap);

        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
                return;

        /* Enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
                mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
}

static void nv_adma_irq_clear(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u32 notifier_clears[2];

        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
                ata_bmdma_irq_clear(ap);
                return;
        }

        /* clear any outstanding CK804 notifications */
        writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
                ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

        /* clear ADMA status */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* clear notifiers - note both ports need to be written with
           something even though we are only clearing on one */
        if (ap->port_no == 0) {
                notifier_clears[0] = 0xFFFFFFFF;
                notifier_clears[1] = 0;
        } else {
                notifier_clears[0] = 0;
                notifier_clears[1] = 0xFFFFFFFF;
        }
        pp = ap->host->ports[0]->private_data;
        writel(notifier_clears[0], pp->notifier_clear_block);
        pp = ap->host->ports[1]->private_data;
        writel(notifier_clears[1], pp->notifier_clear_block);
}

static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;

        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
                ata_bmdma_post_internal_cmd(qc);
}

static int nv_adma_port_start(struct ata_port *ap)
{
        struct device *dev = ap->host->dev;
        struct nv_adma_port_priv *pp;
        int rc;
        void *mem;
        dma_addr_t mem_dma;
        void __iomem *mmio;
        struct pci_dev *pdev = to_pci_dev(dev);
        u16 tmp;

        VPRINTK("ENTER\n");

        /* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
           pad buffers */
        rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        if (rc)
                return rc;
        rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
        if (rc)
                return rc;

        /* we might fall back to bmdma, allocate bmdma resources */
        rc = ata_bmdma_port_start(ap);
        if (rc)
                return rc;

        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
        if (!pp)
                return -ENOMEM;

        mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
               ap->port_no * NV_ADMA_PORT_SIZE;
        pp->ctl_block = mmio;
        pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
        pp->notifier_clear_block = pp->gen_block +
               NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

        /* Now that the legacy PRD and padding buffer are allocated we can
           safely raise the DMA mask to allocate the CPB/APRD table.
           These are allowed to fail since we store the value that ends up
           being used to set as the bounce limit in slave_config later if
           needed. */
        pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        pp->adma_dma_mask = *dev->dma_mask;

        mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
                                  &mem_dma, GFP_KERNEL);
        if (!mem)
                return -ENOMEM;
        memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

        /*
         * First item in chunk of DMA memory:
         * 128-byte command parameter block (CPB)
         * one for each command tag
         */
        pp->cpb     = mem;
        pp->cpb_dma = mem_dma;

        writel(mem_dma & 0xFFFFFFFF,    mmio + NV_ADMA_CPB_BASE_LOW);
        writel((mem_dma >> 16) >> 16,   mmio + NV_ADMA_CPB_BASE_HIGH);

        mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
        mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

        /*
         * Second item: block of ADMA_SGTBL_LEN s/g entries
         */
        pp->aprd = mem;
        pp->aprd_dma = mem_dma;

        ap->private_data = pp;

        /* clear any outstanding interrupt conditions */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* initialize port variables */
        pp->flags = NV_ADMA_PORT_REGISTER_MODE;

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* clear GO for register mode, enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
                NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
        udelay(1);
        writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */

        return 0;
}

static void nv_adma_port_stop(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;

        VPRINTK("ENTER\n");
        writew(0, mmio + NV_ADMA_CTL);
}

#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;

        /* Go to register mode - clears GO */
        nv_adma_register_mode(ap);

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* disable interrupt, shut down port */
        writew(0, mmio + NV_ADMA_CTL);

        return 0;
}

static int nv_adma_port_resume(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        /* set CPB block location */
        writel(pp->cpb_dma & 0xFFFFFFFF,        mmio + NV_ADMA_CPB_BASE_LOW);
        writel((pp->cpb_dma >> 16) >> 16,       mmio + NV_ADMA_CPB_BASE_HIGH);

        /* clear any outstanding interrupt conditions */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* initialize port variables */
        pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* clear GO for register mode, enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
                NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
        udelay(1);
        writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */

        return 0;
}
#endif

static void nv_adma_setup_port(struct ata_port *ap)
{
        void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
        struct ata_ioports *ioport = &ap->ioaddr;

        VPRINTK("ENTER\n");

        mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;

        ioport->cmd_addr        = mmio;
        ioport->data_addr       = mmio + (ATA_REG_DATA * 4);
        ioport->error_addr      =
        ioport->feature_addr    = mmio + (ATA_REG_ERR * 4);
        ioport->nsect_addr      = mmio + (ATA_REG_NSECT * 4);
        ioport->lbal_addr       = mmio + (ATA_REG_LBAL * 4);
        ioport->lbam_addr       = mmio + (ATA_REG_LBAM * 4);
        ioport->lbah_addr       = mmio + (ATA_REG_LBAH * 4);
        ioport->device_addr     = mmio + (ATA_REG_DEVICE * 4);
        ioport->status_addr     =
        ioport->command_addr    = mmio + (ATA_REG_STATUS * 4);
        ioport->altstatus_addr  =
        ioport->ctl_addr        = mmio + 0x20;
}

static int nv_adma_host_init(struct ata_host *host)
{
        struct pci_dev *pdev = to_pci_dev(host->dev);
        unsigned int i;
        u32 tmp32;

        VPRINTK("ENTER\n");

        /* enable ADMA on the ports */
        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
        tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
                 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
                 NV_MCP_SATA_CFG_20_PORT1_EN |
                 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

        pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

        for (i = 0; i < host->n_ports; i++)
                nv_adma_setup_port(host->ports[i]);

        return 0;
}

1333static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1334                              struct scatterlist *sg,
1335                              int idx,
1336                              struct nv_adma_prd *aprd)
1337{
1338        u8 flags = 0;
1339        if (qc->tf.flags & ATA_TFLAG_WRITE)
1340                flags |= NV_APRD_WRITE;
1341        if (idx == qc->n_elem - 1)
1342                flags |= NV_APRD_END;
1343        else if (idx != 4)
1344                flags |= NV_APRD_CONT;
1345
1346        aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
1347        aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1348        aprd->flags = flags;
1349        aprd->packet_len = 0;
1350}
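/*
 * APRD flag selection, as we read it: the final scatter/gather element is
 * tagged NV_APRD_END; every other element gets NV_APRD_CONT, except index
 * 4 (the last of the five APRDs embedded in the CPB), whose successor is
 * reached through cpb->next_aprd rather than by falling through:
 *
 *	idx:    0     1     2     3     4   |  5 .. n-2   n-1
 *	flags:  CONT  CONT  CONT  CONT  -   |  CONT       END
 *	        (inline in the CPB)         |  (external per-tag table)
 */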
1351
1352static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1353{
1354        struct nv_adma_port_priv *pp = qc->ap->private_data;
1355        struct nv_adma_prd *aprd;
1356        struct scatterlist *sg;
1357        unsigned int si;
1358
1359        VPRINTK("ENTER\n");
1360
1361        for_each_sg(qc->sg, sg, qc->n_elem, si) {
1362                aprd = (si < 5) ? &cpb->aprd[si] :
1363                               &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
1364                nv_adma_fill_aprd(qc, sg, si, aprd);
1365        }
1366        if (si > 5)
1367                cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
1368        else
1369                cpb->next_aprd = cpu_to_le64(0);
1370}
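/*
 * Scatter/gather entries 0-4 land in the CPB itself; anything beyond
 * spills into this tag's slice of the shared APRD table.  Assuming
 * NV_ADMA_SGTBL_LEN entries of struct nv_adma_prd add up to
 * NV_ADMA_SGTBL_SZ bytes, the addressing works out to:
 *
 *	CPU view of entry i (i >= 5): &pp->aprd[NV_ADMA_SGTBL_LEN * tag + i - 5]
 *	DMA base handed to hardware:  pp->aprd_dma + NV_ADMA_SGTBL_SZ * tag
 */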
1371
1372static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1373{
1374        struct nv_adma_port_priv *pp = qc->ap->private_data;
1375
1376        /* ADMA engine can only be used for non-ATAPI DMA commands,
1377           or interrupt-driven no-data commands. */
1378        if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1379           (qc->tf.flags & ATA_TFLAG_POLLING))
1380                return 1;
1381
1382        if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1383           (qc->tf.protocol == ATA_PROT_NODATA))
1384                return 0;
1385
1386        return 1;
1387}
1388
1389static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1390{
1391        struct nv_adma_port_priv *pp = qc->ap->private_data;
1392        struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
1393        u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1394                       NV_CPB_CTL_IEN;
1395
1396        if (nv_adma_use_reg_mode(qc)) {
1397                BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1398                        (qc->flags & ATA_QCFLAG_DMAMAP));
1399                nv_adma_register_mode(qc->ap);
1400                ata_bmdma_qc_prep(qc);
1401                return;
1402        }
1403
1404        cpb->resp_flags = NV_CPB_RESP_DONE;
1405        wmb();
1406        cpb->ctl_flags = 0;
1407        wmb();
1408
1409        cpb->len                = 3;
1410        cpb->tag                = qc->tag;
1411        cpb->next_cpb_idx       = 0;
1412
1413        /* turn on NCQ flags for NCQ commands */
1414        if (qc->tf.protocol == ATA_PROT_NCQ)
1415                ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1416
1417        VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1418
1419        nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1420
1421        if (qc->flags & ATA_QCFLAG_DMAMAP) {
1422                nv_adma_fill_sg(qc, cpb);
1423                ctl_flags |= NV_CPB_CTL_APRD_VALID;
1424        } else
1425                memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1426
1427        /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
1428           until we are finished filling in all of the contents */
1429        wmb();
1430        cpb->ctl_flags = ctl_flags;
1431        wmb();
1432        cpb->resp_flags = 0;
1433}
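/*
 * The ordering above matters because the controller samples ctl_flags and
 * writes resp_flags concurrently with the CPU.  The CPB is first
 * neutralized (resp_flags = DONE, ctl_flags = 0), then filled in, and only
 * then is CPB_VALID exposed, with a write barrier between each step:
 *
 *	mark stale -> wmb() -> fill CPB -> wmb() -> set CPB_VALID
 *	           -> wmb() -> clear resp_flags
 *
 * so the device should never observe a half-written CPB marked valid.
 */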
1434
1435static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1436{
1437        struct nv_adma_port_priv *pp = qc->ap->private_data;
1438        void __iomem *mmio = pp->ctl_block;
1439        int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
1440
1441        VPRINTK("ENTER\n");
1442
1443        /* We can't handle result taskfile with NCQ commands, since
1444           retrieving the taskfile switches us out of ADMA mode and would abort
1445           existing commands. */
1446        if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
1447                     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
1448                ata_dev_printk(qc->dev, KERN_ERR,
1449                        "NCQ w/ RESULT_TF not allowed\n");
1450                return AC_ERR_SYSTEM;
1451        }
1452
1453        if (nv_adma_use_reg_mode(qc)) {
1454                /* use ATA register mode */
1455                VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
1456                BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1457                        (qc->flags & ATA_QCFLAG_DMAMAP));
1458                nv_adma_register_mode(qc->ap);
1459                return ata_bmdma_qc_issue(qc);
1460        } else
1461                nv_adma_mode(qc->ap);
1462
1463        /* write append register, command tag in lower 8 bits
1464           and (number of CPBs to append - 1) in top 8 bits */
1465        wmb();
1466
1467        if (curr_ncq != pp->last_issue_ncq) {
1468                /* Seems to need some delay before switching between NCQ and
1469                   non-NCQ commands, else we get command timeouts and such. */
1470                udelay(20);
1471                pp->last_issue_ncq = curr_ncq;
1472        }
1473
1474        writew(qc->tag, mmio + NV_ADMA_APPEND);
1475
1476        DPRINTK("Issued tag %u\n", qc->tag);
1477
1478        return 0;
1479}
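/*
 * Going by the comment above, the 16-bit APPEND write encodes the starting
 * tag in bits 0-7 and (number of CPBs - 1) in bits 8-15, so writew(qc->tag)
 * appends exactly one CPB.  A batched append would presumably look like
 *
 *	writew(((n_cpbs - 1) << 8) | first_tag, mmio + NV_ADMA_APPEND);
 *
 * though this driver only ever issues a single CPB at a time.
 */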
1480
1481static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1482{
1483        struct ata_host *host = dev_instance;
1484        unsigned int i;
1485        unsigned int handled = 0;
1486        unsigned long flags;
1487
1488        spin_lock_irqsave(&host->lock, flags);
1489
1490        for (i = 0; i < host->n_ports; i++) {
1491                struct ata_port *ap = host->ports[i];
1492                struct ata_queued_cmd *qc;
1493
1494                qc = ata_qc_from_tag(ap, ap->link.active_tag);
1495                if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
1496                        handled += ata_bmdma_port_intr(ap, qc);
1497                } else {
1498                        /*
1499                         * No request pending?  Clear interrupt status
1500                         * anyway, in case there's one pending.
1501                         */
1502                        ap->ops->sff_check_status(ap);
1503                }
1504        }
1505
1506        spin_unlock_irqrestore(&host->lock, flags);
1507
1508        return IRQ_RETVAL(handled);
1509}
1510
1511static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1512{
1513        int i, handled = 0;
1514
1515        for (i = 0; i < host->n_ports; i++) {
1516                handled += nv_host_intr(host->ports[i], irq_stat);
1517                irq_stat >>= NV_INT_PORT_SHIFT;
1518        }
1519
1520        return IRQ_RETVAL(handled);
1521}
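/*
 * The shared status byte packs one 4-bit field per port
 * (NV_INT_PORT_SHIFT == 4), so port p's bits are
 * (irq_stat >> (p * NV_INT_PORT_SHIFT)) & NV_INT_ALL; the loop above just
 * shifts the next port's nibble down on each iteration.
 */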
1522
1523static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1524{
1525        struct ata_host *host = dev_instance;
1526        u8 irq_stat;
1527        irqreturn_t ret;
1528
1529        spin_lock(&host->lock);
1530        irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1531        ret = nv_do_interrupt(host, irq_stat);
1532        spin_unlock(&host->lock);
1533
1534        return ret;
1535}
1536
1537static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1538{
1539        struct ata_host *host = dev_instance;
1540        u8 irq_stat;
1541        irqreturn_t ret;
1542
1543        spin_lock(&host->lock);
1544        irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1545        ret = nv_do_interrupt(host, irq_stat);
1546        spin_unlock(&host->lock);
1547
1548        return ret;
1549}
1550
1551static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1552{
1553        if (sc_reg > SCR_CONTROL)
1554                return -EINVAL;
1555
1556        *val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
1557        return 0;
1558}
1559
1560static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1561{
1562        if (sc_reg > SCR_CONTROL)
1563                return -EINVAL;
1564
1565        iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
1566        return 0;
1567}
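/*
 * SCR registers sit at a 4-byte stride from scr_addr, so in libata's
 * numbering SCR_STATUS (0), SCR_ERROR (1) and SCR_CONTROL (2) map to
 * offsets 0x0, 0x4 and 0x8; anything past SCR_CONTROL is rejected above
 * as not implemented by this controller.
 */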
1568
1569static int nv_hardreset(struct ata_link *link, unsigned int *class,
1570                        unsigned long deadline)
1571{
1572        struct ata_eh_context *ehc = &link->eh_context;
1573
1574        /* Do hardreset iff it's post-boot probing; please read the
1575         * comment above port ops for details.
1576         */
1577        if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
1578            !ata_dev_enabled(link->device))
1579                sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
1580                                    NULL, NULL);
1581        else {
1582                const unsigned long *timing = sata_ehc_deb_timing(ehc);
1583                int rc;
1584
1585                if (!(ehc->i.flags & ATA_EHI_QUIET))
1586                        ata_link_printk(link, KERN_INFO, "nv: skipping "
1587                                        "hardreset on occupied port\n");
1588
1589                /* make sure the link is online */
1590                rc = sata_link_resume(link, timing, deadline);
1591                /* whine about phy resume failure but proceed */
1592                if (rc && rc != -EOPNOTSUPP)
1593                        ata_link_printk(link, KERN_WARNING, "failed to resume "
1594                                        "link (errno=%d)\n", rc);
1595        }
1596
1597        /* device signature acquisition is unreliable */
1598        return -EAGAIN;
1599}
1600
1601static void nv_nf2_freeze(struct ata_port *ap)
1602{
1603        void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1604        int shift = ap->port_no * NV_INT_PORT_SHIFT;
1605        u8 mask;
1606
1607        mask = ioread8(scr_addr + NV_INT_ENABLE);
1608        mask &= ~(NV_INT_ALL << shift);
1609        iowrite8(mask, scr_addr + NV_INT_ENABLE);
1610}
1611
1612static void nv_nf2_thaw(struct ata_port *ap)
1613{
1614        void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1615        int shift = ap->port_no * NV_INT_PORT_SHIFT;
1616        u8 mask;
1617
1618        iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1619
1620        mask = ioread8(scr_addr + NV_INT_ENABLE);
1621        mask |= (NV_INT_MASK << shift);
1622        iowrite8(mask, scr_addr + NV_INT_ENABLE);
1623}
1624
1625static void nv_ck804_freeze(struct ata_port *ap)
1626{
1627        void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1628        int shift = ap->port_no * NV_INT_PORT_SHIFT;
1629        u8 mask;
1630
1631        mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1632        mask &= ~(NV_INT_ALL << shift);
1633        writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1634}
1635
1636static void nv_ck804_thaw(struct ata_port *ap)
1637{
1638        void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1639        int shift = ap->port_no * NV_INT_PORT_SHIFT;
1640        u8 mask;
1641
1642        writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1643
1644        mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1645        mask |= (NV_INT_MASK << shift);
1646        writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1647}
1648
1649static void nv_mcp55_freeze(struct ata_port *ap)
1650{
1651        void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1652        int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1653        u32 mask;
1654
1655        writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1656
1657        mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1658        mask &= ~(NV_INT_ALL_MCP55 << shift);
1659        writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1660}
1661
1662static void nv_mcp55_thaw(struct ata_port *ap)
1663{
1664        void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1665        int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1666        u32 mask;
1667
1668        writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1669
1670        mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1671        mask |= (NV_INT_MASK_MCP55 << shift);
1672        writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1673}
1674
1675static void nv_adma_error_handler(struct ata_port *ap)
1676{
1677        struct nv_adma_port_priv *pp = ap->private_data;
1678        if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1679                void __iomem *mmio = pp->ctl_block;
1680                int i;
1681                u16 tmp;
1682
1683                if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
1684                        u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1685                        u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1686                        u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1687                        u32 status = readw(mmio + NV_ADMA_STAT);
1688                        u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1689                        u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
1690
1691                        ata_port_printk(ap, KERN_ERR,
1692                                "EH in ADMA mode, notifier 0x%X "
1693                                "notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1694                                "cpb count 0x%X next cpb idx 0x%x\n",
1695                                notifier, notifier_error, gen_ctl, status,
1696                                cpb_count, next_cpb_idx);
1697
1698                        for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
1699                                struct nv_adma_cpb *cpb = &pp->cpb[i];
1700                                if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
1701                                    ap->link.sactive & (1 << i))
1702                                        ata_port_printk(ap, KERN_ERR,
1703                                                "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1704                                                i, cpb->ctl_flags, cpb->resp_flags);
1705                        }
1706                }
1707
1708                /* Push us back into port register mode for error handling. */
1709                nv_adma_register_mode(ap);
1710
1711                /* Mark all of the CPBs as invalid to prevent them from
1712                   being executed */
1713                for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1714                        pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1715
1716                /* clear CPB fetch count */
1717                writew(0, mmio + NV_ADMA_CPB_COUNT);
1718
1719                /* Reset channel */
1720                tmp = readw(mmio + NV_ADMA_CTL);
1721                writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1722                readw(mmio + NV_ADMA_CTL);      /* flush posted write */
1723                udelay(1);
1724                writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1725                readw(mmio + NV_ADMA_CTL);      /* flush posted write */
1726        }
1727
1728        ata_bmdma_error_handler(ap);
1729}
1730
1731static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1732{
1733        struct nv_swncq_port_priv *pp = ap->private_data;
1734        struct defer_queue *dq = &pp->defer_queue;
1735
1736        /* warn if the queue is already full */
1737        WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1738        dq->defer_bits |= (1 << qc->tag);
1739        dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
1740}
1741
1742static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1743{
1744        struct nv_swncq_port_priv *pp = ap->private_data;
1745        struct defer_queue *dq = &pp->defer_queue;
1746        unsigned int tag;
1747
1748        if (dq->head == dq->tail)       /* queue is empty */
1749                return NULL;
1750
1751        tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1752        dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1753        WARN_ON(!(dq->defer_bits & (1 << tag)));
1754        dq->defer_bits &= ~(1 << tag);
1755
1756        return ata_qc_from_tag(ap, tag);
1757}
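/*
 * The defer queue is a ring of ATA_MAX_QUEUE tags indexed by free-running
 * head/tail counters; masking with (ATA_MAX_QUEUE - 1) relies on
 * ATA_MAX_QUEUE being a power of two, and defer_bits mirrors membership as
 * a bitmap so "is this tag queued?" stays O(1).  In sketch form:
 *
 *	enqueue: dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = tag;
 *	dequeue: tag = dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)];
 *	empty:   dq->head == dq->tail
 */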
1758
1759static void nv_swncq_fis_reinit(struct ata_port *ap)
1760{
1761        struct nv_swncq_port_priv *pp = ap->private_data;
1762
1763        pp->dhfis_bits = 0;
1764        pp->dmafis_bits = 0;
1765        pp->sdbfis_bits = 0;
1766        pp->ncq_flags = 0;
1767}
1768
1769static void nv_swncq_pp_reinit(struct ata_port *ap)
1770{
1771        struct nv_swncq_port_priv *pp = ap->private_data;
1772        struct defer_queue *dq = &pp->defer_queue;
1773
1774        dq->head = 0;
1775        dq->tail = 0;
1776        dq->defer_bits = 0;
1777        pp->qc_active = 0;
1778        pp->last_issue_tag = ATA_TAG_POISON;
1779        nv_swncq_fis_reinit(ap);
1780}
1781
1782static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1783{
1784        struct nv_swncq_port_priv *pp = ap->private_data;
1785
1786        writew(fis, pp->irq_block);
1787}
1788
1789static void __ata_bmdma_stop(struct ata_port *ap)
1790{
1791        struct ata_queued_cmd qc;
1792
1793        qc.ap = ap;
1794        ata_bmdma_stop(&qc);
1795}
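/*
 * ata_bmdma_stop() appears to use nothing from the qc beyond qc->ap, so a
 * throwaway on-stack ata_queued_cmd with only ->ap filled in is enough to
 * reuse it here without a real command in flight.
 */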
1796
1797static void nv_swncq_ncq_stop(struct ata_port *ap)
1798{
1799        struct nv_swncq_port_priv *pp = ap->private_data;
1800        unsigned int i;
1801        u32 sactive;
1802        u32 done_mask;
1803
1804        ata_port_printk(ap, KERN_ERR,
1805                        "EH in SWNCQ mode, QC: qc_active 0x%X sactive 0x%X\n",
1806                        ap->qc_active, ap->link.sactive);
1807        ata_port_printk(ap, KERN_ERR,
1808                "SWNCQ: qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n  "
1809                "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1810                pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1811                pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1812
1813        ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
1814                        ap->ops->sff_check_status(ap),
1815                        ioread8(ap->ioaddr.error_addr));
1816
1817        sactive = readl(pp->sactive_block);
1818        done_mask = pp->qc_active ^ sactive;
1819
1820        ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sactive\n");
1821        for (i = 0; i < ATA_MAX_QUEUE; i++) {
1822                u8 err = 0;
1823                if (pp->qc_active & (1 << i))
1824                        err = 0;
1825                else if (done_mask & (1 << i))
1826                        err = 1;
1827                else
1828                        continue;
1829
1830                ata_port_printk(ap, KERN_ERR,
1831                                "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1832                                (pp->dhfis_bits >> i) & 0x1,
1833                                (pp->dmafis_bits >> i) & 0x1,
1834                                (pp->sdbfis_bits >> i) & 0x1,
1835                                (sactive >> i) & 0x1,
1836                                (err ? "error! tag doesn't exist" : " "));
1837        }
1838
1839        nv_swncq_pp_reinit(ap);
1840        ap->ops->sff_irq_clear(ap);
1841        __ata_bmdma_stop(ap);
1842        nv_swncq_irq_clear(ap, 0xffff);
1843}
1844
1845static void nv_swncq_error_handler(struct ata_port *ap)
1846{
1847        struct ata_eh_context *ehc = &ap->link.eh_context;
1848
1849        if (ap->link.sactive) {
1850                nv_swncq_ncq_stop(ap);
1851                ehc->i.action |= ATA_EH_RESET;
1852        }
1853
1854        ata_bmdma_error_handler(ap);
1855}
1856
1857#ifdef CONFIG_PM
1858static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1859{
1860        void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1861        u32 tmp;
1862
1863        /* clear irq */
1864        writel(~0, mmio + NV_INT_STATUS_MCP55);
1865
1866        /* disable irq */
1867        writel(0, mmio + NV_INT_ENABLE_MCP55);
1868
1869        /* disable swncq */
1870        tmp = readl(mmio + NV_CTL_MCP55);
1871        tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1872        writel(tmp, mmio + NV_CTL_MCP55);
1873
1874        return 0;
1875}
1876
1877static int nv_swncq_port_resume(struct ata_port *ap)
1878{
1879        void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1880        u32 tmp;
1881
1882        /* clear irq */
1883        writel(~0, mmio + NV_INT_STATUS_MCP55);
1884
1885        /* enable irq */
1886        writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1887
1888        /* enable swncq */
1889        tmp = readl(mmio + NV_CTL_MCP55);
1890        writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1891
1892        return 0;
1893}
1894#endif
1895
1896static void nv_swncq_host_init(struct ata_host *host)
1897{
1898        u32 tmp;
1899        void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1900        struct pci_dev *pdev = to_pci_dev(host->dev);
1901        u8 regval;
1902
1903        /* disable ECO 398 */
1904        pci_read_config_byte(pdev, 0x7f, &regval);
1905        regval &= ~(1 << 7);
1906        pci_write_config_byte(pdev, 0x7f, regval);
1907
1908        /* enable swncq */
1909        tmp = readl(mmio + NV_CTL_MCP55);
1910        VPRINTK("HOST_CTL:0x%X\n", tmp);
1911        writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1912
1913        /* enable irq intr */
1914        tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1915        VPRINTK("HOST_ENABLE:0x%X\n", tmp);
1916        writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1917
1918        /* clear port irq */
1919        writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1920}
1921
1922static int nv_swncq_slave_config(struct scsi_device *sdev)
1923{
1924        struct ata_port *ap = ata_shost_to_port(sdev->host);
1925        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1926        struct ata_device *dev;
1927        int rc;
1928        u8 rev;
1929        u8 check_maxtor = 0;
1930        unsigned char model_num[ATA_ID_PROD_LEN + 1];
1931
1932        rc = ata_scsi_slave_config(sdev);
1933        if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1934                /* Not a proper libata device, ignore */
1935                return rc;
1936
1937        dev = &ap->link.device[sdev->id];
1938        if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1939                return rc;
1940
1941        /* if MCP51 and Maxtor, then disable ncq */
1942        if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1943                pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1944                check_maxtor = 1;
1945
1946        /* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1947        if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1948                pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1949                pci_read_config_byte(pdev, 0x8, &rev);
1950                if (rev <= 0xa2)
1951                        check_maxtor = 1;
1952        }
1953
1954        if (!check_maxtor)
1955                return rc;
1956
1957        ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1958
1959        if (strncmp(model_num, "Maxtor", 6) == 0) {
1960                ata_scsi_change_queue_depth(sdev, 1, SCSI_QDEPTH_DEFAULT);
1961                ata_dev_printk(dev, KERN_NOTICE,
1962                        "Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
1963        }
1964
1965        return rc;
1966}
1967
1968static int nv_swncq_port_start(struct ata_port *ap)
1969{
1970        struct device *dev = ap->host->dev;
1971        void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1972        struct nv_swncq_port_priv *pp;
1973        int rc;
1974
1975        /* we might fall back to bmdma, allocate bmdma resources */
1976        rc = ata_bmdma_port_start(ap);
1977        if (rc)
1978                return rc;
1979
1980        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1981        if (!pp)
1982                return -ENOMEM;
1983
1984        pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1985                                      &pp->prd_dma, GFP_KERNEL);
1986        if (!pp->prd)
1987                return -ENOMEM;
1988        memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
1989
1990        ap->private_data = pp;
1991        pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
1992        pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
1993        pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
1994
1995        return 0;
1996}
1997
1998static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1999{
2000        if (qc->tf.protocol != ATA_PROT_NCQ) {
2001                ata_bmdma_qc_prep(qc);
2002                return;
2003        }
2004
2005        if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2006                return;
2007
2008        nv_swncq_fill_sg(qc);
2009}
2010
2011static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
2012{
2013        struct ata_port *ap = qc->ap;
2014        struct scatterlist *sg;
2015        struct nv_swncq_port_priv *pp = ap->private_data;
2016        struct ata_bmdma_prd *prd;
2017        unsigned int si, idx;
2018
2019        prd = pp->prd + ATA_MAX_PRD * qc->tag;
2020
2021        idx = 0;
2022        for_each_sg(qc->sg, sg, qc->n_elem, si) {
2023                u32 addr, offset;
2024                u32 sg_len, len;
2025
2026                addr = (u32)sg_dma_address(sg);
2027                sg_len = sg_dma_len(sg);
2028
2029                while (sg_len) {
2030                        offset = addr & 0xffff;
2031                        len = sg_len;
2032                        if ((offset + sg_len) > 0x10000)
2033                                len = 0x10000 - offset;
2034
2035                        prd[idx].addr = cpu_to_le32(addr);
2036                        prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2037
2038                        idx++;
2039                        sg_len -= len;
2040                        addr += len;
2041                }
2042        }
2043
2044        prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2045}
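/*
 * The splitting above enforces the classic BMDMA constraint that a single
 * PRD entry must not cross a 64K boundary.  With hypothetical numbers, a
 * 12K segment at DMA address 0x1f000 becomes two entries:
 *
 *	addr 0x1f000  len 0x1000   (runs up to the 0x20000 boundary)
 *	addr 0x20000  len 0x2000   (remainder; flagged EOT if it is last)
 */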
2046
2047static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
2048                                          struct ata_queued_cmd *qc)
2049{
2050        struct nv_swncq_port_priv *pp = ap->private_data;
2051
2052        if (qc == NULL)
2053                return 0;
2054
2055        DPRINTK("Enter\n");
2056
2057        writel((1 << qc->tag), pp->sactive_block);
2058        pp->last_issue_tag = qc->tag;
2059        pp->dhfis_bits &= ~(1 << qc->tag);
2060        pp->dmafis_bits &= ~(1 << qc->tag);
2061        pp->qc_active |= (0x1 << qc->tag);
2062
2063        ap->ops->sff_tf_load(ap, &qc->tf);       /* load tf registers */
2064        ap->ops->sff_exec_command(ap, &qc->tf);
2065
2066        DPRINTK("Issued tag %u\n", qc->tag);
2067
2068        return 0;
2069}
2070
2071static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2072{
2073        struct ata_port *ap = qc->ap;
2074        struct nv_swncq_port_priv *pp = ap->private_data;
2075
2076        if (qc->tf.protocol != ATA_PROT_NCQ)
2077                return ata_bmdma_qc_issue(qc);
2078
2079        DPRINTK("Enter\n");
2080
2081        if (!pp->qc_active)
2082                nv_swncq_issue_atacmd(ap, qc);
2083        else
2084                nv_swncq_qc_to_dq(ap, qc);      /* add qc to defer queue */
2085
2086        return 0;
2087}
2088
2089static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2090{
2091        u32 serror;
2092        struct ata_eh_info *ehi = &ap->link.eh_info;
2093
2094        ata_ehi_clear_desc(ehi);
2095
2096        /* SError needs to be cleared; otherwise, the port might lock up */
2097        sata_scr_read(&ap->link, SCR_ERROR, &serror);
2098        sata_scr_write(&ap->link, SCR_ERROR, serror);
2099
2100        /* analyze @fis */
2101        if (fis & NV_SWNCQ_IRQ_ADDED)
2102                ata_ehi_push_desc(ehi, "hot plug");
2103        else if (fis & NV_SWNCQ_IRQ_REMOVED)
2104                ata_ehi_push_desc(ehi, "hot unplug");
2105
2106        ata_ehi_hotplugged(ehi);
2107
2108        /* okay, let's hand over to EH */
2109        ehi->serror |= serror;
2110
2111        ata_port_freeze(ap);
2112}
2113
2114static int nv_swncq_sdbfis(struct ata_port *ap)
2115{
2116        struct ata_queued_cmd *qc;
2117        struct nv_swncq_port_priv *pp = ap->private_data;
2118        struct ata_eh_info *ehi = &ap->link.eh_info;
2119        u32 sactive;
2120        u32 done_mask;
2121        u8 host_stat;
2122        u8 lack_dhfis = 0;
2123
2124        host_stat = ap->ops->bmdma_status(ap);
2125        if (unlikely(host_stat & ATA_DMA_ERR)) {
2126                /* error when transferring data to/from memory */
2127                ata_ehi_clear_desc(ehi);
2128                ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2129                ehi->err_mask |= AC_ERR_HOST_BUS;
2130                ehi->action |= ATA_EH_RESET;
2131                return -EINVAL;
2132        }
2133
2134        ap->ops->sff_irq_clear(ap);
2135        __ata_bmdma_stop(ap);
2136
2137        sactive = readl(pp->sactive_block);
2138        done_mask = pp->qc_active ^ sactive;
2139
2140        pp->qc_active &= ~done_mask;
2141        pp->dhfis_bits &= ~done_mask;
2142        pp->dmafis_bits &= ~done_mask;
2143        pp->sdbfis_bits |= done_mask;
2144        ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
2145
2146        if (!ap->qc_active) {
2147                DPRINTK("over\n");
2148                nv_swncq_pp_reinit(ap);
2149                return 0;
2150        }
2151
2152        if (pp->qc_active & pp->dhfis_bits)
2153                return 0;
2154
2155        if ((pp->ncq_flags & ncq_saw_backout) ||
2156            (pp->qc_active ^ pp->dhfis_bits))
2157                /* if the controller can't get a device to host register FIS,
2158                 * the driver needs to reissue the command.
2159                 */
2160                lack_dhfis = 1;
2161
2162        DPRINTK("id 0x%x QC: qc_active 0x%x, "
2163                "SWNCQ: qc_active 0x%X defer_bits 0x%X "
2164                "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2165                ap->print_id, ap->qc_active, pp->qc_active,
2166                pp->defer_queue.defer_bits, pp->dhfis_bits,
2167                pp->dmafis_bits, pp->last_issue_tag);
2168
2169        nv_swncq_fis_reinit(ap);
2170
2171        if (lack_dhfis) {
2172                qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2173                nv_swncq_issue_atacmd(ap, qc);
2174                return 0;
2175        }
2176
2177        if (pp->defer_queue.defer_bits) {
2178                /* send deferral queue command */
2179                qc = nv_swncq_qc_from_dq(ap);
2180                WARN_ON(qc == NULL);
2181                nv_swncq_issue_atacmd(ap, qc);
2182        }
2183
2184        return 0;
2185}
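/*
 * On the done_mask derivation above: pp->qc_active tracks tags this driver
 * issued, while SActive tracks tags the drive still considers outstanding,
 * so in the normal case the XOR yields exactly the tags that completed:
 *
 *	qc_active  0b1011
 *	sactive    0b0001
 *	done_mask  0b1010	(tags 1 and 3 have finished)
 */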
2186
2187static inline u32 nv_swncq_tag(struct ata_port *ap)
2188{
2189        struct nv_swncq_port_priv *pp = ap->private_data;
2190        u32 tag;
2191
2192        tag = readb(pp->tag_block) >> 2;
2193        return (tag & 0x1f);
2194}
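/*
 * The tag register evidently stores the active NCQ tag in bits 2..6:
 * shifting right by 2 and masking with 0x1f recovers a tag in 0-31.
 */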
2195
2196static void nv_swncq_dmafis(struct ata_port *ap)
2197{
2198        struct ata_queued_cmd *qc;
2199        unsigned int rw;
2200        u8 dmactl;
2201        u32 tag;
2202        struct nv_swncq_port_priv *pp = ap->private_data;
2203
2204        __ata_bmdma_stop(ap);
2205        tag = nv_swncq_tag(ap);
2206
2207        DPRINTK("dma setup tag 0x%x\n", tag);
2208        qc = ata_qc_from_tag(ap, tag);
2209
2210        if (unlikely(!qc))
2211                return;
2212
2213        rw = qc->tf.flags & ATA_TFLAG_WRITE;
2214
2215        /* load PRD table addr. */
2216        iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
2217                  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2218
2219        /* specify data direction, triple-check start bit is clear */
2220        dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2221        dmactl &= ~ATA_DMA_WR;
2222        if (!rw)
2223                dmactl |= ATA_DMA_WR;   /* DMA engine writes to memory (device read) */
2224
2225        iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2226}
2227
2228static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2229{
2230        struct nv_swncq_port_priv *pp = ap->private_data;
2231        struct ata_queued_cmd *qc;
2232        struct ata_eh_info *ehi = &ap->link.eh_info;
2233        u32 serror;
2234        u8 ata_stat;
2235
2236        ata_stat = ap->ops->sff_check_status(ap);
2237        nv_swncq_irq_clear(ap, fis);
2238        if (!fis)
2239                return;
2240
2241        if (ap->pflags & ATA_PFLAG_FROZEN)
2242                return;
2243
2244        if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
2245                nv_swncq_hotplug(ap, fis);
2246                return;
2247        }
2248
2249        if (!pp->qc_active)
2250                return;
2251
2252        if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
2253                return;
2254        ap->ops->scr_write(&ap->link, SCR_ERROR, serror);
2255
2256        if (ata_stat & ATA_ERR) {
2257                ata_ehi_clear_desc(ehi);
2258                ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis);
2259                ehi->err_mask |= AC_ERR_DEV;
2260                ehi->serror |= serror;
2261                ehi->action |= ATA_EH_RESET;
2262                ata_port_freeze(ap);
2263                return;
2264        }
2265
2266        if (fis & NV_SWNCQ_IRQ_BACKOUT) {
2267                /* On a backout IRQ, the driver must issue
2268                 * the command again some time later.
2269                 */
2270                pp->ncq_flags |= ncq_saw_backout;
2271        }
2272
2273        if (fis & NV_SWNCQ_IRQ_SDBFIS) {
2274                pp->ncq_flags |= ncq_saw_sdb;
2275                DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
2276                        "dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2277                        ap->print_id, pp->qc_active, pp->dhfis_bits,
2278                        pp->dmafis_bits, readl(pp->sactive_block));
2279                if (nv_swncq_sdbfis(ap) < 0)
2280                        goto irq_error;
2281        }
2282
2283        if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
2284                /* The interrupt indicates the new command
2285                 * was transmitted correctly to the drive.
2286                 */
2287                pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
2288                pp->ncq_flags |= ncq_saw_d2h;
2289                if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
2290                        ata_ehi_push_desc(ehi, "illegal fis transaction");
2291                        ehi->err_mask |= AC_ERR_HSM;
2292                        ehi->action |= ATA_EH_RESET;
2293                        goto irq_error;
2294                }
2295
2296                if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
2297                    !(pp->ncq_flags & ncq_saw_dmas)) {
2298                        ata_stat = ap->ops->sff_check_status(ap);
2299                        if (ata_stat & ATA_BUSY)
2300                                goto irq_exit;
2301
2302                        if (pp->defer_queue.defer_bits) {
2303                                DPRINTK("send next command\n");
2304                                qc = nv_swncq_qc_from_dq(ap);
2305                                nv_swncq_issue_atacmd(ap, qc);
2306                        }
2307                }
2308        }
2309
2310        if (fis & NV_SWNCQ_IRQ_DMASETUP) {
2311                /* program the dma controller with appropriate PRD buffers
2312                 * and start the DMA transfer for requested command.
2313                 */
2314                pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2315                pp->ncq_flags |= ncq_saw_dmas;
2316                nv_swncq_dmafis(ap);
2317        }
2318
2319irq_exit:
2320        return;
2321irq_error:
2322        ata_ehi_push_desc(ehi, "fis:0x%x", fis);
2323        ata_port_freeze(ap);
2324        return;
2325}
2326
2327static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2328{
2329        struct ata_host *host = dev_instance;
2330        unsigned int i;
2331        unsigned int handled = 0;
2332        unsigned long flags;
2333        u32 irq_stat;
2334
2335        spin_lock_irqsave(&host->lock, flags);
2336
2337        irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2338
2339        for (i = 0; i < host->n_ports; i++) {
2340                struct ata_port *ap = host->ports[i];
2341
2342                if (ap->link.sactive) {
2343                        nv_swncq_host_interrupt(ap, (u16)irq_stat);
2344                        handled = 1;
2345                } else {
2346                        if (irq_stat)   /* clear all but the hotplug bits */
2347                                nv_swncq_irq_clear(ap, 0xfff0);
2348
2349                        handled += nv_host_intr(ap, (u8)irq_stat);
2350                }
2351                irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2352        }
2353
2354        spin_unlock_irqrestore(&host->lock, flags);
2355
2356        return IRQ_RETVAL(handled);
2357}
2358
2359static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2360{
2361        static int printed_version;
2362        const struct ata_port_info *ppi[] = { NULL, NULL };
2363        struct nv_pi_priv *ipriv;
2364        struct ata_host *host;
2365        struct nv_host_priv *hpriv;
2366        int rc;
2367        u32 bar;
2368        void __iomem *base;
2369        unsigned long type = ent->driver_data;
2370
2371        // Make sure this is a SATA controller by counting the number of bars
2372        // (NVIDIA SATA controllers will always have six bars).  Otherwise,
2373        // it's an IDE controller and we ignore it.
2374        for (bar = 0; bar < 6; bar++)
2375                if (pci_resource_start(pdev, bar) == 0)
2376                        return -ENODEV;
2377
2378        if (!printed_version++)
2379                dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
2380
2381        rc = pcim_enable_device(pdev);
2382        if (rc)
2383                return rc;
2384
2385        /* determine type and allocate host */
2386        if (type == CK804 && adma_enabled) {
2387                dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
2388                type = ADMA;
2389        } else if (type == MCP5x && swncq_enabled) {
2390                dev_printk(KERN_NOTICE, &pdev->dev, "Using SWNCQ mode\n");
2391                type = SWNCQ;
2392        }
2393
2394        ppi[0] = &nv_port_info[type];
2395        ipriv = ppi[0]->private_data;
2396        rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
2397        if (rc)
2398                return rc;
2399
2400        hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2401        if (!hpriv)
2402                return -ENOMEM;
2403        hpriv->type = type;
2404        host->private_data = hpriv;
2405
2406        /* request and iomap NV_MMIO_BAR */
2407        rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2408        if (rc)
2409                return rc;
2410
2411        /* configure SCR access */
2412        base = host->iomap[NV_MMIO_BAR];
2413        host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2414        host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
2415
2416        /* enable SATA space for CK804 */
2417        if (type >= CK804) {
2418                u8 regval;
2419
2420                pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2421                regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2422                pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2423        }
2424
2425        /* init ADMA */
2426        if (type == ADMA) {
2427                rc = nv_adma_host_init(host);
2428                if (rc)
2429                        return rc;
2430        } else if (type == SWNCQ)
2431                nv_swncq_host_init(host);
2432
2433        if (msi_enabled) {
2434                dev_printk(KERN_NOTICE, &pdev->dev, "Using MSI\n");
2435                pci_enable_msi(pdev);
2436        }
2437
2438        pci_set_master(pdev);
2439        return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
2440}
2441
2442#ifdef CONFIG_PM
2443static int nv_pci_device_resume(struct pci_dev *pdev)
2444{
2445        struct ata_host *host = dev_get_drvdata(&pdev->dev);
2446        struct nv_host_priv *hpriv = host->private_data;
2447        int rc;
2448
2449        rc = ata_pci_device_do_resume(pdev);
2450        if (rc)
2451                return rc;
2452
2453        if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2454                if (hpriv->type >= CK804) {
2455                        u8 regval;
2456
2457                        pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2458                        regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2459                        pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2460                }
2461                if (hpriv->type == ADMA) {
2462                        u32 tmp32;
2463                        struct nv_adma_port_priv *pp;
2464                        /* enable/disable ADMA on the ports appropriately */
2465                        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2466
2467                        pp = host->ports[0]->private_data;
2468                        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2469                                tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2470                                           NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2471                        else
2472                                tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
2473                                           NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2474                        pp = host->ports[1]->private_data;
2475                        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2476                                tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
2477                                           NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2478                        else
2479                                tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
2480                                           NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2481
2482                        pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2483                }
2484        }
2485
2486        ata_host_resume(host);
2487
2488        return 0;
2489}
2490#endif
2491
2492static void nv_ck804_host_stop(struct ata_host *host)
2493{
2494        struct pci_dev *pdev = to_pci_dev(host->dev);
2495        u8 regval;
2496
2497        /* disable SATA space for CK804 */
2498        pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2499        regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2500        pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2501}
2502
2503static void nv_adma_host_stop(struct ata_host *host)
2504{
2505        struct pci_dev *pdev = to_pci_dev(host->dev);
2506        u32 tmp32;
2507
2508        /* disable ADMA on the ports */
2509        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2510        tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2511                   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2512                   NV_MCP_SATA_CFG_20_PORT1_EN |
2513                   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2514
2515        pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2516
2517        nv_ck804_host_stop(host);
2518}
2519
2520static int __init nv_init(void)
2521{
2522        return pci_register_driver(&nv_pci_driver);
2523}
2524
2525static void __exit nv_exit(void)
2526{
2527        pci_unregister_driver(&nv_pci_driver);
2528}
2529
2530module_init(nv_init);
2531module_exit(nv_exit);
2532module_param_named(adma, adma_enabled, bool, 0444);
2533MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
2534module_param_named(swncq, swncq_enabled, bool, 0444);
2535MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
2536module_param_named(msi, msi_enabled, bool, 0444);
2537MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");
2538
2539