linux/drivers/ata/sata_nv.c
   1/*
   2 *  sata_nv.c - NVIDIA nForce SATA
   3 *
   4 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
   5 *  Copyright 2004 Andrew Chew
   6 *
   7 *
   8 *  This program is free software; you can redistribute it and/or modify
   9 *  it under the terms of the GNU General Public License as published by
  10 *  the Free Software Foundation; either version 2, or (at your option)
  11 *  any later version.
  12 *
  13 *  This program is distributed in the hope that it will be useful,
  14 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  15 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  16 *  GNU General Public License for more details.
  17 *
  18 *  You should have received a copy of the GNU General Public License
  19 *  along with this program; see the file COPYING.  If not, write to
  20 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
  21 *
  22 *
  23 *  libata documentation is available via 'make {ps|pdf}docs',
  24 *  as Documentation/DocBook/libata.*
  25 *
  26 *  No hardware documentation available outside of NVIDIA.
   27 *  This driver programs the NVIDIA SATA controller in a similar
   28 *  fashion to other PCI IDE BMDMA controllers, with a few
  29 *  NV-specific details such as register offsets, SATA phy location,
  30 *  hotplug info, etc.
  31 *
  32 *  CK804/MCP04 controllers support an alternate programming interface
  33 *  similar to the ADMA specification (with some modifications).
  34 *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
  35 *  sent through the legacy interface.
  36 *
  37 */
  38
  39#include <linux/kernel.h>
  40#include <linux/module.h>
  41#include <linux/pci.h>
  42#include <linux/init.h>
  43#include <linux/blkdev.h>
  44#include <linux/delay.h>
  45#include <linux/interrupt.h>
  46#include <linux/device.h>
  47#include <scsi/scsi_host.h>
  48#include <scsi/scsi_device.h>
  49#include <linux/libata.h>
  50
  51#define DRV_NAME                        "sata_nv"
  52#define DRV_VERSION                     "3.5"
  53
  54#define NV_ADMA_DMA_BOUNDARY            0xffffffffUL
  55
  56enum {
  57        NV_MMIO_BAR                     = 5,
  58
  59        NV_PORTS                        = 2,
  60        NV_PIO_MASK                     = ATA_PIO4,
  61        NV_MWDMA_MASK                   = ATA_MWDMA2,
  62        NV_UDMA_MASK                    = ATA_UDMA6,
  63        NV_PORT0_SCR_REG_OFFSET         = 0x00,
  64        NV_PORT1_SCR_REG_OFFSET         = 0x40,
  65
  66        /* INT_STATUS/ENABLE */
  67        NV_INT_STATUS                   = 0x10,
  68        NV_INT_ENABLE                   = 0x11,
  69        NV_INT_STATUS_CK804             = 0x440,
  70        NV_INT_ENABLE_CK804             = 0x441,
  71
  72        /* INT_STATUS/ENABLE bits */
  73        NV_INT_DEV                      = 0x01,
  74        NV_INT_PM                       = 0x02,
  75        NV_INT_ADDED                    = 0x04,
  76        NV_INT_REMOVED                  = 0x08,
  77
  78        NV_INT_PORT_SHIFT               = 4,    /* each port occupies 4 bits */
  79
  80        NV_INT_ALL                      = 0x0f,
  81        NV_INT_MASK                     = NV_INT_DEV |
  82                                          NV_INT_ADDED | NV_INT_REMOVED,
  83
  84        /* INT_CONFIG */
  85        NV_INT_CONFIG                   = 0x12,
   86        NV_INT_CONFIG_METHD             = 0x01, /* 0 = INT, 1 = SMI */
  87
   88        /* For PCI config register 20 */
  89        NV_MCP_SATA_CFG_20              = 0x50,
  90        NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
  91        NV_MCP_SATA_CFG_20_PORT0_EN     = (1 << 17),
  92        NV_MCP_SATA_CFG_20_PORT1_EN     = (1 << 16),
  93        NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
  94        NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),
  95
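        /* Layout: each of the NV_ADMA_MAX_CPBS command tags gets a 128-byte
           CPB plus a (1024 - 128) / 16 = 56-entry external APRD table; the
           5 APRDs embedded in the CPB itself bring the per-command S/G
           limit to 61 (NV_ADMA_SGTBL_TOTAL_LEN), and the per-port DMA
           buffer totals 32 * (128 + 896) bytes = 32 KiB. */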
  96        NV_ADMA_MAX_CPBS                = 32,
  97        NV_ADMA_CPB_SZ                  = 128,
  98        NV_ADMA_APRD_SZ                 = 16,
  99        NV_ADMA_SGTBL_LEN               = (1024 - NV_ADMA_CPB_SZ) /
 100                                           NV_ADMA_APRD_SZ,
 101        NV_ADMA_SGTBL_TOTAL_LEN         = NV_ADMA_SGTBL_LEN + 5,
 102        NV_ADMA_SGTBL_SZ                = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
 103        NV_ADMA_PORT_PRIV_DMA_SZ        = NV_ADMA_MAX_CPBS *
 104                                           (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
 105
 106        /* BAR5 offset to ADMA general registers */
 107        NV_ADMA_GEN                     = 0x400,
 108        NV_ADMA_GEN_CTL                 = 0x00,
 109        NV_ADMA_NOTIFIER_CLEAR          = 0x30,
 110
 111        /* BAR5 offset to ADMA ports */
 112        NV_ADMA_PORT                    = 0x480,
 113
 114        /* size of ADMA port register space  */
 115        NV_ADMA_PORT_SIZE               = 0x100,
 116
 117        /* ADMA port registers */
 118        NV_ADMA_CTL                     = 0x40,
 119        NV_ADMA_CPB_COUNT               = 0x42,
 120        NV_ADMA_NEXT_CPB_IDX            = 0x43,
 121        NV_ADMA_STAT                    = 0x44,
 122        NV_ADMA_CPB_BASE_LOW            = 0x48,
 123        NV_ADMA_CPB_BASE_HIGH           = 0x4C,
 124        NV_ADMA_APPEND                  = 0x50,
 125        NV_ADMA_NOTIFIER                = 0x68,
 126        NV_ADMA_NOTIFIER_ERROR          = 0x6C,
 127
 128        /* NV_ADMA_CTL register bits */
 129        NV_ADMA_CTL_HOTPLUG_IEN         = (1 << 0),
 130        NV_ADMA_CTL_CHANNEL_RESET       = (1 << 5),
 131        NV_ADMA_CTL_GO                  = (1 << 7),
 132        NV_ADMA_CTL_AIEN                = (1 << 8),
 133        NV_ADMA_CTL_READ_NON_COHERENT   = (1 << 11),
 134        NV_ADMA_CTL_WRITE_NON_COHERENT  = (1 << 12),
 135
 136        /* CPB response flag bits */
 137        NV_CPB_RESP_DONE                = (1 << 0),
 138        NV_CPB_RESP_ATA_ERR             = (1 << 3),
 139        NV_CPB_RESP_CMD_ERR             = (1 << 4),
 140        NV_CPB_RESP_CPB_ERR             = (1 << 7),
 141
 142        /* CPB control flag bits */
 143        NV_CPB_CTL_CPB_VALID            = (1 << 0),
 144        NV_CPB_CTL_QUEUE                = (1 << 1),
 145        NV_CPB_CTL_APRD_VALID           = (1 << 2),
 146        NV_CPB_CTL_IEN                  = (1 << 3),
 147        NV_CPB_CTL_FPDMA                = (1 << 4),
 148
 149        /* APRD flags */
 150        NV_APRD_WRITE                   = (1 << 1),
 151        NV_APRD_END                     = (1 << 2),
 152        NV_APRD_CONT                    = (1 << 3),
 153
 154        /* NV_ADMA_STAT flags */
 155        NV_ADMA_STAT_TIMEOUT            = (1 << 0),
 156        NV_ADMA_STAT_HOTUNPLUG          = (1 << 1),
 157        NV_ADMA_STAT_HOTPLUG            = (1 << 2),
 158        NV_ADMA_STAT_CPBERR             = (1 << 4),
 159        NV_ADMA_STAT_SERROR             = (1 << 5),
 160        NV_ADMA_STAT_CMD_COMPLETE       = (1 << 6),
 161        NV_ADMA_STAT_IDLE               = (1 << 8),
 162        NV_ADMA_STAT_LEGACY             = (1 << 9),
 163        NV_ADMA_STAT_STOPPED            = (1 << 10),
 164        NV_ADMA_STAT_DONE               = (1 << 12),
 165        NV_ADMA_STAT_ERR                = NV_ADMA_STAT_CPBERR |
 166                                          NV_ADMA_STAT_TIMEOUT,
 167
 168        /* port flags */
 169        NV_ADMA_PORT_REGISTER_MODE      = (1 << 0),
 170        NV_ADMA_ATAPI_SETUP_COMPLETE    = (1 << 1),
 171
 172        /* MCP55 reg offset */
 173        NV_CTL_MCP55                    = 0x400,
 174        NV_INT_STATUS_MCP55             = 0x440,
 175        NV_INT_ENABLE_MCP55             = 0x444,
 176        NV_NCQ_REG_MCP55                = 0x448,
 177
 178        /* MCP55 */
 179        NV_INT_ALL_MCP55                = 0xffff,
 180        NV_INT_PORT_SHIFT_MCP55         = 16,   /* each port occupies 16 bits */
 181        NV_INT_MASK_MCP55               = NV_INT_ALL_MCP55 & 0xfffd,
 182
  183        /* SWNCQ ENABLE BITS */
 184        NV_CTL_PRI_SWNCQ                = 0x02,
 185        NV_CTL_SEC_SWNCQ                = 0x04,
 186
  187        /* SW NCQ status bits */
 188        NV_SWNCQ_IRQ_DEV                = (1 << 0),
 189        NV_SWNCQ_IRQ_PM                 = (1 << 1),
 190        NV_SWNCQ_IRQ_ADDED              = (1 << 2),
 191        NV_SWNCQ_IRQ_REMOVED            = (1 << 3),
 192
 193        NV_SWNCQ_IRQ_BACKOUT            = (1 << 4),
 194        NV_SWNCQ_IRQ_SDBFIS             = (1 << 5),
 195        NV_SWNCQ_IRQ_DHREGFIS           = (1 << 6),
 196        NV_SWNCQ_IRQ_DMASETUP           = (1 << 7),
 197
 198        NV_SWNCQ_IRQ_HOTPLUG            = NV_SWNCQ_IRQ_ADDED |
 199                                          NV_SWNCQ_IRQ_REMOVED,
 200
 201};
 202
 203/* ADMA Physical Region Descriptor - one SG segment */
 204struct nv_adma_prd {
 205        __le64                  addr;
 206        __le32                  len;
 207        u8                      flags;
 208        u8                      packet_len;
 209        __le16                  reserved;
 210};
 211
 212enum nv_adma_regbits {
 213        CMDEND  = (1 << 15),            /* end of command list */
 214        WNB     = (1 << 14),            /* wait-not-BSY */
 215        IGN     = (1 << 13),            /* ignore this entry */
 216        CS1n    = (1 << (4 + 8)),       /* std. PATA signals follow... */
 217        DA2     = (1 << (2 + 8)),
 218        DA1     = (1 << (1 + 8)),
 219        DA0     = (1 << (0 + 8)),
 220};
 221
 222/* ADMA Command Parameter Block
 223   The first 5 SG segments are stored inside the Command Parameter Block itself.
 224   If there are more than 5 segments the remainder are stored in a separate
 225   memory area indicated by next_aprd. */
 226struct nv_adma_cpb {
 227        u8                      resp_flags;    /* 0 */
 228        u8                      reserved1;     /* 1 */
 229        u8                      ctl_flags;     /* 2 */
  230        /* len is length of taskfile in 64-bit words */
 231        u8                      len;            /* 3  */
 232        u8                      tag;           /* 4 */
 233        u8                      next_cpb_idx;  /* 5 */
 234        __le16                  reserved2;     /* 6-7 */
 235        __le16                  tf[12];        /* 8-31 */
 236        struct nv_adma_prd      aprd[5];       /* 32-111 */
 237        __le64                  next_aprd;     /* 112-119 */
 238        __le64                  reserved3;     /* 120-127 */
 239};
 240
 241
 242struct nv_adma_port_priv {
 243        struct nv_adma_cpb      *cpb;
 244        dma_addr_t              cpb_dma;
 245        struct nv_adma_prd      *aprd;
 246        dma_addr_t              aprd_dma;
 247        void __iomem            *ctl_block;
 248        void __iomem            *gen_block;
 249        void __iomem            *notifier_clear_block;
 250        u64                     adma_dma_mask;
 251        u8                      flags;
 252        int                     last_issue_ncq;
 253};
 254
 255struct nv_host_priv {
 256        unsigned long           type;
 257};
 258
 259struct defer_queue {
 260        u32             defer_bits;
 261        unsigned int    head;
 262        unsigned int    tail;
 263        unsigned int    tag[ATA_MAX_QUEUE];
 264};
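
/* A minimal sketch (hypothetical helper names; the driver's own SWNCQ
 * enqueue/dequeue routines are the authoritative versions) of the ring
 * discipline above -- head and tail advance monotonically and are masked
 * into the power-of-two tag array, while defer_bits tracks membership:
 */
static inline void nv_dq_push_sketch(struct defer_queue *dq, unsigned int tag)
{
	dq->defer_bits |= (1U << tag);			 /* mark tag deferred */
	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = tag; /* ring insert */
}

static inline unsigned int nv_dq_pop_sketch(struct defer_queue *dq)
{
	unsigned int tag = dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)];

	dq->defer_bits &= ~(1U << tag);			 /* clear membership */
	return tag;
}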
 265
 266enum ncq_saw_flag_list {
 267        ncq_saw_d2h     = (1U << 0),
 268        ncq_saw_dmas    = (1U << 1),
 269        ncq_saw_sdb     = (1U << 2),
 270        ncq_saw_backout = (1U << 3),
 271};
 272
 273struct nv_swncq_port_priv {
 274        struct ata_prd  *prd;    /* our SG list */
 275        dma_addr_t      prd_dma; /* and its DMA mapping */
 276        void __iomem    *sactive_block;
 277        void __iomem    *irq_block;
 278        void __iomem    *tag_block;
 279        u32             qc_active;
 280
 281        unsigned int    last_issue_tag;
 282
  283        /* FIFO circular queue to store deferred commands */
 284        struct defer_queue defer_queue;
 285
 286        /* for NCQ interrupt analysis */
 287        u32             dhfis_bits;
 288        u32             dmafis_bits;
 289        u32             sdbfis_bits;
 290
 291        unsigned int    ncq_flags;
 292};
 293
 294
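/* The ADMA general control register exposes one per-port interrupt flag:
   bit 19 for port 0 and bit 31 for port 1, i.e. bit (19 + 12 * port),
   which the macro below tests. */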
 295#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
 296
 297static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
 298#ifdef CONFIG_PM
 299static int nv_pci_device_resume(struct pci_dev *pdev);
 300#endif
 301static void nv_ck804_host_stop(struct ata_host *host);
 302static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
 303static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
 304static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
 305static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
 306static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
 307
 308static int nv_hardreset(struct ata_link *link, unsigned int *class,
 309                        unsigned long deadline);
 310static void nv_nf2_freeze(struct ata_port *ap);
 311static void nv_nf2_thaw(struct ata_port *ap);
 312static void nv_ck804_freeze(struct ata_port *ap);
 313static void nv_ck804_thaw(struct ata_port *ap);
 314static int nv_adma_slave_config(struct scsi_device *sdev);
 315static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
 316static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
 317static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
 318static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
 319static void nv_adma_irq_clear(struct ata_port *ap);
 320static int nv_adma_port_start(struct ata_port *ap);
 321static void nv_adma_port_stop(struct ata_port *ap);
 322#ifdef CONFIG_PM
 323static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
 324static int nv_adma_port_resume(struct ata_port *ap);
 325#endif
 326static void nv_adma_freeze(struct ata_port *ap);
 327static void nv_adma_thaw(struct ata_port *ap);
 328static void nv_adma_error_handler(struct ata_port *ap);
 329static void nv_adma_host_stop(struct ata_host *host);
 330static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
 331static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
 332
 333static void nv_mcp55_thaw(struct ata_port *ap);
 334static void nv_mcp55_freeze(struct ata_port *ap);
 335static void nv_swncq_error_handler(struct ata_port *ap);
 336static int nv_swncq_slave_config(struct scsi_device *sdev);
 337static int nv_swncq_port_start(struct ata_port *ap);
 338static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
 339static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
 340static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
 341static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
 342static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
 343#ifdef CONFIG_PM
 344static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
 345static int nv_swncq_port_resume(struct ata_port *ap);
 346#endif
 347
 348enum nv_host_type
 349{
 350        GENERIC,
 351        NFORCE2,
 352        NFORCE3 = NFORCE2,      /* NF2 == NF3 as far as sata_nv is concerned */
 353        CK804,
 354        ADMA,
 355        MCP5x,
 356        SWNCQ,
 357};
 358
 359static const struct pci_device_id nv_pci_tbl[] = {
 360        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
 361        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
 362        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
 363        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
 364        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
 365        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
 366        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
 367        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
 368        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
 369        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
 370        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
 371        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
 372        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
 373        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
 374
 375        { } /* terminate list */
 376};
 377
 378static struct pci_driver nv_pci_driver = {
 379        .name                   = DRV_NAME,
 380        .id_table               = nv_pci_tbl,
 381        .probe                  = nv_init_one,
 382#ifdef CONFIG_PM
 383        .suspend                = ata_pci_device_suspend,
 384        .resume                 = nv_pci_device_resume,
 385#endif
 386        .remove                 = ata_pci_remove_one,
 387};
 388
 389static struct scsi_host_template nv_sht = {
 390        ATA_BMDMA_SHT(DRV_NAME),
 391};
 392
 393static struct scsi_host_template nv_adma_sht = {
 394        ATA_NCQ_SHT(DRV_NAME),
 395        .can_queue              = NV_ADMA_MAX_CPBS,
 396        .sg_tablesize           = NV_ADMA_SGTBL_TOTAL_LEN,
 397        .dma_boundary           = NV_ADMA_DMA_BOUNDARY,
 398        .slave_configure        = nv_adma_slave_config,
 399};
 400
 401static struct scsi_host_template nv_swncq_sht = {
 402        ATA_NCQ_SHT(DRV_NAME),
 403        .can_queue              = ATA_MAX_QUEUE,
 404        .sg_tablesize           = LIBATA_MAX_PRD,
 405        .dma_boundary           = ATA_DMA_BOUNDARY,
 406        .slave_configure        = nv_swncq_slave_config,
 407};
 408
 409/*
  410 * NV SATA controllers have various problems with the hardreset
  411 * protocol depending on the specific controller and device.
 412 *
 413 * GENERIC:
 414 *
 415 *  bko11195 reports that link doesn't come online after hardreset on
 416 *  generic nv's and there have been several other similar reports on
 417 *  linux-ide.
 418 *
 419 *  bko12351#c23 reports that warmplug on MCP61 doesn't work with
 420 *  softreset.
 421 *
 422 * NF2/3:
 423 *
 424 *  bko3352 reports nf2/3 controllers can't determine device signature
 425 *  reliably after hardreset.  The following thread reports detection
 426 *  failure on cold boot with the standard debouncing timing.
 427 *
 428 *  http://thread.gmane.org/gmane.linux.ide/34098
 429 *
 430 *  bko12176 reports that hardreset fails to bring up the link during
 431 *  boot on nf2.
 432 *
 433 * CK804:
 434 *
 435 *  For initial probing after boot and hot plugging, hardreset mostly
 436 *  works fine on CK804 but curiously, reprobing on the initial port
 437 *  by rescanning or rmmod/insmod fails to acquire the initial D2H Reg
  438 *  FIS in a somewhat nondeterministic way.
 439 *
 440 * SWNCQ:
 441 *
  442 *  bko12351 reports that when SWNCQ is enabled, hardreset must be
  443 *  used for hotplug to work, yet hardreset can't report the proper
  444 *  signature, which suggests that mcp5x behaves like nf2 as far as
  445 *  reset quirkiness is concerned.
 446 *
  447 *  bko12703 reports that boot probing fails for an Intel SSD with
 448 *  hardreset.  Link fails to come online.  Softreset works fine.
 449 *
 450 * The failures are varied but the following patterns seem true for
 451 * all flavors.
 452 *
 453 * - Softreset during boot always works.
 454 *
 455 * - Hardreset during boot sometimes fails to bring up the link on
  456 *   certain combinations, and device signature acquisition is
 457 *   unreliable.
 458 *
 459 * - Hardreset is often necessary after hotplug.
 460 *
 461 * So, preferring softreset for boot probing and error handling (as
 462 * hardreset might bring down the link) but using hardreset for
 463 * post-boot probing should work around the above issues in most
  464 * cases.  Define nv_hardreset(), which only kicks in for post-boot
  465 * probing, and use it for all variants; a sketch follows below.
 466 */
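
/* A minimal sketch of that policy, assuming the standard libata flag
 * conventions (ATA_PFLAG_LOADING is set during boot-time probing,
 * ATA_EHI_HOTPLUGGED marks hotplug events); treat this as illustrative
 * rather than authoritative, as the in-tree definition may differ in
 * detail:
 */
static int nv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_eh_context *ehc = &link->eh_context;

	/* Do hardreset iff it's post-boot probing; boot-time probing
	 * falls back to softreset. */
	if ((link->ap->pflags & ATA_PFLAG_LOADING) &&
	    !(ehc->i.flags & ATA_EHI_HOTPLUGGED))
		return -EAGAIN;		/* skip hardreset for boot probing */

	return sata_sff_hardreset(link, class, deadline);
}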
 467static struct ata_port_operations nv_generic_ops = {
 468        .inherits               = &ata_bmdma_port_ops,
 469        .lost_interrupt         = ATA_OP_NULL,
 470        .scr_read               = nv_scr_read,
 471        .scr_write              = nv_scr_write,
 472        .hardreset              = nv_hardreset,
 473};
 474
 475static struct ata_port_operations nv_nf2_ops = {
 476        .inherits               = &nv_generic_ops,
 477        .freeze                 = nv_nf2_freeze,
 478        .thaw                   = nv_nf2_thaw,
 479};
 480
 481static struct ata_port_operations nv_ck804_ops = {
 482        .inherits               = &nv_generic_ops,
 483        .freeze                 = nv_ck804_freeze,
 484        .thaw                   = nv_ck804_thaw,
 485        .host_stop              = nv_ck804_host_stop,
 486};
 487
 488static struct ata_port_operations nv_adma_ops = {
 489        .inherits               = &nv_ck804_ops,
 490
 491        .check_atapi_dma        = nv_adma_check_atapi_dma,
 492        .sff_tf_read            = nv_adma_tf_read,
 493        .qc_defer               = ata_std_qc_defer,
 494        .qc_prep                = nv_adma_qc_prep,
 495        .qc_issue               = nv_adma_qc_issue,
 496        .sff_irq_clear          = nv_adma_irq_clear,
 497
 498        .freeze                 = nv_adma_freeze,
 499        .thaw                   = nv_adma_thaw,
 500        .error_handler          = nv_adma_error_handler,
 501        .post_internal_cmd      = nv_adma_post_internal_cmd,
 502
 503        .port_start             = nv_adma_port_start,
 504        .port_stop              = nv_adma_port_stop,
 505#ifdef CONFIG_PM
 506        .port_suspend           = nv_adma_port_suspend,
 507        .port_resume            = nv_adma_port_resume,
 508#endif
 509        .host_stop              = nv_adma_host_stop,
 510};
 511
 512static struct ata_port_operations nv_swncq_ops = {
 513        .inherits               = &nv_generic_ops,
 514
 515        .qc_defer               = ata_std_qc_defer,
 516        .qc_prep                = nv_swncq_qc_prep,
 517        .qc_issue               = nv_swncq_qc_issue,
 518
 519        .freeze                 = nv_mcp55_freeze,
 520        .thaw                   = nv_mcp55_thaw,
 521        .error_handler          = nv_swncq_error_handler,
 522
 523#ifdef CONFIG_PM
 524        .port_suspend           = nv_swncq_port_suspend,
 525        .port_resume            = nv_swncq_port_resume,
 526#endif
 527        .port_start             = nv_swncq_port_start,
 528};
 529
 530struct nv_pi_priv {
 531        irq_handler_t                   irq_handler;
 532        struct scsi_host_template       *sht;
 533};
 534
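/* Compound-literal helper pairing each flavor's interrupt handler with its
   scsi_host_template, for stashing in ata_port_info->private_data below. */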
 535#define NV_PI_PRIV(_irq_handler, _sht) \
 536        &(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
 537
 538static const struct ata_port_info nv_port_info[] = {
 539        /* generic */
 540        {
 541                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
 542                .pio_mask       = NV_PIO_MASK,
 543                .mwdma_mask     = NV_MWDMA_MASK,
 544                .udma_mask      = NV_UDMA_MASK,
 545                .port_ops       = &nv_generic_ops,
 546                .private_data   = NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
 547        },
 548        /* nforce2/3 */
 549        {
 550                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
 551                .pio_mask       = NV_PIO_MASK,
 552                .mwdma_mask     = NV_MWDMA_MASK,
 553                .udma_mask      = NV_UDMA_MASK,
 554                .port_ops       = &nv_nf2_ops,
 555                .private_data   = NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
 556        },
 557        /* ck804 */
 558        {
 559                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
 560                .pio_mask       = NV_PIO_MASK,
 561                .mwdma_mask     = NV_MWDMA_MASK,
 562                .udma_mask      = NV_UDMA_MASK,
 563                .port_ops       = &nv_ck804_ops,
 564                .private_data   = NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
 565        },
 566        /* ADMA */
 567        {
 568                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 569                                  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
 570                .pio_mask       = NV_PIO_MASK,
 571                .mwdma_mask     = NV_MWDMA_MASK,
 572                .udma_mask      = NV_UDMA_MASK,
 573                .port_ops       = &nv_adma_ops,
 574                .private_data   = NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
 575        },
 576        /* MCP5x */
 577        {
 578                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
 579                .pio_mask       = NV_PIO_MASK,
 580                .mwdma_mask     = NV_MWDMA_MASK,
 581                .udma_mask      = NV_UDMA_MASK,
 582                .port_ops       = &nv_generic_ops,
 583                .private_data   = NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
 584        },
 585        /* SWNCQ */
 586        {
 587                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 588                                  ATA_FLAG_NCQ,
 589                .pio_mask       = NV_PIO_MASK,
 590                .mwdma_mask     = NV_MWDMA_MASK,
 591                .udma_mask      = NV_UDMA_MASK,
 592                .port_ops       = &nv_swncq_ops,
 593                .private_data   = NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
 594        },
 595};
 596
 597MODULE_AUTHOR("NVIDIA");
 598MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
 599MODULE_LICENSE("GPL");
 600MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
 601MODULE_VERSION(DRV_VERSION);
 602
 603static int adma_enabled;
 604static int swncq_enabled = 1;
 605static int msi_enabled;
 606
 607static void nv_adma_register_mode(struct ata_port *ap)
 608{
 609        struct nv_adma_port_priv *pp = ap->private_data;
 610        void __iomem *mmio = pp->ctl_block;
 611        u16 tmp, status;
 612        int count = 0;
 613
 614        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
 615                return;
 616
 617        status = readw(mmio + NV_ADMA_STAT);
 618        while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
 619                ndelay(50);
 620                status = readw(mmio + NV_ADMA_STAT);
 621                count++;
 622        }
 623        if (count == 20)
 624                ata_port_printk(ap, KERN_WARNING,
 625                        "timeout waiting for ADMA IDLE, stat=0x%hx\n",
 626                        status);
 627
 628        tmp = readw(mmio + NV_ADMA_CTL);
 629        writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
 630
 631        count = 0;
 632        status = readw(mmio + NV_ADMA_STAT);
 633        while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
 634                ndelay(50);
 635                status = readw(mmio + NV_ADMA_STAT);
 636                count++;
 637        }
 638        if (count == 20)
 639                ata_port_printk(ap, KERN_WARNING,
 640                         "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
 641                         status);
 642
 643        pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
 644}
 645
 646static void nv_adma_mode(struct ata_port *ap)
 647{
 648        struct nv_adma_port_priv *pp = ap->private_data;
 649        void __iomem *mmio = pp->ctl_block;
 650        u16 tmp, status;
 651        int count = 0;
 652
 653        if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
 654                return;
 655
 656        WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
 657
 658        tmp = readw(mmio + NV_ADMA_CTL);
 659        writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
 660
 661        status = readw(mmio + NV_ADMA_STAT);
 662        while (((status & NV_ADMA_STAT_LEGACY) ||
 663              !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
 664                ndelay(50);
 665                status = readw(mmio + NV_ADMA_STAT);
 666                count++;
 667        }
 668        if (count == 20)
 669                ata_port_printk(ap, KERN_WARNING,
 670                        "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
 671                        status);
 672
 673        pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
 674}
 675
 676static int nv_adma_slave_config(struct scsi_device *sdev)
 677{
 678        struct ata_port *ap = ata_shost_to_port(sdev->host);
 679        struct nv_adma_port_priv *pp = ap->private_data;
 680        struct nv_adma_port_priv *port0, *port1;
 681        struct scsi_device *sdev0, *sdev1;
 682        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 683        unsigned long segment_boundary, flags;
 684        unsigned short sg_tablesize;
 685        int rc;
 686        int adma_enable;
 687        u32 current_reg, new_reg, config_mask;
 688
 689        rc = ata_scsi_slave_config(sdev);
 690
 691        if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
 692                /* Not a proper libata device, ignore */
 693                return rc;
 694
 695        spin_lock_irqsave(ap->lock, flags);
 696
 697        if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
 698                /*
 699                 * NVIDIA reports that ADMA mode does not support ATAPI commands.
 700                 * Therefore ATAPI commands are sent through the legacy interface.
 701                 * However, the legacy interface only supports 32-bit DMA.
 702                 * Restrict DMA parameters as required by the legacy interface
 703                 * when an ATAPI device is connected.
 704                 */
 705                segment_boundary = ATA_DMA_BOUNDARY;
 706                /* Subtract 1 since an extra entry may be needed for padding, see
 707                   libata-scsi.c */
 708                sg_tablesize = LIBATA_MAX_PRD - 1;
 709
 710                /* Since the legacy DMA engine is in use, we need to disable ADMA
 711                   on the port. */
 712                adma_enable = 0;
 713                nv_adma_register_mode(ap);
 714        } else {
 715                segment_boundary = NV_ADMA_DMA_BOUNDARY;
 716                sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
 717                adma_enable = 1;
 718        }
 719
 720        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
 721
 722        if (ap->port_no == 1)
 723                config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
 724                              NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
 725        else
 726                config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
 727                              NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
 728
 729        if (adma_enable) {
 730                new_reg = current_reg | config_mask;
 731                pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
 732        } else {
 733                new_reg = current_reg & ~config_mask;
 734                pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
 735        }
 736
 737        if (current_reg != new_reg)
 738                pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
 739
 740        port0 = ap->host->ports[0]->private_data;
 741        port1 = ap->host->ports[1]->private_data;
 742        sdev0 = ap->host->ports[0]->link.device[0].sdev;
 743        sdev1 = ap->host->ports[1]->link.device[0].sdev;
 744        if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
 745            (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
  746                /* We have to set the DMA mask to 32-bit if either port is in
 747                    ATAPI mode, since they are on the same PCI device which is
 748                    used for DMA mapping. If we set the mask we also need to set
 749                    the bounce limit on both ports to ensure that the block
 750                    layer doesn't feed addresses that cause DMA mapping to
 751                    choke. If either SCSI device is not allocated yet, it's OK
 752                    since that port will discover its correct setting when it
 753                    does get allocated.
 754                    Note: Setting 32-bit mask should not fail. */
 755                if (sdev0)
 756                        blk_queue_bounce_limit(sdev0->request_queue,
 757                                               ATA_DMA_MASK);
 758                if (sdev1)
 759                        blk_queue_bounce_limit(sdev1->request_queue,
 760                                               ATA_DMA_MASK);
 761
 762                pci_set_dma_mask(pdev, ATA_DMA_MASK);
 763        } else {
  764                /* This shouldn't fail, as it was set to this value before */
 765                pci_set_dma_mask(pdev, pp->adma_dma_mask);
 766                if (sdev0)
 767                        blk_queue_bounce_limit(sdev0->request_queue,
 768                                               pp->adma_dma_mask);
 769                if (sdev1)
 770                        blk_queue_bounce_limit(sdev1->request_queue,
 771                                               pp->adma_dma_mask);
 772        }
 773
 774        blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
 775        blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
 776        ata_port_printk(ap, KERN_INFO,
 777                "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
 778                (unsigned long long)*ap->host->dev->dma_mask,
 779                segment_boundary, sg_tablesize);
 780
 781        spin_unlock_irqrestore(ap->lock, flags);
 782
 783        return rc;
 784}
 785
 786static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
 787{
 788        struct nv_adma_port_priv *pp = qc->ap->private_data;
 789        return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
 790}
 791
 792static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
 793{
 794        /* Other than when internal or pass-through commands are executed,
 795           the only time this function will be called in ADMA mode will be
 796           if a command fails. In the failure case we don't care about going
 797           into register mode with ADMA commands pending, as the commands will
 798           all shortly be aborted anyway. We assume that NCQ commands are not
 799           issued via passthrough, which is the only way that switching into
 800           ADMA mode could abort outstanding commands. */
 801        nv_adma_register_mode(ap);
 802
 803        ata_sff_tf_read(ap, tf);
 804}
 805
 806static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
 807{
 808        unsigned int idx = 0;
 809
 810        if (tf->flags & ATA_TFLAG_ISADDR) {
 811                if (tf->flags & ATA_TFLAG_LBA48) {
 812                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature | WNB);
 813                        cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
 814                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
 815                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
 816                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
 817                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature);
 818                } else
 819                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature | WNB);
 820
 821                cpb[idx++] = cpu_to_le16((ATA_REG_NSECT  << 8) | tf->nsect);
 822                cpb[idx++] = cpu_to_le16((ATA_REG_LBAL   << 8) | tf->lbal);
 823                cpb[idx++] = cpu_to_le16((ATA_REG_LBAM   << 8) | tf->lbam);
 824                cpb[idx++] = cpu_to_le16((ATA_REG_LBAH   << 8) | tf->lbah);
 825        }
 826
 827        if (tf->flags & ATA_TFLAG_DEVICE)
 828                cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
 829
 830        cpb[idx++] = cpu_to_le16((ATA_REG_CMD    << 8) | tf->command | CMDEND);
 831
 832        while (idx < 12)
 833                cpb[idx++] = cpu_to_le16(IGN);
 834
 835        return idx;
 836}
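
/* For example, a non-LBA48 taskfile with ATA_TFLAG_ISADDR and
 * ATA_TFLAG_DEVICE set packs into seven register-write tuples:
 *
 *	cpb[0] = (ATA_REG_ERR    << 8) | feature | WNB
 *	cpb[1] = (ATA_REG_NSECT  << 8) | nsect
 *	cpb[2] = (ATA_REG_LBAL   << 8) | lbal
 *	cpb[3] = (ATA_REG_LBAM   << 8) | lbam
 *	cpb[4] = (ATA_REG_LBAH   << 8) | lbah
 *	cpb[5] = (ATA_REG_DEVICE << 8) | device
 *	cpb[6] = (ATA_REG_CMD    << 8) | command | CMDEND
 *
 * with the remaining five entries padded with IGN.
 */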
 837
 838static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
 839{
 840        struct nv_adma_port_priv *pp = ap->private_data;
 841        u8 flags = pp->cpb[cpb_num].resp_flags;
 842
 843        VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
 844
 845        if (unlikely((force_err ||
 846                     flags & (NV_CPB_RESP_ATA_ERR |
 847                              NV_CPB_RESP_CMD_ERR |
 848                              NV_CPB_RESP_CPB_ERR)))) {
 849                struct ata_eh_info *ehi = &ap->link.eh_info;
 850                int freeze = 0;
 851
 852                ata_ehi_clear_desc(ehi);
 853                __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
 854                if (flags & NV_CPB_RESP_ATA_ERR) {
 855                        ata_ehi_push_desc(ehi, "ATA error");
 856                        ehi->err_mask |= AC_ERR_DEV;
 857                } else if (flags & NV_CPB_RESP_CMD_ERR) {
 858                        ata_ehi_push_desc(ehi, "CMD error");
 859                        ehi->err_mask |= AC_ERR_DEV;
 860                } else if (flags & NV_CPB_RESP_CPB_ERR) {
 861                        ata_ehi_push_desc(ehi, "CPB error");
 862                        ehi->err_mask |= AC_ERR_SYSTEM;
 863                        freeze = 1;
 864                } else {
 865                        /* notifier error, but no error in CPB flags? */
 866                        ata_ehi_push_desc(ehi, "unknown");
 867                        ehi->err_mask |= AC_ERR_OTHER;
 868                        freeze = 1;
 869                }
 870                /* Kill all commands. EH will determine what actually failed. */
 871                if (freeze)
 872                        ata_port_freeze(ap);
 873                else
 874                        ata_port_abort(ap);
 875                return 1;
 876        }
 877
 878        if (likely(flags & NV_CPB_RESP_DONE)) {
 879                struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
 880                VPRINTK("CPB flags done, flags=0x%x\n", flags);
 881                if (likely(qc)) {
 882                        DPRINTK("Completing qc from tag %d\n", cpb_num);
 883                        ata_qc_complete(qc);
 884                } else {
 885                        struct ata_eh_info *ehi = &ap->link.eh_info;
 886                        /* Notifier bits set without a command may indicate the drive
 887                           is misbehaving. Raise host state machine violation on this
 888                           condition. */
 889                        ata_port_printk(ap, KERN_ERR,
 890                                        "notifier for tag %d with no cmd?\n",
 891                                        cpb_num);
 892                        ehi->err_mask |= AC_ERR_HSM;
 893                        ehi->action |= ATA_EH_RESET;
 894                        ata_port_freeze(ap);
 895                        return 1;
 896                }
 897        }
 898        return 0;
 899}
 900
 901static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
 902{
 903        struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
 904
 905        /* freeze if hotplugged */
 906        if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
 907                ata_port_freeze(ap);
 908                return 1;
 909        }
 910
 911        /* bail out if not our interrupt */
 912        if (!(irq_stat & NV_INT_DEV))
 913                return 0;
 914
 915        /* DEV interrupt w/ no active qc? */
 916        if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
 917                ata_sff_check_status(ap);
 918                return 1;
 919        }
 920
 921        /* handle interrupt */
 922        return ata_sff_host_intr(ap, qc);
 923}
 924
 925static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
 926{
 927        struct ata_host *host = dev_instance;
 928        int i, handled = 0;
 929        u32 notifier_clears[2];
 930
 931        spin_lock(&host->lock);
 932
 933        for (i = 0; i < host->n_ports; i++) {
 934                struct ata_port *ap = host->ports[i];
 935                notifier_clears[i] = 0;
 936
 937                if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
 938                        struct nv_adma_port_priv *pp = ap->private_data;
 939                        void __iomem *mmio = pp->ctl_block;
 940                        u16 status;
 941                        u32 gen_ctl;
 942                        u32 notifier, notifier_error;
 943
 944                        /* if ADMA is disabled, use standard ata interrupt handler */
 945                        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
 946                                u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
 947                                        >> (NV_INT_PORT_SHIFT * i);
 948                                handled += nv_host_intr(ap, irq_stat);
 949                                continue;
 950                        }
 951
 952                        /* if in ATA register mode, check for standard interrupts */
 953                        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
 954                                u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
 955                                        >> (NV_INT_PORT_SHIFT * i);
 956                                if (ata_tag_valid(ap->link.active_tag))
  957                                        /* NV_INT_DEV indication seems unreliable at times
 958                                            at least in ADMA mode. Force it on always when a
 959                                            command is active, to prevent losing interrupts. */
 960                                        irq_stat |= NV_INT_DEV;
 961                                handled += nv_host_intr(ap, irq_stat);
 962                        }
 963
 964                        notifier = readl(mmio + NV_ADMA_NOTIFIER);
 965                        notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
 966                        notifier_clears[i] = notifier | notifier_error;
 967
 968                        gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
 969
 970                        if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
 971                            !notifier_error)
 972                                /* Nothing to do */
 973                                continue;
 974
 975                        status = readw(mmio + NV_ADMA_STAT);
 976
 977                        /* Clear status. Ensure the controller sees the clearing before we start
 978                           looking at any of the CPB statuses, so that any CPB completions after
 979                           this point in the handler will raise another interrupt. */
 980                        writew(status, mmio + NV_ADMA_STAT);
 981                        readw(mmio + NV_ADMA_STAT); /* flush posted write */
 982                        rmb();
 983
 984                        handled++; /* irq handled if we got here */
 985
 986                        /* freeze if hotplugged or controller error */
 987                        if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
 988                                               NV_ADMA_STAT_HOTUNPLUG |
 989                                               NV_ADMA_STAT_TIMEOUT |
 990                                               NV_ADMA_STAT_SERROR))) {
 991                                struct ata_eh_info *ehi = &ap->link.eh_info;
 992
 993                                ata_ehi_clear_desc(ehi);
 994                                __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
 995                                if (status & NV_ADMA_STAT_TIMEOUT) {
 996                                        ehi->err_mask |= AC_ERR_SYSTEM;
 997                                        ata_ehi_push_desc(ehi, "timeout");
 998                                } else if (status & NV_ADMA_STAT_HOTPLUG) {
 999                                        ata_ehi_hotplugged(ehi);
1000                                        ata_ehi_push_desc(ehi, "hotplug");
1001                                } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
1002                                        ata_ehi_hotplugged(ehi);
1003                                        ata_ehi_push_desc(ehi, "hot unplug");
1004                                } else if (status & NV_ADMA_STAT_SERROR) {
1005                                        /* let libata analyze SError and figure out the cause */
1006                                        ata_ehi_push_desc(ehi, "SError");
1007                                } else
1008                                        ata_ehi_push_desc(ehi, "unknown");
1009                                ata_port_freeze(ap);
1010                                continue;
1011                        }
1012
1013                        if (status & (NV_ADMA_STAT_DONE |
1014                                      NV_ADMA_STAT_CPBERR |
1015                                      NV_ADMA_STAT_CMD_COMPLETE)) {
1016                                u32 check_commands = notifier_clears[i];
1017                                int pos, error = 0;
1018
1019                                if (status & NV_ADMA_STAT_CPBERR) {
1020                                        /* Check all active commands */
1021                                        if (ata_tag_valid(ap->link.active_tag))
1022                                                check_commands = 1 <<
1023                                                        ap->link.active_tag;
1024                                        else
 1025                                                check_commands =
 1026                                                        ap->link.sactive;
1027                                }
1028
 1029                                /* Check CPBs for completed commands */
1030                                while ((pos = ffs(check_commands)) && !error) {
1031                                        pos--;
1032                                        error = nv_adma_check_cpb(ap, pos,
1033                                                notifier_error & (1 << pos));
1034                                        check_commands &= ~(1 << pos);
1035                                }
1036                        }
1037                }
1038        }
1039
1040        if (notifier_clears[0] || notifier_clears[1]) {
1041                /* Note: Both notifier clear registers must be written
1042                   if either is set, even if one is zero, according to NVIDIA. */
1043                struct nv_adma_port_priv *pp = host->ports[0]->private_data;
1044                writel(notifier_clears[0], pp->notifier_clear_block);
1045                pp = host->ports[1]->private_data;
1046                writel(notifier_clears[1], pp->notifier_clear_block);
1047        }
1048
1049        spin_unlock(&host->lock);
1050
1051        return IRQ_RETVAL(handled);
1052}
1053
1054static void nv_adma_freeze(struct ata_port *ap)
1055{
1056        struct nv_adma_port_priv *pp = ap->private_data;
1057        void __iomem *mmio = pp->ctl_block;
1058        u16 tmp;
1059
1060        nv_ck804_freeze(ap);
1061
1062        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1063                return;
1064
1065        /* clear any outstanding CK804 notifications */
1066        writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1067                ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1068
1069        /* Disable interrupt */
1070        tmp = readw(mmio + NV_ADMA_CTL);
1071        writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1072                mmio + NV_ADMA_CTL);
1073        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
1074}
1075
1076static void nv_adma_thaw(struct ata_port *ap)
1077{
1078        struct nv_adma_port_priv *pp = ap->private_data;
1079        void __iomem *mmio = pp->ctl_block;
1080        u16 tmp;
1081
1082        nv_ck804_thaw(ap);
1083
1084        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1085                return;
1086
1087        /* Enable interrupt */
1088        tmp = readw(mmio + NV_ADMA_CTL);
1089        writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1090                mmio + NV_ADMA_CTL);
1091        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
1092}
1093
1094static void nv_adma_irq_clear(struct ata_port *ap)
1095{
1096        struct nv_adma_port_priv *pp = ap->private_data;
1097        void __iomem *mmio = pp->ctl_block;
1098        u32 notifier_clears[2];
1099
1100        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
1101                ata_sff_irq_clear(ap);
1102                return;
1103        }
1104
1105        /* clear any outstanding CK804 notifications */
1106        writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1107                ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1108
1109        /* clear ADMA status */
1110        writew(0xffff, mmio + NV_ADMA_STAT);
1111
 1112        /* clear notifiers - note that both ports need to be written with
 1113           something even though we are only clearing one */
1114        if (ap->port_no == 0) {
1115                notifier_clears[0] = 0xFFFFFFFF;
1116                notifier_clears[1] = 0;
1117        } else {
1118                notifier_clears[0] = 0;
1119                notifier_clears[1] = 0xFFFFFFFF;
1120        }
1121        pp = ap->host->ports[0]->private_data;
1122        writel(notifier_clears[0], pp->notifier_clear_block);
1123        pp = ap->host->ports[1]->private_data;
1124        writel(notifier_clears[1], pp->notifier_clear_block);
1125}
1126
1127static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
1128{
1129        struct nv_adma_port_priv *pp = qc->ap->private_data;
1130
1131        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
1132                ata_sff_post_internal_cmd(qc);
1133}
1134
1135static int nv_adma_port_start(struct ata_port *ap)
1136{
1137        struct device *dev = ap->host->dev;
1138        struct nv_adma_port_priv *pp;
1139        int rc;
1140        void *mem;
1141        dma_addr_t mem_dma;
1142        void __iomem *mmio;
1143        struct pci_dev *pdev = to_pci_dev(dev);
1144        u16 tmp;
1145
1146        VPRINTK("ENTER\n");
1147
1148        /* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
1149           pad buffers */
1150        rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1151        if (rc)
1152                return rc;
1153        rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1154        if (rc)
1155                return rc;
1156
1157        rc = ata_port_start(ap);
1158        if (rc)
1159                return rc;
1160
1161        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1162        if (!pp)
1163                return -ENOMEM;
1164
1165        mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
1166               ap->port_no * NV_ADMA_PORT_SIZE;
1167        pp->ctl_block = mmio;
1168        pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
1169        pp->notifier_clear_block = pp->gen_block +
1170               NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
1171
1172        /* Now that the legacy PRD and padding buffer are allocated we can
1173           safely raise the DMA mask to allocate the CPB/APRD table.
 1174           These are allowed to fail since we store the value that ends
 1175           up being used as the bounce limit in slave_config later if
 1176           needed. */
1177        pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1178        pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1179        pp->adma_dma_mask = *dev->dma_mask;
1180
1181        mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
1182                                  &mem_dma, GFP_KERNEL);
1183        if (!mem)
1184                return -ENOMEM;
1185        memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);
1186
1187        /*
1188         * First item in chunk of DMA memory:
1189         * 128-byte command parameter block (CPB)
1190         * one for each command tag
1191         */
1192        pp->cpb     = mem;
1193        pp->cpb_dma = mem_dma;
1194
1195        writel(mem_dma & 0xFFFFFFFF,    mmio + NV_ADMA_CPB_BASE_LOW);
1196        writel((mem_dma >> 16) >> 16,   mmio + NV_ADMA_CPB_BASE_HIGH);
1197
1198        mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1199        mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1200
1201        /*
1202         * Second item: block of ADMA_SGTBL_LEN s/g entries
1203         */
1204        pp->aprd = mem;
1205        pp->aprd_dma = mem_dma;
1206
1207        ap->private_data = pp;
1208
1209        /* clear any outstanding interrupt conditions */
1210        writew(0xffff, mmio + NV_ADMA_STAT);
1211
1212        /* initialize port variables */
1213        pp->flags = NV_ADMA_PORT_REGISTER_MODE;
1214
1215        /* clear CPB fetch count */
1216        writew(0, mmio + NV_ADMA_CPB_COUNT);
1217
1218        /* clear GO for register mode, enable interrupt */
1219        tmp = readw(mmio + NV_ADMA_CTL);
1220        writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1221                NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1222
1223        tmp = readw(mmio + NV_ADMA_CTL);
1224        writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1225        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
1226        udelay(1);
1227        writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1228        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
1229
1230        return 0;
1231}
1232
1233static void nv_adma_port_stop(struct ata_port *ap)
1234{
1235        struct nv_adma_port_priv *pp = ap->private_data;
1236        void __iomem *mmio = pp->ctl_block;
1237
1238        VPRINTK("ENTER\n");
1239        writew(0, mmio + NV_ADMA_CTL);
1240}
1241
1242#ifdef CONFIG_PM
1243static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
1244{
1245        struct nv_adma_port_priv *pp = ap->private_data;
1246        void __iomem *mmio = pp->ctl_block;
1247
1248        /* Go to register mode - clears GO */
1249        nv_adma_register_mode(ap);
1250
1251        /* clear CPB fetch count */
1252        writew(0, mmio + NV_ADMA_CPB_COUNT);
1253
1254        /* disable interrupt, shut down port */
1255        writew(0, mmio + NV_ADMA_CTL);
1256
1257        return 0;
1258}
1259
1260static int nv_adma_port_resume(struct ata_port *ap)
1261{
1262        struct nv_adma_port_priv *pp = ap->private_data;
1263        void __iomem *mmio = pp->ctl_block;
1264        u16 tmp;
1265
1266        /* set CPB block location */
1267        writel(pp->cpb_dma & 0xFFFFFFFF,        mmio + NV_ADMA_CPB_BASE_LOW);
1268        writel((pp->cpb_dma >> 16) >> 16,       mmio + NV_ADMA_CPB_BASE_HIGH);
1269
1270        /* clear any outstanding interrupt conditions */
1271        writew(0xffff, mmio + NV_ADMA_STAT);
1272
1273        /* initialize port variables */
1274        pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
1275
1276        /* clear CPB fetch count */
1277        writew(0, mmio + NV_ADMA_CPB_COUNT);
1278
1279        /* clear GO for register mode, enable interrupt */
1280        tmp = readw(mmio + NV_ADMA_CTL);
1281        writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1282                NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1283
1284        tmp = readw(mmio + NV_ADMA_CTL);
1285        writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1286        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
1287        udelay(1);
1288        writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1289        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
1290
1291        return 0;
1292}
1293#endif
1294
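/* Point the SFF taskfile accessors at the port's ADMA register mirror:
   each shadow register sits at a 4-byte stride from the port base, with
   the control/altstatus register at fixed offset 0x20. */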
1295static void nv_adma_setup_port(struct ata_port *ap)
1296{
1297        void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1298        struct ata_ioports *ioport = &ap->ioaddr;
1299
1300        VPRINTK("ENTER\n");
1301
1302        mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
1303
1304        ioport->cmd_addr        = mmio;
1305        ioport->data_addr       = mmio + (ATA_REG_DATA * 4);
1306        ioport->error_addr      =
1307        ioport->feature_addr    = mmio + (ATA_REG_ERR * 4);
1308        ioport->nsect_addr      = mmio + (ATA_REG_NSECT * 4);
1309        ioport->lbal_addr       = mmio + (ATA_REG_LBAL * 4);
1310        ioport->lbam_addr       = mmio + (ATA_REG_LBAM * 4);
1311        ioport->lbah_addr       = mmio + (ATA_REG_LBAH * 4);
1312        ioport->device_addr     = mmio + (ATA_REG_DEVICE * 4);
1313        ioport->status_addr     =
1314        ioport->command_addr    = mmio + (ATA_REG_STATUS * 4);
1315        ioport->altstatus_addr  =
1316        ioport->ctl_addr        = mmio + 0x20;
1317}
1318
1319static int nv_adma_host_init(struct ata_host *host)
1320{
1321        struct pci_dev *pdev = to_pci_dev(host->dev);
1322        unsigned int i;
1323        u32 tmp32;
1324
1325        VPRINTK("ENTER\n");
1326
1327        /* enable ADMA on the ports */
1328        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1329        tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1330                 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1331                 NV_MCP_SATA_CFG_20_PORT1_EN |
1332                 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1333
1334        pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1335
1336        for (i = 0; i < host->n_ports; i++)
1337                nv_adma_setup_port(host->ports[i]);
1338
1339        return 0;
1340}
1341
1342static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1343                              struct scatterlist *sg,
1344                              int idx,
1345                              struct nv_adma_prd *aprd)
1346{
1347        u8 flags = 0;
1348        if (qc->tf.flags & ATA_TFLAG_WRITE)
1349                flags |= NV_APRD_WRITE;
1350        if (idx == qc->n_elem - 1)
1351                flags |= NV_APRD_END;
1352        else if (idx != 4)
1353                flags |= NV_APRD_CONT;
1354
1355        aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
1356        aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1357        aprd->flags = flags;
1358        aprd->packet_len = 0;
1359}
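
/*
 * Editor's note: a worked example of the APRD flag scheme above,
 * inferred from this routine together with nv_adma_fill_sg() below.
 * Each CPB embeds five APRDs; longer scatterlists spill into a per-tag
 * external table reached via cpb->next_aprd.  For a 7-element list:
 *
 *   idx 0..3 -> cpb->aprd[0..3], flagged NV_APRD_CONT
 *   idx 4    -> cpb->aprd[4], no CONT flag (the "idx != 4" test):
 *               continuation is implied by next_aprd instead
 *   idx 5    -> pp->aprd[NV_ADMA_SGTBL_LEN * tag + 0], flagged CONT
 *   idx 6    -> pp->aprd[NV_ADMA_SGTBL_LEN * tag + 1], flagged END
 */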
1360
1361static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1362{
1363        struct nv_adma_port_priv *pp = qc->ap->private_data;
1364        struct nv_adma_prd *aprd;
1365        struct scatterlist *sg;
1366        unsigned int si;
1367
1368        VPRINTK("ENTER\n");
1369
1370        for_each_sg(qc->sg, sg, qc->n_elem, si) {
1371                aprd = (si < 5) ? &cpb->aprd[si] :
1372                               &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
1373                nv_adma_fill_aprd(qc, sg, si, aprd);
1374        }
1375        if (si > 5)
1376                cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
1377        else
1378                cpb->next_aprd = cpu_to_le64(0);
1379}
1380
1381static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1382{
1383        struct nv_adma_port_priv *pp = qc->ap->private_data;
1384
1385        /* ADMA engine can only be used for non-ATAPI DMA commands,
1386           or interrupt-driven no-data commands. */
1387        if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1388           (qc->tf.flags & ATA_TFLAG_POLLING))
1389                return 1;
1390
1391        if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1392           (qc->tf.protocol == ATA_PROT_NODATA))
1393                return 0;
1394
1395        return 1;
1396}
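
/*
 * Editor's note, summarizing the decision above: register mode (1) is
 * forced while the port is configured for ATAPI or when the command is
 * polled; DMA-mapped and interrupt-driven no-data commands use ADMA
 * (0); everything else, e.g. PIO data transfers, also falls back to
 * register mode.
 */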
1397
1398static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1399{
1400        struct nv_adma_port_priv *pp = qc->ap->private_data;
1401        struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
1402        u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1403                       NV_CPB_CTL_IEN;
1404
1405        if (nv_adma_use_reg_mode(qc)) {
1406                BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1407                        (qc->flags & ATA_QCFLAG_DMAMAP));
1408                nv_adma_register_mode(qc->ap);
1409                ata_sff_qc_prep(qc);
1410                return;
1411        }
1412
1413        cpb->resp_flags = NV_CPB_RESP_DONE;
1414        wmb();
1415        cpb->ctl_flags = 0;
1416        wmb();
1417
1418        cpb->len                = 3;
1419        cpb->tag                = qc->tag;
1420        cpb->next_cpb_idx       = 0;
1421
1422        /* turn on NCQ flags for NCQ commands */
1423        if (qc->tf.protocol == ATA_PROT_NCQ)
1424                ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1425
1426        VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1427
1428        nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1429
1430        if (qc->flags & ATA_QCFLAG_DMAMAP) {
1431                nv_adma_fill_sg(qc, cpb);
1432                ctl_flags |= NV_CPB_CTL_APRD_VALID;
1433        } else
1434                memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1435
1436        /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
1437           until we are finished filling in all of the contents */
1438        wmb();
1439        cpb->ctl_flags = ctl_flags;
1440        wmb();
1441        cpb->resp_flags = 0;
1442}
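
/*
 * Editor's note on the ordering protocol above (a sketch, assuming the
 * controller may fetch the CPB concurrently with the CPU filling it):
 * resp_flags is parked at DONE and ctl_flags cleared before any other
 * field is touched, so the hardware never sees a half-built CPB as
 * valid.  Only after every field is written, with wmb() enforcing the
 * order, does ctl_flags gain NV_CPB_CTL_CPB_VALID; resp_flags is
 * cleared last so a stale DONE cannot be mistaken for completion of
 * the new command.
 */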
1443
1444static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1445{
1446        struct nv_adma_port_priv *pp = qc->ap->private_data;
1447        void __iomem *mmio = pp->ctl_block;
1448        int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
1449
1450        VPRINTK("ENTER\n");
1451
1452        /* We can't handle result taskfile with NCQ commands, since
1453           retrieving the taskfile switches us out of ADMA mode and would abort
1454           existing commands. */
1455        if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
1456                     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
1457                ata_dev_printk(qc->dev, KERN_ERR,
1458                        "NCQ w/ RESULT_TF not allowed\n");
1459                return AC_ERR_SYSTEM;
1460        }
1461
1462        if (nv_adma_use_reg_mode(qc)) {
1463                /* use ATA register mode */
1464                VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
1465                BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1466                        (qc->flags & ATA_QCFLAG_DMAMAP));
1467                nv_adma_register_mode(qc->ap);
1468                return ata_sff_qc_issue(qc);
1469        } else
1470                nv_adma_mode(qc->ap);
1471
1472        /* write append register: command tag in the lower 8 bits
1473           and (number of CPBs to append - 1) in the top 8 bits */
1474        wmb();
1475
1476        if (curr_ncq != pp->last_issue_ncq) {
1477                /* Seems to need some delay before switching between NCQ and
1478                   non-NCQ commands, else we get command timeouts and such. */
1479                udelay(20);
1480                pp->last_issue_ncq = curr_ncq;
1481        }
1482
1483        writew(qc->tag, mmio + NV_ADMA_APPEND);
1484
1485        DPRINTK("Issued tag %u\n", qc->tag);
1486
1487        return 0;
1488}
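
/*
 * Editor's note: per the append-register comment above, the tag lives
 * in bits 0-7 and (number of CPBs - 1) in bits 8-15, so the bare
 * writew(qc->tag, ...) is shorthand for appending exactly one CPB:
 *
 *	writew(((1 - 1) << 8) | qc->tag, mmio + NV_ADMA_APPEND);
 */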
1489
1490static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1491{
1492        struct ata_host *host = dev_instance;
1493        unsigned int i;
1494        unsigned int handled = 0;
1495        unsigned long flags;
1496
1497        spin_lock_irqsave(&host->lock, flags);
1498
1499        for (i = 0; i < host->n_ports; i++) {
1500                struct ata_port *ap;
1501
1502                ap = host->ports[i];
1503                if (ap &&
1504                    !(ap->flags & ATA_FLAG_DISABLED)) {
1505                        struct ata_queued_cmd *qc;
1506
1507                        qc = ata_qc_from_tag(ap, ap->link.active_tag);
1508                        if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
1509                                handled += ata_sff_host_intr(ap, qc);
1510                        else
1511                                // No request pending?  Read the status
1512                                // register anyway to clear a stale interrupt.
1513                                ap->ops->sff_check_status(ap);
1514                }
1515
1516        }
1517
1518        spin_unlock_irqrestore(&host->lock, flags);
1519
1520        return IRQ_RETVAL(handled);
1521}
1522
1523static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1524{
1525        int i, handled = 0;
1526
1527        for (i = 0; i < host->n_ports; i++) {
1528                struct ata_port *ap = host->ports[i];
1529
1530                if (ap && !(ap->flags & ATA_FLAG_DISABLED))
1531                        handled += nv_host_intr(ap, irq_stat);
1532
1533                irq_stat >>= NV_INT_PORT_SHIFT;
1534        }
1535
1536        return IRQ_RETVAL(handled);
1537}
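
/*
 * Editor's note: irq_stat packs one 4-bit status nibble per port
 * (NV_INT_PORT_SHIFT == 4), so shifting right by four each iteration
 * walks the ports.  For example, irq_stat == 0x41 decodes as
 * NV_INT_DEV on port 0 and NV_INT_ADDED on port 1.
 */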
1538
1539static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1540{
1541        struct ata_host *host = dev_instance;
1542        u8 irq_stat;
1543        irqreturn_t ret;
1544
1545        spin_lock(&host->lock);
1546        irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1547        ret = nv_do_interrupt(host, irq_stat);
1548        spin_unlock(&host->lock);
1549
1550        return ret;
1551}
1552
1553static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1554{
1555        struct ata_host *host = dev_instance;
1556        u8 irq_stat;
1557        irqreturn_t ret;
1558
1559        spin_lock(&host->lock);
1560        irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1561        ret = nv_do_interrupt(host, irq_stat);
1562        spin_unlock(&host->lock);
1563
1564        return ret;
1565}
1566
1567static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1568{
1569        if (sc_reg > SCR_CONTROL)
1570                return -EINVAL;
1571
1572        *val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
1573        return 0;
1574}
1575
1576static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1577{
1578        if (sc_reg > SCR_CONTROL)
1579                return -EINVAL;
1580
1581        iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
1582        return 0;
1583}
1584
1585static int nv_hardreset(struct ata_link *link, unsigned int *class,
1586                        unsigned long deadline)
1587{
1588        struct ata_eh_context *ehc = &link->eh_context;
1589
1590        /* Do hardreset iff it's post-boot probing; see the
1591         * comment above port ops for details.
1592         */
1593        if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
1594            !ata_dev_enabled(link->device))
1595                sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
1596                                    NULL, NULL);
1597        else {
1598                const unsigned long *timing = sata_ehc_deb_timing(ehc);
1599                int rc;
1600
1601                if (!(ehc->i.flags & ATA_EHI_QUIET))
1602                        ata_link_printk(link, KERN_INFO, "nv: skipping "
1603                                        "hardreset on occupied port\n");
1604
1605                /* make sure the link is online */
1606                rc = sata_link_resume(link, timing, deadline);
1607                /* whine about phy resume failure but proceed */
1608                if (rc && rc != -EOPNOTSUPP)
1609                        ata_link_printk(link, KERN_WARNING, "failed to resume "
1610                                        "link (errno=%d)\n", rc);
1611        }
1612
1613        /* device signature acquisition is unreliable */
1614        return -EAGAIN;
1615}
1616
1617static void nv_nf2_freeze(struct ata_port *ap)
1618{
1619        void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1620        int shift = ap->port_no * NV_INT_PORT_SHIFT;
1621        u8 mask;
1622
1623        mask = ioread8(scr_addr + NV_INT_ENABLE);
1624        mask &= ~(NV_INT_ALL << shift);
1625        iowrite8(mask, scr_addr + NV_INT_ENABLE);
1626}
1627
1628static void nv_nf2_thaw(struct ata_port *ap)
1629{
1630        void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1631        int shift = ap->port_no * NV_INT_PORT_SHIFT;
1632        u8 mask;
1633
1634        iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1635
1636        mask = ioread8(scr_addr + NV_INT_ENABLE);
1637        mask |= (NV_INT_MASK << shift);
1638        iowrite8(mask, scr_addr + NV_INT_ENABLE);
1639}
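
/*
 * Editor's note: freeze/thaw above use the usual read-modify-write
 * pattern on the shared enable register.  Freeze clears this port's
 * NV_INT_ALL nibble; thaw first acks any pending status for the port,
 * then re-enables NV_INT_MASK in its nibble, leaving the other port's
 * bits untouched.
 */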
1640
1641static void nv_ck804_freeze(struct ata_port *ap)
1642{
1643        void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1644        int shift = ap->port_no * NV_INT_PORT_SHIFT;
1645        u8 mask;
1646
1647        mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1648        mask &= ~(NV_INT_ALL << shift);
1649        writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1650}
1651
1652static void nv_ck804_thaw(struct ata_port *ap)
1653{
1654        void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1655        int shift = ap->port_no * NV_INT_PORT_SHIFT;
1656        u8 mask;
1657
1658        writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1659
1660        mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1661        mask |= (NV_INT_MASK << shift);
1662        writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1663}
1664
1665static void nv_mcp55_freeze(struct ata_port *ap)
1666{
1667        void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1668        int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1669        u32 mask;
1670
1671        writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1672
1673        mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1674        mask &= ~(NV_INT_ALL_MCP55 << shift);
1675        writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1676        ata_sff_freeze(ap);
1677}
1678
1679static void nv_mcp55_thaw(struct ata_port *ap)
1680{
1681        void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1682        int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1683        u32 mask;
1684
1685        writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1686
1687        mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1688        mask |= (NV_INT_MASK_MCP55 << shift);
1689        writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1690        ata_sff_thaw(ap);
1691}
1692
1693static void nv_adma_error_handler(struct ata_port *ap)
1694{
1695        struct nv_adma_port_priv *pp = ap->private_data;
1696        if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1697                void __iomem *mmio = pp->ctl_block;
1698                int i;
1699                u16 tmp;
1700
1701                if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
1702                        u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1703                        u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1704                        u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1705                        u32 status = readw(mmio + NV_ADMA_STAT);
1706                        u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1707                        u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
1708
1709                        ata_port_printk(ap, KERN_ERR,
1710                                "EH in ADMA mode, notifier 0x%X "
1711                                "notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1712                                "next cpb count 0x%X next cpb idx 0x%x\n",
1713                                notifier, notifier_error, gen_ctl, status,
1714                                cpb_count, next_cpb_idx);
1715
1716                        for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
1717                                struct nv_adma_cpb *cpb = &pp->cpb[i];
1718                                if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
1719                                    ap->link.sactive & (1 << i))
1720                                        ata_port_printk(ap, KERN_ERR,
1721                                                "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1722                                                i, cpb->ctl_flags, cpb->resp_flags);
1723                        }
1724                }
1725
1726                /* Push us back into port register mode for error handling. */
1727                nv_adma_register_mode(ap);
1728
1729                /* Mark all of the CPBs as invalid to prevent them from
1730                   being executed */
1731                for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1732                        pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1733
1734                /* clear CPB fetch count */
1735                writew(0, mmio + NV_ADMA_CPB_COUNT);
1736
1737                /* Reset channel */
1738                tmp = readw(mmio + NV_ADMA_CTL);
1739                writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1740                readw(mmio + NV_ADMA_CTL);      /* flush posted write */
1741                udelay(1);
1742                writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1743                readw(mmio + NV_ADMA_CTL);      /* flush posted write */
1744        }
1745
1746        ata_sff_error_handler(ap);
1747}
1748
1749static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1750{
1751        struct nv_swncq_port_priv *pp = ap->private_data;
1752        struct defer_queue *dq = &pp->defer_queue;
1753
1754        /* queue is full */
1755        WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1756        dq->defer_bits |= (1 << qc->tag);
1757        dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
1758}
1759
1760static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1761{
1762        struct nv_swncq_port_priv *pp = ap->private_data;
1763        struct defer_queue *dq = &pp->defer_queue;
1764        unsigned int tag;
1765
1766        if (dq->head == dq->tail)       /* null queue */
1767                return NULL;
1768
1769        tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1770        dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1771        WARN_ON(!(dq->defer_bits & (1 << tag)));
1772        dq->defer_bits &= ~(1 << tag);
1773
1774        return ata_qc_from_tag(ap, tag);
1775}
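
/*
 * Editor's note: the defer queue is a ring of ATA_MAX_QUEUE tag slots.
 * head and tail only ever grow and are masked with (ATA_MAX_QUEUE - 1)
 * on access, so tail - head is the fill level, head == tail means
 * empty, and the WARN_ON above catches overflow.  defer_bits mirrors
 * the queued tags as a bitmap for O(1) membership checks.
 */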
1776
1777static void nv_swncq_fis_reinit(struct ata_port *ap)
1778{
1779        struct nv_swncq_port_priv *pp = ap->private_data;
1780
1781        pp->dhfis_bits = 0;
1782        pp->dmafis_bits = 0;
1783        pp->sdbfis_bits = 0;
1784        pp->ncq_flags = 0;
1785}
1786
1787static void nv_swncq_pp_reinit(struct ata_port *ap)
1788{
1789        struct nv_swncq_port_priv *pp = ap->private_data;
1790        struct defer_queue *dq = &pp->defer_queue;
1791
1792        dq->head = 0;
1793        dq->tail = 0;
1794        dq->defer_bits = 0;
1795        pp->qc_active = 0;
1796        pp->last_issue_tag = ATA_TAG_POISON;
1797        nv_swncq_fis_reinit(ap);
1798}
1799
1800static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1801{
1802        struct nv_swncq_port_priv *pp = ap->private_data;
1803
1804        writew(fis, pp->irq_block);
1805}
1806
1807static void __ata_bmdma_stop(struct ata_port *ap)
1808{
1809        struct ata_queued_cmd qc;
1810
1811        qc.ap = ap;
1812        ata_bmdma_stop(&qc);
1813}
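
/*
 * Editor's note: ata_bmdma_stop() only needs qc->ap, so the wrapper
 * above can hand it a throwaway on-stack qc when no real queued
 * command exists (an observation about this usage, not a guarantee of
 * the libata API).
 */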
1814
1815static void nv_swncq_ncq_stop(struct ata_port *ap)
1816{
1817        struct nv_swncq_port_priv *pp = ap->private_data;
1818        unsigned int i;
1819        u32 sactive;
1820        u32 done_mask;
1821
1822        ata_port_printk(ap, KERN_ERR,
1823                        "EH in SWNCQ mode, QC: qc_active 0x%X sactive 0x%X\n",
1824                        ap->qc_active, ap->link.sactive);
1825        ata_port_printk(ap, KERN_ERR,
1826                "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n  "
1827                "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1828                pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1829                pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1830
1831        ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
1832                        ap->ops->sff_check_status(ap),
1833                        ioread8(ap->ioaddr.error_addr));
1834
1835        sactive = readl(pp->sactive_block);
1836        done_mask = pp->qc_active ^ sactive;
1837
1838        ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sactive\n");
1839        for (i = 0; i < ATA_MAX_QUEUE; i++) {
1840                u8 err = 0;
1841                if (pp->qc_active & (1 << i))
1842                        err = 0;
1843                else if (done_mask & (1 << i))
1844                        err = 1;
1845                else
1846                        continue;
1847
1848                ata_port_printk(ap, KERN_ERR,
1849                                "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1850                                (pp->dhfis_bits >> i) & 0x1,
1851                                (pp->dmafis_bits >> i) & 0x1,
1852                                (pp->sdbfis_bits >> i) & 0x1,
1853                                (sactive >> i) & 0x1,
1854                                (err ? "error! tag doesn't exist" : " "));
1855        }
1856
1857        nv_swncq_pp_reinit(ap);
1858        ap->ops->sff_irq_clear(ap);
1859        __ata_bmdma_stop(ap);
1860        nv_swncq_irq_clear(ap, 0xffff);
1861}
1862
1863static void nv_swncq_error_handler(struct ata_port *ap)
1864{
1865        struct ata_eh_context *ehc = &ap->link.eh_context;
1866
1867        if (ap->link.sactive) {
1868                nv_swncq_ncq_stop(ap);
1869                ehc->i.action |= ATA_EH_RESET;
1870        }
1871
1872        ata_sff_error_handler(ap);
1873}
1874
1875#ifdef CONFIG_PM
1876static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1877{
1878        void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1879        u32 tmp;
1880
1881        /* clear irq */
1882        writel(~0, mmio + NV_INT_STATUS_MCP55);
1883
1884        /* disable irq */
1885        writel(0, mmio + NV_INT_ENABLE_MCP55);
1886
1887        /* disable swncq */
1888        tmp = readl(mmio + NV_CTL_MCP55);
1889        tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1890        writel(tmp, mmio + NV_CTL_MCP55);
1891
1892        return 0;
1893}
1894
1895static int nv_swncq_port_resume(struct ata_port *ap)
1896{
1897        void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1898        u32 tmp;
1899
1900        /* clear irq */
1901        writel(~0, mmio + NV_INT_STATUS_MCP55);
1902
1903        /* enable irq */
1904        writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1905
1906        /* enable swncq */
1907        tmp = readl(mmio + NV_CTL_MCP55);
1908        writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1909
1910        return 0;
1911}
1912#endif
1913
1914static void nv_swncq_host_init(struct ata_host *host)
1915{
1916        u32 tmp;
1917        void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1918        struct pci_dev *pdev = to_pci_dev(host->dev);
1919        u8 regval;
1920
1921        /* disable ECO 398 */
1922        pci_read_config_byte(pdev, 0x7f, &regval);
1923        regval &= ~(1 << 7);
1924        pci_write_config_byte(pdev, 0x7f, regval);
1925
1926        /* enable swncq */
1927        tmp = readl(mmio + NV_CTL_MCP55);
1928        VPRINTK("HOST_CTL:0x%X\n", tmp);
1929        writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1930
1931        /* enable irq intr */
1932        tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1933        VPRINTK("HOST_ENABLE:0x%X\n", tmp);
1934        writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1935
1936        /* clear port irq */
1937        writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1938}
1939
1940static int nv_swncq_slave_config(struct scsi_device *sdev)
1941{
1942        struct ata_port *ap = ata_shost_to_port(sdev->host);
1943        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1944        struct ata_device *dev;
1945        int rc;
1946        u8 rev;
1947        u8 check_maxtor = 0;
1948        unsigned char model_num[ATA_ID_PROD_LEN + 1];
1949
1950        rc = ata_scsi_slave_config(sdev);
1951        if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1952                /* Not a proper libata device, ignore */
1953                return rc;
1954
1955        dev = &ap->link.device[sdev->id];
1956        if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1957                return rc;
1958
1959        /* if MCP51 and Maxtor, then disable ncq */
1960        if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1961                pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1962                check_maxtor = 1;
1963
1964        /* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1965        if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1966                pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1967                pci_read_config_byte(pdev, 0x8, &rev);
1968                if (rev <= 0xa2)
1969                        check_maxtor = 1;
1970        }
1971
1972        if (!check_maxtor)
1973                return rc;
1974
1975        ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1976
1977        if (strncmp(model_num, "Maxtor", 6) == 0) {
1978                ata_scsi_change_queue_depth(sdev, 1);
1979                ata_dev_printk(dev, KERN_NOTICE,
1980                        "Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
1981        }
1982
1983        return rc;
1984}
1985
1986static int nv_swncq_port_start(struct ata_port *ap)
1987{
1988        struct device *dev = ap->host->dev;
1989        void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1990        struct nv_swncq_port_priv *pp;
1991        int rc;
1992
1993        rc = ata_port_start(ap);
1994        if (rc)
1995                return rc;
1996
1997        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1998        if (!pp)
1999                return -ENOMEM;
2000
2001        pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
2002                                      &pp->prd_dma, GFP_KERNEL);
2003        if (!pp->prd)
2004                return -ENOMEM;
2005        memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
2006
2007        ap->private_data = pp;
2008        pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
2009        pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
2010        pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
2011
2012        return 0;
2013}
2014
2015static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
2016{
2017        if (qc->tf.protocol != ATA_PROT_NCQ) {
2018                ata_sff_qc_prep(qc);
2019                return;
2020        }
2021
2022        if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2023                return;
2024
2025        nv_swncq_fill_sg(qc);
2026}
2027
2028static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
2029{
2030        struct ata_port *ap = qc->ap;
2031        struct scatterlist *sg;
2032        struct nv_swncq_port_priv *pp = ap->private_data;
2033        struct ata_prd *prd;
2034        unsigned int si, idx;
2035
2036        prd = pp->prd + ATA_MAX_PRD * qc->tag;
2037
2038        idx = 0;
2039        for_each_sg(qc->sg, sg, qc->n_elem, si) {
2040                u32 addr, offset;
2041                u32 sg_len, len;
2042
2043                addr = (u32)sg_dma_address(sg);
2044                sg_len = sg_dma_len(sg);
2045
2046                while (sg_len) {
2047                        offset = addr & 0xffff;
2048                        len = sg_len;
2049                        if ((offset + sg_len) > 0x10000)
2050                                len = 0x10000 - offset;
2051
2052                        prd[idx].addr = cpu_to_le32(addr);
2053                        prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2054
2055                        idx++;
2056                        sg_len -= len;
2057                        addr += len;
2058                }
2059        }
2060
2061        prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2062}
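
/*
 * Editor's note: the loop above splits any segment that would cross a
 * 64 KiB boundary, following the usual BMDMA PRD rules.  Example: a
 * segment at 0x0000f000 of length 0x3000 becomes two entries,
 * 0x0000f000/0x1000 and 0x00010000/0x2000.  A full 0x10000-byte piece
 * is stored as (len & 0xffff) == 0, which BMDMA interprets as 64 KiB.
 */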
2063
2064static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
2065                                          struct ata_queued_cmd *qc)
2066{
2067        struct nv_swncq_port_priv *pp = ap->private_data;
2068
2069        if (qc == NULL)
2070                return 0;
2071
2072        DPRINTK("Enter\n");
2073
2074        writel((1 << qc->tag), pp->sactive_block);
2075        pp->last_issue_tag = qc->tag;
2076        pp->dhfis_bits &= ~(1 << qc->tag);
2077        pp->dmafis_bits &= ~(1 << qc->tag);
2078        pp->qc_active |= (0x1 << qc->tag);
2079
2080        ap->ops->sff_tf_load(ap, &qc->tf);       /* load tf registers */
2081        ap->ops->sff_exec_command(ap, &qc->tf);
2082
2083        DPRINTK("Issued tag %u\n", qc->tag);
2084
2085        return 0;
2086}
2087
2088static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2089{
2090        struct ata_port *ap = qc->ap;
2091        struct nv_swncq_port_priv *pp = ap->private_data;
2092
2093        if (qc->tf.protocol != ATA_PROT_NCQ)
2094                return ata_sff_qc_issue(qc);
2095
2096        DPRINTK("Enter\n");
2097
2098        if (!pp->qc_active)
2099                nv_swncq_issue_atacmd(ap, qc);
2100        else
2101                nv_swncq_qc_to_dq(ap, qc);      /* add qc to defer queue */
2102
2103        return 0;
2104}
2105
2106static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2107{
2108        u32 serror;
2109        struct ata_eh_info *ehi = &ap->link.eh_info;
2110
2111        ata_ehi_clear_desc(ehi);
2112
2113        /* AHCI needs SError cleared; otherwise, it might lock up */
2114        sata_scr_read(&ap->link, SCR_ERROR, &serror);
2115        sata_scr_write(&ap->link, SCR_ERROR, serror);
2116
2117        /* analyze @fis */
2118        if (fis & NV_SWNCQ_IRQ_ADDED)
2119                ata_ehi_push_desc(ehi, "hot plug");
2120        else if (fis & NV_SWNCQ_IRQ_REMOVED)
2121                ata_ehi_push_desc(ehi, "hot unplug");
2122
2123        ata_ehi_hotplugged(ehi);
2124
2125        /* okay, let's hand over to EH */
2126        ehi->serror |= serror;
2127
2128        ata_port_freeze(ap);
2129}
2130
2131static int nv_swncq_sdbfis(struct ata_port *ap)
2132{
2133        struct ata_queued_cmd *qc;
2134        struct nv_swncq_port_priv *pp = ap->private_data;
2135        struct ata_eh_info *ehi = &ap->link.eh_info;
2136        u32 sactive;
2137        int nr_done = 0;
2138        u32 done_mask;
2139        int i;
2140        u8 host_stat;
2141        u8 lack_dhfis = 0;
2142
2143        host_stat = ap->ops->bmdma_status(ap);
2144        if (unlikely(host_stat & ATA_DMA_ERR)) {
2145                /* error when transferring data to/from memory */
2146                ata_ehi_clear_desc(ehi);
2147                ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2148                ehi->err_mask |= AC_ERR_HOST_BUS;
2149                ehi->action |= ATA_EH_RESET;
2150                return -EINVAL;
2151        }
2152
2153        ap->ops->sff_irq_clear(ap);
2154        __ata_bmdma_stop(ap);
2155
2156        sactive = readl(pp->sactive_block);
2157        done_mask = pp->qc_active ^ sactive;
2158
2159        if (unlikely(done_mask & sactive)) {
2160                ata_ehi_clear_desc(ehi);
2161                ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition "
2162                                  "(%08x->%08x)", pp->qc_active, sactive);
2163                ehi->err_mask |= AC_ERR_HSM;
2164                ehi->action |= ATA_EH_RESET;
2165                return -EINVAL;
2166        }
2167        for (i = 0; i < ATA_MAX_QUEUE; i++) {
2168                if (!(done_mask & (1 << i)))
2169                        continue;
2170
2171                qc = ata_qc_from_tag(ap, i);
2172                if (qc) {
2173                        ata_qc_complete(qc);
2174                        pp->qc_active &= ~(1 << i);
2175                        pp->dhfis_bits &= ~(1 << i);
2176                        pp->dmafis_bits &= ~(1 << i);
2177                        pp->sdbfis_bits |= (1 << i);
2178                        nr_done++;
2179                }
2180        }
2181
2182        if (!ap->qc_active) {
2183                DPRINTK("over\n");
2184                nv_swncq_pp_reinit(ap);
2185                return nr_done;
2186        }
2187
2188        if (pp->qc_active & pp->dhfis_bits)
2189                return nr_done;
2190
2191        if ((pp->ncq_flags & ncq_saw_backout) ||
2192            (pp->qc_active ^ pp->dhfis_bits))
2193                /* if the controller can't get a device-to-host register FIS,
2194                 * the driver needs to reissue the command.
2195                 */
2196                lack_dhfis = 1;
2197
2198        DPRINTK("id 0x%x QC: qc_active 0x%x, "
2199                "SWNCQ:qc_active 0x%X defer_bits %X "
2200                "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2201                ap->print_id, ap->qc_active, pp->qc_active,
2202                pp->defer_queue.defer_bits, pp->dhfis_bits,
2203                pp->dmafis_bits, pp->last_issue_tag);
2204
2205        nv_swncq_fis_reinit(ap);
2206
2207        if (lack_dhfis) {
2208                qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2209                nv_swncq_issue_atacmd(ap, qc);
2210                return nr_done;
2211        }
2212
2213        if (pp->defer_queue.defer_bits) {
2214                /* send deferral queue command */
2215                qc = nv_swncq_qc_from_dq(ap);
2216                WARN_ON(qc == NULL);
2217                nv_swncq_issue_atacmd(ap, qc);
2218        }
2219
2220        return nr_done;
2221}
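
/*
 * Editor's note: done_mask = pp->qc_active ^ sactive works because a
 * tag the driver issued (bit set in qc_active) that the device has
 * since cleared from SActive is precisely a completed command, e.g.
 * qc_active 0x07 and sactive 0x01 give done_mask 0x06 (tags 1 and 2
 * done).  A bit set in both done_mask and sactive would mean the
 * device activated a tag that was never issued, hence the AC_ERR_HSM
 * check above.
 */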
2222
2223static inline u32 nv_swncq_tag(struct ata_port *ap)
2224{
2225        struct nv_swncq_port_priv *pp = ap->private_data;
2226        u32 tag;
2227
2228        tag = readb(pp->tag_block) >> 2;
2229        return (tag & 0x1f);
2230}
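
/*
 * Editor's note: the tag register apparently holds the active NCQ tag
 * in bits 2-6, hence the ">> 2" and the 0x1f mask (tags 0-31).  No
 * public datasheet is available; this is inferred from the extraction
 * code above.
 */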
2231
2232static int nv_swncq_dmafis(struct ata_port *ap)
2233{
2234        struct ata_queued_cmd *qc;
2235        unsigned int rw;
2236        u8 dmactl;
2237        u32 tag;
2238        struct nv_swncq_port_priv *pp = ap->private_data;
2239
2240        __ata_bmdma_stop(ap);
2241        tag = nv_swncq_tag(ap);
2242
2243        DPRINTK("dma setup tag 0x%x\n", tag);
2244        qc = ata_qc_from_tag(ap, tag);
2245
2246        if (unlikely(!qc))
2247                return 0;
2248
2249        rw = qc->tf.flags & ATA_TFLAG_WRITE;
2250
2251        /* load PRD table addr. */
2252        iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
2253                  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2254
2255        /* specify data direction, triple-check start bit is clear */
2256        dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2257        dmactl &= ~ATA_DMA_WR;
2258        if (!rw)
2259                dmactl |= ATA_DMA_WR;
2260
2261        iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2262
2263        return 1;
2264}
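
/*
 * Editor's note: ATA_DMA_WR tells the BMDMA engine to write to system
 * memory, i.e. a device-to-host transfer, so "if (!rw)" (the taskfile
 * is not a write) correctly sets ATA_DMA_WR for reads.
 */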
2265
2266static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2267{
2268        struct nv_swncq_port_priv *pp = ap->private_data;
2269        struct ata_queued_cmd *qc;
2270        struct ata_eh_info *ehi = &ap->link.eh_info;
2271        u32 serror;
2272        u8 ata_stat;
2273        int rc = 0;
2274
2275        ata_stat = ap->ops->sff_check_status(ap);
2276        nv_swncq_irq_clear(ap, fis);
2277        if (!fis)
2278                return;
2279
2280        if (ap->pflags & ATA_PFLAG_FROZEN)
2281                return;
2282
2283        if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
2284                nv_swncq_hotplug(ap, fis);
2285                return;
2286        }
2287
2288        if (!pp->qc_active)
2289                return;
2290
2291        if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
2292                return;
2293        ap->ops->scr_write(&ap->link, SCR_ERROR, serror);
2294
2295        if (ata_stat & ATA_ERR) {
2296                ata_ehi_clear_desc(ehi);
2297                ata_ehi_push_desc(ehi, "ATA error. fis:0x%X", fis);
2298                ehi->err_mask |= AC_ERR_DEV;
2299                ehi->serror |= serror;
2300                ehi->action |= ATA_EH_RESET;
2301                ata_port_freeze(ap);
2302                return;
2303        }
2304
2305        if (fis & NV_SWNCQ_IRQ_BACKOUT) {
2306                /* If the IRQ signals a backout, the driver must
2307                 * reissue the command again some time later.
2308                 */
2309                pp->ncq_flags |= ncq_saw_backout;
2310        }
2311
2312        if (fis & NV_SWNCQ_IRQ_SDBFIS) {
2313                pp->ncq_flags |= ncq_saw_sdb;
2314                DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
2315                        "dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2316                        ap->print_id, pp->qc_active, pp->dhfis_bits,
2317                        pp->dmafis_bits, readl(pp->sactive_block));
2318                rc = nv_swncq_sdbfis(ap);
2319                if (rc < 0)
2320                        goto irq_error;
2321        }
2322
2323        if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
2324                /* The interrupt indicates the new command
2325                 * was transmitted correctly to the drive.
2326                 */
2327                pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
2328                pp->ncq_flags |= ncq_saw_d2h;
2329                if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
2330                        ata_ehi_push_desc(ehi, "illegal fis transaction");
2331                        ehi->err_mask |= AC_ERR_HSM;
2332                        ehi->action |= ATA_EH_RESET;
2333                        goto irq_error;
2334                }
2335
2336                if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
2337                    !(pp->ncq_flags & ncq_saw_dmas)) {
2338                        ata_stat = ap->ops->sff_check_status(ap);
2339                        if (ata_stat & ATA_BUSY)
2340                                goto irq_exit;
2341
2342                        if (pp->defer_queue.defer_bits) {
2343                                DPRINTK("send next command\n");
2344                                qc = nv_swncq_qc_from_dq(ap);
2345                                nv_swncq_issue_atacmd(ap, qc);
2346                        }
2347                }
2348        }
2349
2350        if (fis & NV_SWNCQ_IRQ_DMASETUP) {
2351                /* program the dma controller with appropriate PRD buffers
2352                 * and start the DMA transfer for the requested command.
2353                 */
2354                pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2355                pp->ncq_flags |= ncq_saw_dmas;
2356                rc = nv_swncq_dmafis(ap);
2357        }
2358
2359irq_exit:
2360        return;
2361irq_error:
2362        ata_ehi_push_desc(ehi, "fis:0x%x", fis);
2363        ata_port_freeze(ap);
2364        return;
2365}
2366
2367static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2368{
2369        struct ata_host *host = dev_instance;
2370        unsigned int i;
2371        unsigned int handled = 0;
2372        unsigned long flags;
2373        u32 irq_stat;
2374
2375        spin_lock_irqsave(&host->lock, flags);
2376
2377        irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2378
2379        for (i = 0; i < host->n_ports; i++) {
2380                struct ata_port *ap = host->ports[i];
2381
2382                if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
2383                        if (ap->link.sactive) {
2384                                nv_swncq_host_interrupt(ap, (u16)irq_stat);
2385                                handled = 1;
2386                        } else {
2387                                if (irq_stat)   /* preserve hotplug bits */
2388                                        nv_swncq_irq_clear(ap, 0xfff0);
2389
2390                                handled += nv_host_intr(ap, (u8)irq_stat);
2391                        }
2392                }
2393                irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2394        }
2395
2396        spin_unlock_irqrestore(&host->lock, flags);
2397
2398        return IRQ_RETVAL(handled);
2399}
2400
2401static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2402{
2403        static int printed_version;
2404        const struct ata_port_info *ppi[] = { NULL, NULL };
2405        struct nv_pi_priv *ipriv;
2406        struct ata_host *host;
2407        struct nv_host_priv *hpriv;
2408        int rc;
2409        u32 bar;
2410        void __iomem *base;
2411        unsigned long type = ent->driver_data;
2412
2413        // Make sure this is a SATA controller by counting the number of BARs
2414        // (NVIDIA SATA controllers will always have six BARs).  Otherwise,
2415        // it's an IDE controller and we ignore it.
2416        for (bar = 0; bar < 6; bar++)
2417                if (pci_resource_start(pdev, bar) == 0)
2418                        return -ENODEV;
2419
2420        if (!printed_version++)
2421                dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
2422
2423        rc = pcim_enable_device(pdev);
2424        if (rc)
2425                return rc;
2426
2427        /* determine type and allocate host */
2428        if (type == CK804 && adma_enabled) {
2429                dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
2430                type = ADMA;
2431        } else if (type == MCP5x && swncq_enabled) {
2432                dev_printk(KERN_NOTICE, &pdev->dev, "Using SWNCQ mode\n");
2433                type = SWNCQ;
2434        }
2435
2436        ppi[0] = &nv_port_info[type];
2437        ipriv = ppi[0]->private_data;
2438        rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
2439        if (rc)
2440                return rc;
2441
2442        hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2443        if (!hpriv)
2444                return -ENOMEM;
2445        hpriv->type = type;
2446        host->private_data = hpriv;
2447
2448        /* request and iomap NV_MMIO_BAR */
2449        rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2450        if (rc)
2451                return rc;
2452
2453        /* configure SCR access */
2454        base = host->iomap[NV_MMIO_BAR];
2455        host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2456        host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
2457
2458        /* enable SATA space for CK804 */
2459        if (type >= CK804) {
2460                u8 regval;
2461
2462                pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2463                regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2464                pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2465        }
2466
2467        /* init ADMA */
2468        if (type == ADMA) {
2469                rc = nv_adma_host_init(host);
2470                if (rc)
2471                        return rc;
2472        } else if (type == SWNCQ)
2473                nv_swncq_host_init(host);
2474
2475        if (msi_enabled) {
2476                dev_printk(KERN_NOTICE, &pdev->dev, "Using MSI\n");
2477                pci_enable_msi(pdev);
2478        }
2479
2480        pci_set_master(pdev);
2481        return ata_host_activate(host, pdev->irq, ipriv->irq_handler,
2482                                 IRQF_SHARED, ipriv->sht);
2483}
2484
2485#ifdef CONFIG_PM
2486static int nv_pci_device_resume(struct pci_dev *pdev)
2487{
2488        struct ata_host *host = dev_get_drvdata(&pdev->dev);
2489        struct nv_host_priv *hpriv = host->private_data;
2490        int rc;
2491
2492        rc = ata_pci_device_do_resume(pdev);
2493        if (rc)
2494                return rc;
2495
2496        if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2497                if (hpriv->type >= CK804) {
2498                        u8 regval;
2499
2500                        pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2501                        regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2502                        pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2503                }
2504                if (hpriv->type == ADMA) {
2505                        u32 tmp32;
2506                        struct nv_adma_port_priv *pp;
2507                        /* enable/disable ADMA on the ports appropriately */
2508                        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2509
2510                        pp = host->ports[0]->private_data;
2511                        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2512                                tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2513                                           NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2514                        else
2515                                tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
2516                                           NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2517                        pp = host->ports[1]->private_data;
2518                        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2519                                tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
2520                                           NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2521                        else
2522                                tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
2523                                           NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2524
2525                        pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2526                }
2527        }
2528
2529        ata_host_resume(host);
2530
2531        return 0;
2532}
2533#endif
2534
2535static void nv_ck804_host_stop(struct ata_host *host)
2536{
2537        struct pci_dev *pdev = to_pci_dev(host->dev);
2538        u8 regval;
2539
2540        /* disable SATA space for CK804 */
2541        pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2542        regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2543        pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2544}
2545
2546static void nv_adma_host_stop(struct ata_host *host)
2547{
2548        struct pci_dev *pdev = to_pci_dev(host->dev);
2549        u32 tmp32;
2550
2551        /* disable ADMA on the ports */
2552        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2553        tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2554                   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2555                   NV_MCP_SATA_CFG_20_PORT1_EN |
2556                   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2557
2558        pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2559
2560        nv_ck804_host_stop(host);
2561}
2562
2563static int __init nv_init(void)
2564{
2565        return pci_register_driver(&nv_pci_driver);
2566}
2567
2568static void __exit nv_exit(void)
2569{
2570        pci_unregister_driver(&nv_pci_driver);
2571}
2572
2573module_init(nv_init);
2574module_exit(nv_exit);
2575module_param_named(adma, adma_enabled, bool, 0444);
2576MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
2577module_param_named(swncq, swncq_enabled, bool, 0444);
2578MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
2579module_param_named(msi, msi_enabled, bool, 0444);
2580MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");
2581
2582