linux/drivers/ata/sata_nv.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/driver-api/libata.rst
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a similar
 *  fashion as with other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME                        "sata_nv"
#define DRV_VERSION                     "3.5"

#define NV_ADMA_DMA_BOUNDARY            0xffffffffUL

enum {
        NV_MMIO_BAR                     = 5,

        NV_PORTS                        = 2,
        NV_PIO_MASK                     = ATA_PIO4,
        NV_MWDMA_MASK                   = ATA_MWDMA2,
        NV_UDMA_MASK                    = ATA_UDMA6,
        NV_PORT0_SCR_REG_OFFSET         = 0x00,
        NV_PORT1_SCR_REG_OFFSET         = 0x40,

        /* INT_STATUS/ENABLE */
        NV_INT_STATUS                   = 0x10,
        NV_INT_ENABLE                   = 0x11,
        NV_INT_STATUS_CK804             = 0x440,
        NV_INT_ENABLE_CK804             = 0x441,

        /* INT_STATUS/ENABLE bits */
        NV_INT_DEV                      = 0x01,
        NV_INT_PM                       = 0x02,
        NV_INT_ADDED                    = 0x04,
        NV_INT_REMOVED                  = 0x08,

        NV_INT_PORT_SHIFT               = 4,    /* each port occupies 4 bits */

        NV_INT_ALL                      = 0x0f,
        NV_INT_MASK                     = NV_INT_DEV |
                                          NV_INT_ADDED | NV_INT_REMOVED,

        /* INT_CONFIG */
        NV_INT_CONFIG                   = 0x12,
        NV_INT_CONFIG_METHD             = 0x01, // 0 = INT, 1 = SMI

        // For PCI config register 20
        NV_MCP_SATA_CFG_20              = 0x50,
        NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
        NV_MCP_SATA_CFG_20_PORT0_EN     = (1 << 17),
        NV_MCP_SATA_CFG_20_PORT1_EN     = (1 << 16),
        NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
        NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),

        NV_ADMA_MAX_CPBS                = 32,
        NV_ADMA_CPB_SZ                  = 128,
        NV_ADMA_APRD_SZ                 = 16,
        NV_ADMA_SGTBL_LEN               = (1024 - NV_ADMA_CPB_SZ) /
                                           NV_ADMA_APRD_SZ,
        NV_ADMA_SGTBL_TOTAL_LEN         = NV_ADMA_SGTBL_LEN + 5,
        NV_ADMA_SGTBL_SZ                = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
        NV_ADMA_PORT_PRIV_DMA_SZ        = NV_ADMA_MAX_CPBS *
                                           (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
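        /*
         * Worked out from the constants above: NV_ADMA_SGTBL_LEN =
         * (1024 - 128) / 16 = 56 external APRDs per tag, so each tag
         * owns 128 + 56 * 16 = 1024 bytes and the per-port DMA buffer
         * comes to 32 * 1024 bytes (32 KiB).
         */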

        /* BAR5 offset to ADMA general registers */
        NV_ADMA_GEN                     = 0x400,
        NV_ADMA_GEN_CTL                 = 0x00,
        NV_ADMA_NOTIFIER_CLEAR          = 0x30,

        /* BAR5 offset to ADMA ports */
        NV_ADMA_PORT                    = 0x480,

        /* size of ADMA port register space  */
        NV_ADMA_PORT_SIZE               = 0x100,

        /* ADMA port registers */
        NV_ADMA_CTL                     = 0x40,
        NV_ADMA_CPB_COUNT               = 0x42,
        NV_ADMA_NEXT_CPB_IDX            = 0x43,
        NV_ADMA_STAT                    = 0x44,
        NV_ADMA_CPB_BASE_LOW            = 0x48,
        NV_ADMA_CPB_BASE_HIGH           = 0x4C,
        NV_ADMA_APPEND                  = 0x50,
        NV_ADMA_NOTIFIER                = 0x68,
        NV_ADMA_NOTIFIER_ERROR          = 0x6C,

        /* NV_ADMA_CTL register bits */
        NV_ADMA_CTL_HOTPLUG_IEN         = (1 << 0),
        NV_ADMA_CTL_CHANNEL_RESET       = (1 << 5),
        NV_ADMA_CTL_GO                  = (1 << 7),
        NV_ADMA_CTL_AIEN                = (1 << 8),
        NV_ADMA_CTL_READ_NON_COHERENT   = (1 << 11),
        NV_ADMA_CTL_WRITE_NON_COHERENT  = (1 << 12),

        /* CPB response flag bits */
        NV_CPB_RESP_DONE                = (1 << 0),
        NV_CPB_RESP_ATA_ERR             = (1 << 3),
        NV_CPB_RESP_CMD_ERR             = (1 << 4),
        NV_CPB_RESP_CPB_ERR             = (1 << 7),

        /* CPB control flag bits */
        NV_CPB_CTL_CPB_VALID            = (1 << 0),
        NV_CPB_CTL_QUEUE                = (1 << 1),
        NV_CPB_CTL_APRD_VALID           = (1 << 2),
        NV_CPB_CTL_IEN                  = (1 << 3),
        NV_CPB_CTL_FPDMA                = (1 << 4),

        /* APRD flags */
        NV_APRD_WRITE                   = (1 << 1),
        NV_APRD_END                     = (1 << 2),
        NV_APRD_CONT                    = (1 << 3),

        /* NV_ADMA_STAT flags */
        NV_ADMA_STAT_TIMEOUT            = (1 << 0),
        NV_ADMA_STAT_HOTUNPLUG          = (1 << 1),
        NV_ADMA_STAT_HOTPLUG            = (1 << 2),
        NV_ADMA_STAT_CPBERR             = (1 << 4),
        NV_ADMA_STAT_SERROR             = (1 << 5),
        NV_ADMA_STAT_CMD_COMPLETE       = (1 << 6),
        NV_ADMA_STAT_IDLE               = (1 << 8),
        NV_ADMA_STAT_LEGACY             = (1 << 9),
        NV_ADMA_STAT_STOPPED            = (1 << 10),
        NV_ADMA_STAT_DONE               = (1 << 12),
        NV_ADMA_STAT_ERR                = NV_ADMA_STAT_CPBERR |
                                          NV_ADMA_STAT_TIMEOUT,

        /* port flags */
        NV_ADMA_PORT_REGISTER_MODE      = (1 << 0),
        NV_ADMA_ATAPI_SETUP_COMPLETE    = (1 << 1),

        /* MCP55 reg offset */
        NV_CTL_MCP55                    = 0x400,
        NV_INT_STATUS_MCP55             = 0x440,
        NV_INT_ENABLE_MCP55             = 0x444,
        NV_NCQ_REG_MCP55                = 0x448,

        /* MCP55 */
        NV_INT_ALL_MCP55                = 0xffff,
        NV_INT_PORT_SHIFT_MCP55         = 16,   /* each port occupies 16 bits */
        NV_INT_MASK_MCP55               = NV_INT_ALL_MCP55 & 0xfffd,

        /* SWNCQ ENABLE BITS */
        NV_CTL_PRI_SWNCQ                = 0x02,
        NV_CTL_SEC_SWNCQ                = 0x04,

        /* SW NCQ status bits */
        NV_SWNCQ_IRQ_DEV                = (1 << 0),
        NV_SWNCQ_IRQ_PM                 = (1 << 1),
        NV_SWNCQ_IRQ_ADDED              = (1 << 2),
        NV_SWNCQ_IRQ_REMOVED            = (1 << 3),

        NV_SWNCQ_IRQ_BACKOUT            = (1 << 4),
        NV_SWNCQ_IRQ_SDBFIS             = (1 << 5),
        NV_SWNCQ_IRQ_DHREGFIS           = (1 << 6),
        NV_SWNCQ_IRQ_DMASETUP           = (1 << 7),

        NV_SWNCQ_IRQ_HOTPLUG            = NV_SWNCQ_IRQ_ADDED |
                                          NV_SWNCQ_IRQ_REMOVED,

};

/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
        __le64                  addr;
        __le32                  len;
        u8                      flags;
        u8                      packet_len;
        __le16                  reserved;
};

enum nv_adma_regbits {
        CMDEND  = (1 << 15),            /* end of command list */
        WNB     = (1 << 14),            /* wait-not-BSY */
        IGN     = (1 << 13),            /* ignore this entry */
        CS1n    = (1 << (4 + 8)),       /* std. PATA signals follow... */
        DA2     = (1 << (2 + 8)),
        DA1     = (1 << (1 + 8)),
        DA0     = (1 << (0 + 8)),
};

/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
        u8                      resp_flags;    /* 0 */
        u8                      reserved1;     /* 1 */
        u8                      ctl_flags;     /* 2 */
        /* len is length of taskfile in 64 bit words */
        u8                      len;           /* 3 */
        u8                      tag;           /* 4 */
        u8                      next_cpb_idx;  /* 5 */
        __le16                  reserved2;     /* 6-7 */
        __le16                  tf[12];        /* 8-31 */
        struct nv_adma_prd      aprd[5];       /* 32-111 */
        __le64                  next_aprd;     /* 112-119 */
        __le64                  reserved3;     /* 120-127 */
};


struct nv_adma_port_priv {
        struct nv_adma_cpb      *cpb;
        dma_addr_t              cpb_dma;
        struct nv_adma_prd      *aprd;
        dma_addr_t              aprd_dma;
        void __iomem            *ctl_block;
        void __iomem            *gen_block;
        void __iomem            *notifier_clear_block;
        u64                     adma_dma_mask;
        u8                      flags;
        int                     last_issue_ncq;
};

struct nv_host_priv {
        unsigned long           type;
};

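/*
 * FIFO of deferred NCQ commands.  head and tail are free-running
 * counters, so users of the queue index tag[] modulo ATA_MAX_QUEUE
 * (the array size); defer_bits apparently mirrors, per tag, which
 * commands are currently queued.
 */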
struct defer_queue {
        u32             defer_bits;
        unsigned int    head;
        unsigned int    tail;
        unsigned int    tag[ATA_MAX_QUEUE];
};

enum ncq_saw_flag_list {
        ncq_saw_d2h     = (1U << 0),
        ncq_saw_dmas    = (1U << 1),
        ncq_saw_sdb     = (1U << 2),
        ncq_saw_backout = (1U << 3),
};

struct nv_swncq_port_priv {
        struct ata_bmdma_prd *prd;       /* our SG list */
        dma_addr_t      prd_dma; /* and its DMA mapping */
        void __iomem    *sactive_block;
        void __iomem    *irq_block;
        void __iomem    *tag_block;
        u32             qc_active;

        unsigned int    last_issue_tag;

        /* FIFO circular queue to store deferred commands */
        struct defer_queue defer_queue;

        /* for NCQ interrupt analysis */
        u32             dhfis_bits;
        u32             dmafis_bits;
        u32             sdbfis_bits;

        unsigned int    ncq_flags;
};


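/* Per-port ADMA interrupt flag in the general control/status dword:
   bit 19 + 12 * port, i.e. bit 19 for port 0 and bit 31 for port 1. */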
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))

static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);

static int nv_hardreset(struct ata_link *link, unsigned int *class,
                        unsigned long deadline);
static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
static void nv_adma_freeze(struct ata_port *ap);
static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);

static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
static int nv_swncq_slave_config(struct scsi_device *sdev);
static int nv_swncq_port_start(struct ata_port *ap);
static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_swncq_port_resume(struct ata_port *ap);
#endif

enum nv_host_type
{
        GENERIC,
        NFORCE2,
        NFORCE3 = NFORCE2,      /* NF2 == NF3 as far as sata_nv is concerned */
        CK804,
        ADMA,
        MCP5x,
        SWNCQ,
};

static const struct pci_device_id nv_pci_tbl[] = {
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

        { } /* terminate list */
};

static struct pci_driver nv_pci_driver = {
        .name                   = DRV_NAME,
        .id_table               = nv_pci_tbl,
        .probe                  = nv_init_one,
#ifdef CONFIG_PM_SLEEP
        .suspend                = ata_pci_device_suspend,
        .resume                 = nv_pci_device_resume,
#endif
        .remove                 = ata_pci_remove_one,
};

static struct scsi_host_template nv_sht = {
        ATA_BMDMA_SHT(DRV_NAME),
};

static struct scsi_host_template nv_adma_sht = {
        ATA_NCQ_SHT(DRV_NAME),
        .can_queue              = NV_ADMA_MAX_CPBS,
        .sg_tablesize           = NV_ADMA_SGTBL_TOTAL_LEN,
        .dma_boundary           = NV_ADMA_DMA_BOUNDARY,
        .slave_configure        = nv_adma_slave_config,
};

static struct scsi_host_template nv_swncq_sht = {
        ATA_NCQ_SHT(DRV_NAME),
        .can_queue              = ATA_MAX_QUEUE - 1,
        .sg_tablesize           = LIBATA_MAX_PRD,
        .dma_boundary           = ATA_DMA_BOUNDARY,
        .slave_configure        = nv_swncq_slave_config,
};

/*
 * NV SATA controllers have various problems with the hardreset
 * protocol, depending on the specific controller and device.
 *
 * GENERIC:
 *
 *  bko11195 reports that link doesn't come online after hardreset on
 *  generic nv's and there have been several other similar reports on
 *  linux-ide.
 *
 *  bko12351#c23 reports that warmplug on MCP61 doesn't work with
 *  softreset.
 *
 * NF2/3:
 *
 *  bko3352 reports nf2/3 controllers can't determine device signature
 *  reliably after hardreset.  The following thread reports detection
 *  failure on cold boot with the standard debouncing timing.
 *
 *  http://thread.gmane.org/gmane.linux.ide/34098
 *
 *  bko12176 reports that hardreset fails to bring up the link during
 *  boot on nf2.
 *
 * CK804:
 *
 *  For initial probing after boot and hot plugging, hardreset mostly
 *  works fine on CK804 but curiously, reprobing on the initial port
 *  by rescanning or rmmod/insmod fails to acquire the initial D2H Reg
 *  FIS in a somewhat nondeterministic way.
 *
 * SWNCQ:
 *
 *  bko12351 reports that when SWNCQ is enabled, for hotplug to work,
 *  hardreset should be used and hardreset can't report proper
 *  signature, which suggests that mcp5x is closer to nf2 as far as
 *  reset quirkiness is concerned.
 *
 *  bko12703 reports that boot probing fails for Intel SSDs with
 *  hardreset.  Link fails to come online.  Softreset works fine.
 *
 * The failures are varied but the following patterns seem true for
 * all flavors.
 *
 * - Softreset during boot always works.
 *
 * - Hardreset during boot sometimes fails to bring up the link on
 *   certain combinations and device signature acquisition is
 *   unreliable.
 *
 * - Hardreset is often necessary after hotplug.
 *
 * So, preferring softreset for boot probing and error handling (as
 * hardreset might bring down the link) but using hardreset for
 * post-boot probing should work around the above issues in most
 * cases.  Define nv_hardreset() which only kicks in for post-boot
 * probing and use it for all variants.
 */
static struct ata_port_operations nv_generic_ops = {
        .inherits               = &ata_bmdma_port_ops,
        .lost_interrupt         = ATA_OP_NULL,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
        .hardreset              = nv_hardreset,
};

static struct ata_port_operations nv_nf2_ops = {
        .inherits               = &nv_generic_ops,
        .freeze                 = nv_nf2_freeze,
        .thaw                   = nv_nf2_thaw,
};

static struct ata_port_operations nv_ck804_ops = {
        .inherits               = &nv_generic_ops,
        .freeze                 = nv_ck804_freeze,
        .thaw                   = nv_ck804_thaw,
        .host_stop              = nv_ck804_host_stop,
};

static struct ata_port_operations nv_adma_ops = {
        .inherits               = &nv_ck804_ops,

        .check_atapi_dma        = nv_adma_check_atapi_dma,
        .sff_tf_read            = nv_adma_tf_read,
        .qc_defer               = ata_std_qc_defer,
        .qc_prep                = nv_adma_qc_prep,
        .qc_issue               = nv_adma_qc_issue,
        .sff_irq_clear          = nv_adma_irq_clear,

        .freeze                 = nv_adma_freeze,
        .thaw                   = nv_adma_thaw,
        .error_handler          = nv_adma_error_handler,
        .post_internal_cmd      = nv_adma_post_internal_cmd,

        .port_start             = nv_adma_port_start,
        .port_stop              = nv_adma_port_stop,
#ifdef CONFIG_PM
        .port_suspend           = nv_adma_port_suspend,
        .port_resume            = nv_adma_port_resume,
#endif
        .host_stop              = nv_adma_host_stop,
};

static struct ata_port_operations nv_swncq_ops = {
        .inherits               = &nv_generic_ops,

        .qc_defer               = ata_std_qc_defer,
        .qc_prep                = nv_swncq_qc_prep,
        .qc_issue               = nv_swncq_qc_issue,

        .freeze                 = nv_mcp55_freeze,
        .thaw                   = nv_mcp55_thaw,
        .error_handler          = nv_swncq_error_handler,

#ifdef CONFIG_PM
        .port_suspend           = nv_swncq_port_suspend,
        .port_resume            = nv_swncq_port_resume,
#endif
        .port_start             = nv_swncq_port_start,
};

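/* Per-flavor probe-time bundle: the IRQ handler and SCSI host template
   for each controller flavor.  NV_PI_PRIV() below builds a pointer to
   an anonymous struct via a C99 compound literal, so nv_port_info[]
   entries can carry both without one named variable per flavor. */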
struct nv_pi_priv {
        irq_handler_t                   irq_handler;
        struct scsi_host_template       *sht;
};

#define NV_PI_PRIV(_irq_handler, _sht) \
        &(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }

static const struct ata_port_info nv_port_info[] = {
        /* generic */
        {
                .flags          = ATA_FLAG_SATA,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_generic_ops,
                .private_data   = NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
        },
        /* nforce2/3 */
        {
                .flags          = ATA_FLAG_SATA,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_nf2_ops,
                .private_data   = NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
        },
        /* ck804 */
        {
                .flags          = ATA_FLAG_SATA,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_ck804_ops,
                .private_data   = NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
        },
        /* ADMA */
        {
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NCQ,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_adma_ops,
                .private_data   = NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
        },
        /* MCP5x */
        {
                .flags          = ATA_FLAG_SATA,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_generic_ops,
                .private_data   = NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
        },
        /* SWNCQ */
        {
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NCQ,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_swncq_ops,
                .private_data   = NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
        },
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static bool adma_enabled;
static bool swncq_enabled = true;
static bool msi_enabled;

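/*
 * Switch a port from ADMA mode back to legacy register mode: wait for
 * the engine to report IDLE, clear the GO bit, then wait for the
 * LEGACY flag.  Each polling loop allows the engine 20 * 50ns before
 * warning and proceeding anyway.
 */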
static void nv_adma_register_mode(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp, status;
        int count = 0;

        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
                return;

        status = readw(mmio + NV_ADMA_STAT);
        while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_warn(ap, "timeout waiting for ADMA IDLE, stat=0x%hx\n",
                              status);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

        count = 0;
        status = readw(mmio + NV_ADMA_STAT);
        while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_warn(ap,
                              "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
                              status);

        pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

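/*
 * The reverse transition: set the GO bit and wait until the LEGACY
 * flag clears and IDLE asserts.  Not legal while the port is set up
 * for ATAPI (legacy-only) operation, hence the WARN_ON below.
 */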
static void nv_adma_mode(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp, status;
        int count = 0;

        if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
                return;

        WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

        status = readw(mmio + NV_ADMA_STAT);
        while (((status & NV_ADMA_STAT_LEGACY) ||
              !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_warn(ap,
                        "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
                        status);

        pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}

static int nv_adma_slave_config(struct scsi_device *sdev)
{
        struct ata_port *ap = ata_shost_to_port(sdev->host);
        struct nv_adma_port_priv *pp = ap->private_data;
        struct nv_adma_port_priv *port0, *port1;
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        unsigned long segment_boundary, flags;
        unsigned short sg_tablesize;
        int rc;
        int adma_enable;
        u32 current_reg, new_reg, config_mask;

        rc = ata_scsi_slave_config(sdev);

        if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
                /* Not a proper libata device, ignore */
                return rc;

        spin_lock_irqsave(ap->lock, flags);

        if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
                /*
                 * NVIDIA reports that ADMA mode does not support ATAPI commands.
                 * Therefore ATAPI commands are sent through the legacy interface.
                 * However, the legacy interface only supports 32-bit DMA.
                 * Restrict DMA parameters as required by the legacy interface
                 * when an ATAPI device is connected.
                 */
                segment_boundary = ATA_DMA_BOUNDARY;
                /* Subtract 1 since an extra entry may be needed for padding, see
                   libata-scsi.c */
                sg_tablesize = LIBATA_MAX_PRD - 1;

                /* Since the legacy DMA engine is in use, we need to disable ADMA
                   on the port. */
                adma_enable = 0;
                nv_adma_register_mode(ap);
        } else {
                segment_boundary = NV_ADMA_DMA_BOUNDARY;
                sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
                adma_enable = 1;
        }

        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

        if (ap->port_no == 1)
                config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
                              NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
        else
                config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
                              NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

        if (adma_enable) {
                new_reg = current_reg | config_mask;
                pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
        } else {
                new_reg = current_reg & ~config_mask;
                pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
        }

        if (current_reg != new_reg)
                pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

        port0 = ap->host->ports[0]->private_data;
        port1 = ap->host->ports[1]->private_data;
        if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
            (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
                /*
                 * We have to set the DMA mask to 32-bit if either port is in
                 * ATAPI mode, since they are on the same PCI device which is
                 * used for DMA mapping.  If either SCSI device is not allocated
                 * yet, it's OK since that port will discover its correct
                 * setting when it does get allocated.
                 */
                rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
        } else {
                rc = dma_set_mask(&pdev->dev, pp->adma_dma_mask);
        }

        blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
        blk_queue_max_segments(sdev->request_queue, sg_tablesize);
        ata_port_info(ap,
                      "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
                      (unsigned long long)*ap->host->dev->dma_mask,
                      segment_boundary, sg_tablesize);

        spin_unlock_irqrestore(ap->lock, flags);

        return rc;
}

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;
        return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
        /* Other than when internal or pass-through commands are executed,
           the only time this function will be called in ADMA mode will be
           if a command fails. In the failure case we don't care about going
           into register mode with ADMA commands pending, as the commands will
           all shortly be aborted anyway. We assume that NCQ commands are not
           issued via passthrough, which is the only way that switching into
           register mode could abort outstanding commands. */
        nv_adma_register_mode(ap);

        ata_sff_tf_read(ap, tf);
}

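/*
 * Encode a taskfile into CPB form: each 16-bit entry carries a shadow
 * register number in its high byte and the register value in its low
 * byte, with the WNB (wait-not-BSY), CMDEND and IGN control bits OR'd
 * in as needed.  Unused slots are padded with IGN.  Returns the number
 * of entries written.
 */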
static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
        unsigned int idx = 0;

        if (tf->flags & ATA_TFLAG_ISADDR) {
                if (tf->flags & ATA_TFLAG_LBA48) {
                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature | WNB);
                        cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature);
                } else
                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature | WNB);

                cpb[idx++] = cpu_to_le16((ATA_REG_NSECT  << 8) | tf->nsect);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAL   << 8) | tf->lbal);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAM   << 8) | tf->lbam);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAH   << 8) | tf->lbah);
        }

        if (tf->flags & ATA_TFLAG_DEVICE)
                cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

        cpb[idx++] = cpu_to_le16((ATA_REG_CMD    << 8) | tf->command | CMDEND);

        while (idx < 12)
                cpb[idx++] = cpu_to_le16(IGN);

        return idx;
}

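/*
 * Examine one CPB's response flags.  Returns 1 if the command completed
 * successfully, 0 if it is still outstanding, and -1 after starting EH
 * (freeze or abort) for a flagged or forced error.
 */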
static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        u8 flags = pp->cpb[cpb_num].resp_flags;

        VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

        if (unlikely((force_err ||
                     flags & (NV_CPB_RESP_ATA_ERR |
                              NV_CPB_RESP_CMD_ERR |
                              NV_CPB_RESP_CPB_ERR)))) {
                struct ata_eh_info *ehi = &ap->link.eh_info;
                int freeze = 0;

                ata_ehi_clear_desc(ehi);
                __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
                if (flags & NV_CPB_RESP_ATA_ERR) {
                        ata_ehi_push_desc(ehi, "ATA error");
                        ehi->err_mask |= AC_ERR_DEV;
                } else if (flags & NV_CPB_RESP_CMD_ERR) {
                        ata_ehi_push_desc(ehi, "CMD error");
                        ehi->err_mask |= AC_ERR_DEV;
                } else if (flags & NV_CPB_RESP_CPB_ERR) {
                        ata_ehi_push_desc(ehi, "CPB error");
                        ehi->err_mask |= AC_ERR_SYSTEM;
                        freeze = 1;
                } else {
                        /* notifier error, but no error in CPB flags? */
                        ata_ehi_push_desc(ehi, "unknown");
                        ehi->err_mask |= AC_ERR_OTHER;
                        freeze = 1;
                }
                /* Kill all commands. EH will determine what actually failed. */
                if (freeze)
                        ata_port_freeze(ap);
                else
                        ata_port_abort(ap);
                return -1;
        }

        if (likely(flags & NV_CPB_RESP_DONE))
                return 1;
        return 0;
}

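/*
 * Handle a legacy-mode interrupt for one port: freeze on hotplug
 * events, bail out if the device interrupt bit is clear, ack spurious
 * device interrupts by reading the status register, and otherwise let
 * the BMDMA helper complete the active command.
 */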
static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
        struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);

        /* freeze if hotplugged */
        if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
                ata_port_freeze(ap);
                return 1;
        }

        /* bail out if not our interrupt */
        if (!(irq_stat & NV_INT_DEV))
                return 0;

        /* DEV interrupt w/ no active qc? */
        if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
                ata_sff_check_status(ap);
                return 1;
        }

        /* handle interrupt */
        return ata_bmdma_port_intr(ap, qc);
}

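/*
 * Top-level ADMA interrupt handler.  Per port: fall back to the legacy
 * path when ADMA is disabled (ATAPI) or the port is in register mode;
 * otherwise latch and clear the notifier and status registers, freeze
 * on hotplug or controller errors, and complete finished CPBs.  Both
 * notifier clear registers are written together at the end, as NVIDIA
 * requires (see the note near the bottom).
 */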
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        int i, handled = 0;
        u32 notifier_clears[2];

        spin_lock(&host->lock);

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];
                struct nv_adma_port_priv *pp = ap->private_data;
                void __iomem *mmio = pp->ctl_block;
                u16 status;
                u32 gen_ctl;
                u32 notifier, notifier_error;

                notifier_clears[i] = 0;

                /* if ADMA is disabled, use standard ata interrupt handler */
                if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
                        u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
                                >> (NV_INT_PORT_SHIFT * i);
                        handled += nv_host_intr(ap, irq_stat);
                        continue;
                }

                /* if in ATA register mode, check for standard interrupts */
                if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
                        u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
                                >> (NV_INT_PORT_SHIFT * i);
                        if (ata_tag_valid(ap->link.active_tag))
                                /* NV_INT_DEV indication seems unreliable
                                   at times at least in ADMA mode. Force it
                                   on always when a command is active, to
                                   prevent losing interrupts. */
                                irq_stat |= NV_INT_DEV;
                        handled += nv_host_intr(ap, irq_stat);
                }

                notifier = readl(mmio + NV_ADMA_NOTIFIER);
                notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
                notifier_clears[i] = notifier | notifier_error;

                gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

                if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
                    !notifier_error)
                        /* Nothing to do */
                        continue;

                status = readw(mmio + NV_ADMA_STAT);

                /*
                 * Clear status. Ensure the controller sees the
                 * clearing before we start looking at any of the CPB
                 * statuses, so that any CPB completions after this
                 * point in the handler will raise another interrupt.
                 */
                writew(status, mmio + NV_ADMA_STAT);
                readw(mmio + NV_ADMA_STAT); /* flush posted write */
                rmb();

                handled++; /* irq handled if we got here */

                /* freeze if hotplugged or controller error */
                if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
                                       NV_ADMA_STAT_HOTUNPLUG |
                                       NV_ADMA_STAT_TIMEOUT |
                                       NV_ADMA_STAT_SERROR))) {
                        struct ata_eh_info *ehi = &ap->link.eh_info;

                        ata_ehi_clear_desc(ehi);
                        __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
                        if (status & NV_ADMA_STAT_TIMEOUT) {
                                ehi->err_mask |= AC_ERR_SYSTEM;
                                ata_ehi_push_desc(ehi, "timeout");
                        } else if (status & NV_ADMA_STAT_HOTPLUG) {
                                ata_ehi_hotplugged(ehi);
                                ata_ehi_push_desc(ehi, "hotplug");
                        } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
                                ata_ehi_hotplugged(ehi);
                                ata_ehi_push_desc(ehi, "hot unplug");
                        } else if (status & NV_ADMA_STAT_SERROR) {
                                /* let EH analyze SError and figure out cause */
                                ata_ehi_push_desc(ehi, "SError");
                        } else
                                ata_ehi_push_desc(ehi, "unknown");
                        ata_port_freeze(ap);
                        continue;
                }

                if (status & (NV_ADMA_STAT_DONE |
                              NV_ADMA_STAT_CPBERR |
                              NV_ADMA_STAT_CMD_COMPLETE)) {
                        u32 check_commands = notifier_clears[i];
                        u32 done_mask = 0;
                        int pos, rc;

                        if (status & NV_ADMA_STAT_CPBERR) {
                                /* check all active commands */
                                if (ata_tag_valid(ap->link.active_tag))
                                        check_commands = 1 <<
                                                ap->link.active_tag;
                                else
                                        check_commands = ap->link.sactive;
                        }

                        /* check CPBs for completed commands */
                        while ((pos = ffs(check_commands))) {
                                pos--;
                                rc = nv_adma_check_cpb(ap, pos,
                                                notifier_error & (1 << pos));
                                if (rc > 0)
                                        done_mask |= 1 << pos;
                                else if (unlikely(rc < 0))
                                        check_commands = 0;
                                check_commands &= ~(1 << pos);
                        }
                        ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
                }
        }

        if (notifier_clears[0] || notifier_clears[1]) {
                /* Note: Both notifier clear registers must be written
                   if either is set, even if one is zero, according to NVIDIA. */
                struct nv_adma_port_priv *pp = host->ports[0]->private_data;
                writel(notifier_clears[0], pp->notifier_clear_block);
                pp = host->ports[1]->private_data;
                writel(notifier_clears[1], pp->notifier_clear_block);
        }

        spin_unlock(&host->lock);

        return IRQ_RETVAL(handled);
}

static void nv_adma_freeze(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        nv_ck804_freeze(ap);

        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
                return;

        /* clear any outstanding CK804 notifications */
        writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
                ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

        /* Disable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
                mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
}

static void nv_adma_thaw(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        nv_ck804_thaw(ap);

        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
                return;

        /* Enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
                mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
}

static void nv_adma_irq_clear(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u32 notifier_clears[2];

        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
                ata_bmdma_irq_clear(ap);
                return;
        }

        /* clear any outstanding CK804 notifications */
        writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
                ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

        /* clear ADMA status */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* clear notifiers - note both ports need to be written with
           something even though we are only clearing on one */
        if (ap->port_no == 0) {
                notifier_clears[0] = 0xFFFFFFFF;
                notifier_clears[1] = 0;
        } else {
                notifier_clears[0] = 0;
                notifier_clears[1] = 0xFFFFFFFF;
        }
        pp = ap->host->ports[0]->private_data;
        writel(notifier_clears[0], pp->notifier_clear_block);
        pp = ap->host->ports[1]->private_data;
        writel(notifier_clears[1], pp->notifier_clear_block);
}

static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;

        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
                ata_bmdma_post_internal_cmd(qc);
}

static int nv_adma_port_start(struct ata_port *ap)
{
        struct device *dev = ap->host->dev;
        struct nv_adma_port_priv *pp;
        int rc;
        void *mem;
        dma_addr_t mem_dma;
        void __iomem *mmio;
        struct pci_dev *pdev = to_pci_dev(dev);
        u16 tmp;

        VPRINTK("ENTER\n");

        /*
         * Ensure DMA mask is set to 32-bit before allocating legacy PRD and
         * pad buffers.
         */
        rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (rc)
                return rc;

        /* we might fallback to bmdma, allocate bmdma resources */
        rc = ata_bmdma_port_start(ap);
        if (rc)
                return rc;

        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
        if (!pp)
                return -ENOMEM;

        mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
               ap->port_no * NV_ADMA_PORT_SIZE;
        pp->ctl_block = mmio;
        pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
        pp->notifier_clear_block = pp->gen_block +
               NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

        /*
         * Now that the legacy PRD and padding buffer are allocated we can
         * raise the DMA mask to allocate the CPB/APRD table.
         */
        dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));

        pp->adma_dma_mask = *dev->dma_mask;

        mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
                                  &mem_dma, GFP_KERNEL);
        if (!mem)
                return -ENOMEM;

        /*
         * First item in chunk of DMA memory:
         * 128-byte command parameter block (CPB)
         * one for each command tag
         */
        pp->cpb     = mem;
        pp->cpb_dma = mem_dma;

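        /* Program the CPB base address.  The (x >> 16) >> 16 idiom
           extracts the high dword without a 64-bit shift, which would
           be undefined when dma_addr_t is only 32 bits wide. */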
        writel(mem_dma & 0xFFFFFFFF,    mmio + NV_ADMA_CPB_BASE_LOW);
        writel((mem_dma >> 16) >> 16,   mmio + NV_ADMA_CPB_BASE_HIGH);

        mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
        mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

        /*
         * Second item: block of ADMA_SGTBL_LEN s/g entries
         */
        pp->aprd = mem;
        pp->aprd_dma = mem_dma;

        ap->private_data = pp;

        /* clear any outstanding interrupt conditions */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* initialize port variables */
        pp->flags = NV_ADMA_PORT_REGISTER_MODE;

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* clear GO for register mode, enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
                NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
        udelay(1);
        writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */

        return 0;
}

static void nv_adma_port_stop(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;

        VPRINTK("ENTER\n");
        writew(0, mmio + NV_ADMA_CTL);
}

#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;

        /* Go to register mode - clears GO */
        nv_adma_register_mode(ap);

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* disable interrupt, shut down port */
        writew(0, mmio + NV_ADMA_CTL);

        return 0;
}

static int nv_adma_port_resume(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        /* set CPB block location */
        writel(pp->cpb_dma & 0xFFFFFFFF,        mmio + NV_ADMA_CPB_BASE_LOW);
        writel((pp->cpb_dma >> 16) >> 16,       mmio + NV_ADMA_CPB_BASE_HIGH);

        /* clear any outstanding interrupt conditions */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* initialize port variables */
        pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* clear GO for register mode, enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
                NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
        udelay(1);
        writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */

        return 0;
}
#endif

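/*
 * Lay the standard taskfile registers over the ADMA port MMIO block:
 * the shadow registers sit four bytes apart, with the alternate
 * status/device control register at offset 0x20.
 */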
static void nv_adma_setup_port(struct ata_port *ap)
{
        void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
        struct ata_ioports *ioport = &ap->ioaddr;

        VPRINTK("ENTER\n");

        mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;

        ioport->cmd_addr        = mmio;
        ioport->data_addr       = mmio + (ATA_REG_DATA * 4);
        ioport->error_addr      =
        ioport->feature_addr    = mmio + (ATA_REG_ERR * 4);
        ioport->nsect_addr      = mmio + (ATA_REG_NSECT * 4);
        ioport->lbal_addr       = mmio + (ATA_REG_LBAL * 4);
        ioport->lbam_addr       = mmio + (ATA_REG_LBAM * 4);
        ioport->lbah_addr       = mmio + (ATA_REG_LBAH * 4);
        ioport->device_addr     = mmio + (ATA_REG_DEVICE * 4);
        ioport->status_addr     =
        ioport->command_addr    = mmio + (ATA_REG_STATUS * 4);
        ioport->altstatus_addr  =
        ioport->ctl_addr        = mmio + 0x20;
}

static int nv_adma_host_init(struct ata_host *host)
{
        struct pci_dev *pdev = to_pci_dev(host->dev);
        unsigned int i;
        u32 tmp32;

        VPRINTK("ENTER\n");

        /* enable ADMA on the ports */
        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
        tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
                 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
                 NV_MCP_SATA_CFG_20_PORT1_EN |
                 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

        pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

        for (i = 0; i < host->n_ports; i++)
                nv_adma_setup_port(host->ports[i]);

        return 0;
}

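/*
 * Fill one APRD (SG entry).  NV_APRD_END marks the last segment of the
 * command.  NV_APRD_CONT chains to the next entry, except at idx 4
 * (the last of the five inline APRDs), where the chain continues
 * through the CPB's next_aprd pointer instead.
 */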
1291static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1292                              struct scatterlist *sg,
1293                              int idx,
1294                              struct nv_adma_prd *aprd)
1295{
1296        u8 flags = 0;
1297        if (qc->tf.flags & ATA_TFLAG_WRITE)
1298                flags |= NV_APRD_WRITE;
1299        if (idx == qc->n_elem - 1)
1300                flags |= NV_APRD_END;
1301        else if (idx != 4)
1302                flags |= NV_APRD_CONT;
1303
1304        aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
1305        aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1306        aprd->flags = flags;
1307        aprd->packet_len = 0;
1308}
1309
1310static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1311{
1312        struct nv_adma_port_priv *pp = qc->ap->private_data;
1313        struct nv_adma_prd *aprd;
1314        struct scatterlist *sg;
1315        unsigned int si;
1316
1317        VPRINTK("ENTER\n");
1318
1319        for_each_sg(qc->sg, sg, qc->n_elem, si) {
1320                aprd = (si < 5) ? &cpb->aprd[si] :
1321                        &pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + (si-5)];
1322                nv_adma_fill_aprd(qc, sg, si, aprd);
1323        }
1324        if (si > 5)
1325                cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->hw_tag)));
1326        else
1327                cpb->next_aprd = cpu_to_le64(0);
1328}
1329
1330static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1331{
1332        struct nv_adma_port_priv *pp = qc->ap->private_data;
1333
1334        /* ADMA engine can only be used for non-ATAPI DMA commands,
1335           or interrupt-driven no-data commands. */
1336        if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1337           (qc->tf.flags & ATA_TFLAG_POLLING))
1338                return 1;
1339
1340        if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1341           (qc->tf.protocol == ATA_PROT_NODATA))
1342                return 0;
1343
1344        return 1;
1345}
1346
1347static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc)
1348{
1349        struct nv_adma_port_priv *pp = qc->ap->private_data;
1350        struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag];
1351        u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1352                       NV_CPB_CTL_IEN;
1353
1354        if (nv_adma_use_reg_mode(qc)) {
1355                BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1356                        (qc->flags & ATA_QCFLAG_DMAMAP));
1357                nv_adma_register_mode(qc->ap);
1358                ata_bmdma_qc_prep(qc);
1359                return AC_ERR_OK;
1360        }
1361
1362        cpb->resp_flags = NV_CPB_RESP_DONE;
1363        wmb();
1364        cpb->ctl_flags = 0;
1365        wmb();
1366
1367        cpb->len                = 3;
1368        cpb->tag                = qc->hw_tag;
1369        cpb->next_cpb_idx       = 0;
1370
1371        /* turn on NCQ flags for NCQ commands */
1372        if (qc->tf.protocol == ATA_PROT_NCQ)
1373                ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1374
1375        VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1376
1377        nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1378
1379        if (qc->flags & ATA_QCFLAG_DMAMAP) {
1380                nv_adma_fill_sg(qc, cpb);
1381                ctl_flags |= NV_CPB_CTL_APRD_VALID;
1382        } else
1383                memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1384
1385        /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
1386           until we are finished filling in all of the contents */
1387        wmb();
1388        cpb->ctl_flags = ctl_flags;
1389        wmb();
1390        cpb->resp_flags = 0;
1391
1392        return AC_ERR_OK;
1393}
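
/*
 * Editor's note on the barrier sequence above: the CPB lives in memory
 * shared with the controller, so the driver first marks it done/invalid
 * (resp_flags = DONE, ctl_flags = 0), then fills in the fields, and only
 * then sets ctl_flags to its final value and clears resp_flags, with
 * wmb() between the steps.  The device can therefore never observe a
 * half-written CPB with NV_CPB_CTL_CPB_VALID set.
 */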
1394
1395static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1396{
1397        struct nv_adma_port_priv *pp = qc->ap->private_data;
1398        void __iomem *mmio = pp->ctl_block;
1399        int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
1400
1401        VPRINTK("ENTER\n");
1402
1403        /* We can't handle result taskfile with NCQ commands, since
1404           retrieving the taskfile switches us out of ADMA mode and would abort
1405           existing commands. */
1406        if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
1407                     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
1408                ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n");
1409                return AC_ERR_SYSTEM;
1410        }
1411
1412        if (nv_adma_use_reg_mode(qc)) {
1413                /* use ATA register mode */
1414                VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
1415                BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1416                        (qc->flags & ATA_QCFLAG_DMAMAP));
1417                nv_adma_register_mode(qc->ap);
1418                return ata_bmdma_qc_issue(qc);
1419        } else
1420                nv_adma_mode(qc->ap);
1421
1422        /* write append register, command tag in lower 8 bits
1423           and (number of cpbs to append -1) in top 8 bits */
1424        wmb();
1425
1426        if (curr_ncq != pp->last_issue_ncq) {
1427                /* The hardware seems to need a short delay when switching
1428                   between NCQ and non-NCQ commands, else commands time out. */
1429                udelay(20);
1430                pp->last_issue_ncq = curr_ncq;
1431        }
1432
1433        writew(qc->hw_tag, mmio + NV_ADMA_APPEND);
1434
1435        DPRINTK("Issued tag %u\n", qc->hw_tag);
1436
1437        return 0;
1438}
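
/*
 * Editor's note: per the comment in nv_adma_qc_issue(), the append
 * register takes the command tag in bits 0..7 and (number of CPBs to
 * append - 1) in bits 8..15, so writing the bare hw_tag appends exactly
 * one CPB.  A hypothetical multi-CPB append would look like this sketch
 * (first_tag and n_cpbs are illustrative names, not driver variables):
 */
#if 0	/* illustration only */
	writew(((n_cpbs - 1) << 8) | first_tag, mmio + NV_ADMA_APPEND);
#endif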
1439
1440static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1441{
1442        struct ata_host *host = dev_instance;
1443        unsigned int i;
1444        unsigned int handled = 0;
1445        unsigned long flags;
1446
1447        spin_lock_irqsave(&host->lock, flags);
1448
1449        for (i = 0; i < host->n_ports; i++) {
1450                struct ata_port *ap = host->ports[i];
1451                struct ata_queued_cmd *qc;
1452
1453                qc = ata_qc_from_tag(ap, ap->link.active_tag);
1454                if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
1455                        handled += ata_bmdma_port_intr(ap, qc);
1456                } else {
1457                        /*
1458                         * No command pending?  Clear the interrupt status
1459                         * anyway, in case a stray interrupt is asserted.
1460                         */
1461                        ap->ops->sff_check_status(ap);
1462                }
1463        }
1464
1465        spin_unlock_irqrestore(&host->lock, flags);
1466
1467        return IRQ_RETVAL(handled);
1468}
1469
1470static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1471{
1472        int i, handled = 0;
1473
1474        for (i = 0; i < host->n_ports; i++) {
1475                handled += nv_host_intr(host->ports[i], irq_stat);
1476                irq_stat >>= NV_INT_PORT_SHIFT;
1477        }
1478
1479        return IRQ_RETVAL(handled);
1480}
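
/*
 * Editor's note: irq_stat packs one 4-bit (NV_INT_PORT_SHIFT) status
 * nibble per port, port 0 in the low bits.  For example, irq_stat ==
 * 0x31 gives port 0 NV_INT_DEV (0x1) and, after the shift in the loop
 * above, port 1 NV_INT_DEV | NV_INT_PM (0x3).
 */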
1481
1482static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1483{
1484        struct ata_host *host = dev_instance;
1485        u8 irq_stat;
1486        irqreturn_t ret;
1487
1488        spin_lock(&host->lock);
1489        irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1490        ret = nv_do_interrupt(host, irq_stat);
1491        spin_unlock(&host->lock);
1492
1493        return ret;
1494}
1495
1496static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1497{
1498        struct ata_host *host = dev_instance;
1499        u8 irq_stat;
1500        irqreturn_t ret;
1501
1502        spin_lock(&host->lock);
1503        irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1504        ret = nv_do_interrupt(host, irq_stat);
1505        spin_unlock(&host->lock);
1506
1507        return ret;
1508}
1509
1510static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1511{
1512        if (sc_reg > SCR_CONTROL)
1513                return -EINVAL;
1514
1515        *val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
1516        return 0;
1517}
1518
1519static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1520{
1521        if (sc_reg > SCR_CONTROL)
1522                return -EINVAL;
1523
1524        iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
1525        return 0;
1526}
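
/*
 * Editor's note: the SCR registers are consecutive 32-bit words, so the
 * sc_reg * 4 arithmetic above maps SCR_STATUS/SCR_ERROR/SCR_CONTROL to
 * offsets 0x0/0x4/0x8 from scr_addr; anything past SCR_CONTROL is
 * rejected with -EINVAL.
 */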
1527
1528static int nv_hardreset(struct ata_link *link, unsigned int *class,
1529                        unsigned long deadline)
1530{
1531        struct ata_eh_context *ehc = &link->eh_context;
1532
1533        /* Do hardreset iff it's post-boot probing; please read the
1534         * comment above the port ops for details.
1535         */
1536        if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
1537            !ata_dev_enabled(link->device))
1538                sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
1539                                    NULL, NULL);
1540        else {
1541                const unsigned long *timing = sata_ehc_deb_timing(ehc);
1542                int rc;
1543
1544                if (!(ehc->i.flags & ATA_EHI_QUIET))
1545                        ata_link_info(link,
1546                                      "nv: skipping hardreset on occupied port\n");
1547
1548                /* make sure the link is online */
1549                rc = sata_link_resume(link, timing, deadline);
1550                /* whine about phy resume failure but proceed */
1551                if (rc && rc != -EOPNOTSUPP)
1552                        ata_link_warn(link, "failed to resume link (errno=%d)\n",
1553                                      rc);
1554        }
1555
1556        /* device signature acquisition is unreliable */
1557        return -EAGAIN;
1558}
1559
1560static void nv_nf2_freeze(struct ata_port *ap)
1561{
1562        void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1563        int shift = ap->port_no * NV_INT_PORT_SHIFT;
1564        u8 mask;
1565
1566        mask = ioread8(scr_addr + NV_INT_ENABLE);
1567        mask &= ~(NV_INT_ALL << shift);
1568        iowrite8(mask, scr_addr + NV_INT_ENABLE);
1569}
1570
1571static void nv_nf2_thaw(struct ata_port *ap)
1572{
1573        void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1574        int shift = ap->port_no * NV_INT_PORT_SHIFT;
1575        u8 mask;
1576
1577        iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1578
1579        mask = ioread8(scr_addr + NV_INT_ENABLE);
1580        mask |= (NV_INT_MASK << shift);
1581        iowrite8(mask, scr_addr + NV_INT_ENABLE);
1582}
1583
1584static void nv_ck804_freeze(struct ata_port *ap)
1585{
1586        void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1587        int shift = ap->port_no * NV_INT_PORT_SHIFT;
1588        u8 mask;
1589
1590        mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1591        mask &= ~(NV_INT_ALL << shift);
1592        writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1593}
1594
1595static void nv_ck804_thaw(struct ata_port *ap)
1596{
1597        void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1598        int shift = ap->port_no * NV_INT_PORT_SHIFT;
1599        u8 mask;
1600
1601        writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1602
1603        mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1604        mask |= (NV_INT_MASK << shift);
1605        writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1606}
1607
1608static void nv_mcp55_freeze(struct ata_port *ap)
1609{
1610        void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1611        int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1612        u32 mask;
1613
1614        writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1615
1616        mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1617        mask &= ~(NV_INT_ALL_MCP55 << shift);
1618        writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1619}
1620
1621static void nv_mcp55_thaw(struct ata_port *ap)
1622{
1623        void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1624        int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1625        u32 mask;
1626
1627        writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1628
1629        mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1630        mask |= (NV_INT_MASK_MCP55 << shift);
1631        writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1632}
1633
1634static void nv_adma_error_handler(struct ata_port *ap)
1635{
1636        struct nv_adma_port_priv *pp = ap->private_data;
1637        if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1638                void __iomem *mmio = pp->ctl_block;
1639                int i;
1640                u16 tmp;
1641
1642                if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
1643                        u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1644                        u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1645                        u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1646                        u32 status = readw(mmio + NV_ADMA_STAT);
1647                        u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1648                        u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
1649
1650                        ata_port_err(ap,
1651                                "EH in ADMA mode, notifier 0x%X "
1652                                "notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1653                                "next cpb count 0x%X next cpb idx 0x%x\n",
1654                                notifier, notifier_error, gen_ctl, status,
1655                                cpb_count, next_cpb_idx);
1656
1657                        for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
1658                                struct nv_adma_cpb *cpb = &pp->cpb[i];
1659                                if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
1660                                    ap->link.sactive & (1 << i))
1661                                        ata_port_err(ap,
1662                                                "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1663                                                i, cpb->ctl_flags, cpb->resp_flags);
1664                        }
1665                }
1666
1667                /* Push us back into port register mode for error handling. */
1668                nv_adma_register_mode(ap);
1669
1670                /* Mark all of the CPBs as invalid to prevent them from
1671                   being executed */
1672                for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1673                        pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1674
1675                /* clear CPB fetch count */
1676                writew(0, mmio + NV_ADMA_CPB_COUNT);
1677
1678                /* Reset channel */
1679                tmp = readw(mmio + NV_ADMA_CTL);
1680                writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1681                readw(mmio + NV_ADMA_CTL);      /* flush posted write */
1682                udelay(1);
1683                writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1684                readw(mmio + NV_ADMA_CTL);      /* flush posted write */
1685        }
1686
1687        ata_bmdma_error_handler(ap);
1688}
1689
1690static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1691{
1692        struct nv_swncq_port_priv *pp = ap->private_data;
1693        struct defer_queue *dq = &pp->defer_queue;
1694
1695        /* queue is full */
1696        WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1697        dq->defer_bits |= (1 << qc->hw_tag);
1698        dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->hw_tag;
1699}
1700
1701static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1702{
1703        struct nv_swncq_port_priv *pp = ap->private_data;
1704        struct defer_queue *dq = &pp->defer_queue;
1705        unsigned int tag;
1706
1707        if (dq->head == dq->tail)       /* empty queue */
1708                return NULL;
1709
1710        tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1711        dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1712        WARN_ON(!(dq->defer_bits & (1 << tag)));
1713        dq->defer_bits &= ~(1 << tag);
1714
1715        return ata_qc_from_tag(ap, tag);
1716}
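
/*
 * Editor's note: the defer queue is a power-of-two ring buffer.  head
 * and tail are free-running counters and "& (ATA_MAX_QUEUE - 1)"
 * selects the slot, so head == tail means empty and tail - head ==
 * ATA_MAX_QUEUE means full (the condition WARN_ONed in
 * nv_swncq_qc_to_dq()).  defer_bits mirrors the queued tags so
 * membership can be tested with a single AND.
 */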
1717
1718static void nv_swncq_fis_reinit(struct ata_port *ap)
1719{
1720        struct nv_swncq_port_priv *pp = ap->private_data;
1721
1722        pp->dhfis_bits = 0;
1723        pp->dmafis_bits = 0;
1724        pp->sdbfis_bits = 0;
1725        pp->ncq_flags = 0;
1726}
1727
1728static void nv_swncq_pp_reinit(struct ata_port *ap)
1729{
1730        struct nv_swncq_port_priv *pp = ap->private_data;
1731        struct defer_queue *dq = &pp->defer_queue;
1732
1733        dq->head = 0;
1734        dq->tail = 0;
1735        dq->defer_bits = 0;
1736        pp->qc_active = 0;
1737        pp->last_issue_tag = ATA_TAG_POISON;
1738        nv_swncq_fis_reinit(ap);
1739}
1740
1741static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1742{
1743        struct nv_swncq_port_priv *pp = ap->private_data;
1744
1745        writew(fis, pp->irq_block);
1746}
1747
1748static void __ata_bmdma_stop(struct ata_port *ap)
1749{
1750        struct ata_queued_cmd qc;
1751
1752        qc.ap = ap;
1753        ata_bmdma_stop(&qc);
1754}
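
/*
 * Editor's note: ata_bmdma_stop() only dereferences qc->ap, so the
 * on-stack dummy qc above is sufficient (if fragile) for stopping the
 * BMDMA engine when no real queued command is at hand.
 */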
1755
1756static void nv_swncq_ncq_stop(struct ata_port *ap)
1757{
1758        struct nv_swncq_port_priv *pp = ap->private_data;
1759        unsigned int i;
1760        u32 sactive;
1761        u32 done_mask;
1762
1763        ata_port_err(ap, "EH in SWNCQ mode, QC: qc_active 0x%llX sactive 0x%X\n",
1764                     ap->qc_active, ap->link.sactive);
1765        ata_port_err(ap,
1766                "SWNCQ: qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n  "
1767                "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1768                pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1769                pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1770
1771        ata_port_err(ap, "ATA_REG 0x%X ERR_REG 0x%X\n",
1772                     ap->ops->sff_check_status(ap),
1773                     ioread8(ap->ioaddr.error_addr));
1774
1775        sactive = readl(pp->sactive_block);
1776        done_mask = pp->qc_active ^ sactive;
1777
1778        ata_port_err(ap, "tag : dhfis dmafis sdbfis sactive\n");
1779        for (i = 0; i < ATA_MAX_QUEUE; i++) {
1780                u8 err = 0;
1781                if (pp->qc_active & (1 << i))
1782                        err = 0;
1783                else if (done_mask & (1 << i))
1784                        err = 1;
1785                else
1786                        continue;
1787
1788                ata_port_err(ap,
1789                             "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1790                             (pp->dhfis_bits >> i) & 0x1,
1791                             (pp->dmafis_bits >> i) & 0x1,
1792                             (pp->sdbfis_bits >> i) & 0x1,
1793                             (sactive >> i) & 0x1,
1794                             (err ? "error! tag doesn't exist" : " "));
1795        }
1796
1797        nv_swncq_pp_reinit(ap);
1798        ap->ops->sff_irq_clear(ap);
1799        __ata_bmdma_stop(ap);
1800        nv_swncq_irq_clear(ap, 0xffff);
1801}
1802
1803static void nv_swncq_error_handler(struct ata_port *ap)
1804{
1805        struct ata_eh_context *ehc = &ap->link.eh_context;
1806
1807        if (ap->link.sactive) {
1808                nv_swncq_ncq_stop(ap);
1809                ehc->i.action |= ATA_EH_RESET;
1810        }
1811
1812        ata_bmdma_error_handler(ap);
1813}
1814
1815#ifdef CONFIG_PM
1816static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1817{
1818        void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1819        u32 tmp;
1820
1821        /* clear irq */
1822        writel(~0, mmio + NV_INT_STATUS_MCP55);
1823
1824        /* disable irq */
1825        writel(0, mmio + NV_INT_ENABLE_MCP55);
1826
1827        /* disable swncq */
1828        tmp = readl(mmio + NV_CTL_MCP55);
1829        tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1830        writel(tmp, mmio + NV_CTL_MCP55);
1831
1832        return 0;
1833}
1834
1835static int nv_swncq_port_resume(struct ata_port *ap)
1836{
1837        void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1838        u32 tmp;
1839
1840        /* clear irq */
1841        writel(~0, mmio + NV_INT_STATUS_MCP55);
1842
1843        /* enable irq */
1844        writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1845
1846        /* enable swncq */
1847        tmp = readl(mmio + NV_CTL_MCP55);
1848        writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1849
1850        return 0;
1851}
1852#endif
1853
1854static void nv_swncq_host_init(struct ata_host *host)
1855{
1856        u32 tmp;
1857        void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1858        struct pci_dev *pdev = to_pci_dev(host->dev);
1859        u8 regval;
1860
1861        /* disable ECO 398 */
1862        pci_read_config_byte(pdev, 0x7f, &regval);
1863        regval &= ~(1 << 7);
1864        pci_write_config_byte(pdev, 0x7f, regval);
1865
1866        /* enable swncq */
1867        tmp = readl(mmio + NV_CTL_MCP55);
1868        VPRINTK("HOST_CTL:0x%X\n", tmp);
1869        writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1870
1871        /* enable interrupts */
1872        tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1873        VPRINTK("HOST_ENABLE:0x%X\n", tmp);
1874        writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1875
1876        /* clear port irq */
1877        writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1878}
1879
1880static int nv_swncq_slave_config(struct scsi_device *sdev)
1881{
1882        struct ata_port *ap = ata_shost_to_port(sdev->host);
1883        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1884        struct ata_device *dev;
1885        int rc;
1886        u8 rev;
1887        u8 check_maxtor = 0;
1888        unsigned char model_num[ATA_ID_PROD_LEN + 1];
1889
1890        rc = ata_scsi_slave_config(sdev);
1891        if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1892                /* Not a proper libata device, ignore */
1893                return rc;
1894
1895        dev = &ap->link.device[sdev->id];
1896        if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1897                return rc;
1898
1899        /* if MCP51 and Maxtor, then disable ncq */
1900        if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1901                pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1902                check_maxtor = 1;
1903
1904        /* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1905        if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1906                pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1907                pci_read_config_byte(pdev, 0x8, &rev);
1908                if (rev <= 0xa2)
1909                        check_maxtor = 1;
1910        }
1911
1912        if (!check_maxtor)
1913                return rc;
1914
1915        ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1916
1917        if (strncmp(model_num, "Maxtor", 6) == 0) {
1918                ata_scsi_change_queue_depth(sdev, 1);
1919                ata_dev_notice(dev, "Disabling SWNCQ mode (depth %x)\n",
1920                               sdev->queue_depth);
1921        }
1922
1923        return rc;
1924}
1925
1926static int nv_swncq_port_start(struct ata_port *ap)
1927{
1928        struct device *dev = ap->host->dev;
1929        void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1930        struct nv_swncq_port_priv *pp;
1931        int rc;
1932
1933        /* we might fall back to bmdma, so allocate bmdma resources */
1934        rc = ata_bmdma_port_start(ap);
1935        if (rc)
1936                return rc;
1937
1938        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1939        if (!pp)
1940                return -ENOMEM;
1941
1942        pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1943                                      &pp->prd_dma, GFP_KERNEL);
1944        if (!pp->prd)
1945                return -ENOMEM;
1946
1947        ap->private_data = pp;
1948        pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
1949        pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
1950        pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
1951
1952        return 0;
1953}
1954
1955static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1956{
1957        if (qc->tf.protocol != ATA_PROT_NCQ) {
1958                ata_bmdma_qc_prep(qc);
1959                return AC_ERR_OK;
1960        }
1961
1962        if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1963                return AC_ERR_OK;
1964
1965        nv_swncq_fill_sg(qc);
1966
1967        return AC_ERR_OK;
1968}
1969
1970static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
1971{
1972        struct ata_port *ap = qc->ap;
1973        struct scatterlist *sg;
1974        struct nv_swncq_port_priv *pp = ap->private_data;
1975        struct ata_bmdma_prd *prd;
1976        unsigned int si, idx;
1977
1978        prd = pp->prd + ATA_MAX_PRD * qc->hw_tag;
1979
1980        idx = 0;
1981        for_each_sg(qc->sg, sg, qc->n_elem, si) {
1982                u32 addr, offset;
1983                u32 sg_len, len;
1984
1985                addr = (u32)sg_dma_address(sg);
1986                sg_len = sg_dma_len(sg);
1987
1988                while (sg_len) {
1989                        offset = addr & 0xffff;
1990                        len = sg_len;
1991                        if ((offset + sg_len) > 0x10000)
1992                                len = 0x10000 - offset;
1993
1994                        prd[idx].addr = cpu_to_le32(addr);
1995                        prd[idx].flags_len = cpu_to_le32(len & 0xffff);
1996
1997                        idx++;
1998                        sg_len -= len;
1999                        addr += len;
2000                }
2001        }
2002
2003        prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2004}
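
/*
 * Editor's note: the inner loop above splits any segment that would
 * cross a 64K boundary, because a BMDMA PRD entry's length field is 16
 * bits (0 meaning 64K).  For example, a 10K segment at 0x1f000 becomes
 * two entries: 4K at 0x1f000 and 6K at 0x20000.
 */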
2005
2006static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
2007                                          struct ata_queued_cmd *qc)
2008{
2009        struct nv_swncq_port_priv *pp = ap->private_data;
2010
2011        if (qc == NULL)
2012                return 0;
2013
2014        DPRINTK("Enter\n");
2015
2016        writel((1 << qc->hw_tag), pp->sactive_block);
2017        pp->last_issue_tag = qc->hw_tag;
2018        pp->dhfis_bits &= ~(1 << qc->hw_tag);
2019        pp->dmafis_bits &= ~(1 << qc->hw_tag);
2020        pp->qc_active |= (0x1 << qc->hw_tag);
2021
2022        ap->ops->sff_tf_load(ap, &qc->tf);       /* load tf registers */
2023        ap->ops->sff_exec_command(ap, &qc->tf);
2024
2025        DPRINTK("Issued tag %u\n", qc->hw_tag);
2026
2027        return 0;
2028}
2029
2030static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2031{
2032        struct ata_port *ap = qc->ap;
2033        struct nv_swncq_port_priv *pp = ap->private_data;
2034
2035        if (qc->tf.protocol != ATA_PROT_NCQ)
2036                return ata_bmdma_qc_issue(qc);
2037
2038        DPRINTK("Enter\n");
2039
2040        if (!pp->qc_active)
2041                nv_swncq_issue_atacmd(ap, qc);
2042        else
2043                nv_swncq_qc_to_dq(ap, qc);      /* add qc to defer queue */
2044
2045        return 0;
2046}
2047
2048static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2049{
2050        u32 serror;
2051        struct ata_eh_info *ehi = &ap->link.eh_info;
2052
2053        ata_ehi_clear_desc(ehi);
2054
2055        /* the controller needs SError cleared; otherwise, it might lock up */
2056        sata_scr_read(&ap->link, SCR_ERROR, &serror);
2057        sata_scr_write(&ap->link, SCR_ERROR, serror);
2058
2059        /* analyze @fis */
2060        if (fis & NV_SWNCQ_IRQ_ADDED)
2061                ata_ehi_push_desc(ehi, "hot plug");
2062        else if (fis & NV_SWNCQ_IRQ_REMOVED)
2063                ata_ehi_push_desc(ehi, "hot unplug");
2064
2065        ata_ehi_hotplugged(ehi);
2066
2067        /* okay, let's hand over to EH */
2068        ehi->serror |= serror;
2069
2070        ata_port_freeze(ap);
2071}
2072
2073static int nv_swncq_sdbfis(struct ata_port *ap)
2074{
2075        struct ata_queued_cmd *qc;
2076        struct nv_swncq_port_priv *pp = ap->private_data;
2077        struct ata_eh_info *ehi = &ap->link.eh_info;
2078        u32 sactive;
2079        u32 done_mask;
2080        u8 host_stat;
2081        u8 lack_dhfis = 0;
2082
2083        host_stat = ap->ops->bmdma_status(ap);
2084        if (unlikely(host_stat & ATA_DMA_ERR)) {
2085                /* error when transferring data to/from memory */
2086                ata_ehi_clear_desc(ehi);
2087                ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2088                ehi->err_mask |= AC_ERR_HOST_BUS;
2089                ehi->action |= ATA_EH_RESET;
2090                return -EINVAL;
2091        }
2092
2093        ap->ops->sff_irq_clear(ap);
2094        __ata_bmdma_stop(ap);
2095
2096        sactive = readl(pp->sactive_block);
2097        done_mask = pp->qc_active ^ sactive;
2098
2099        pp->qc_active &= ~done_mask;
2100        pp->dhfis_bits &= ~done_mask;
2101        pp->dmafis_bits &= ~done_mask;
2102        pp->sdbfis_bits |= done_mask;
2103        ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
2104
2105        if (!ap->qc_active) {
2106                DPRINTK("over\n");
2107                nv_swncq_pp_reinit(ap);
2108                return 0;
2109        }
2110
2111        if (pp->qc_active & pp->dhfis_bits)
2112                return 0;
2113
2114        if ((pp->ncq_flags & ncq_saw_backout) ||
2115            (pp->qc_active ^ pp->dhfis_bits))
2116                /* If the controller did not receive the Device-to-Host
2117                 * Register FIS for a command, the driver needs to reissue it.
2118                 */
2119                lack_dhfis = 1;
2120
2121        DPRINTK("id 0x%x QC: qc_active 0x%llx, "
2122                "SWNCQ: qc_active 0x%X defer_bits %X "
2123                "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2124                ap->print_id, ap->qc_active, pp->qc_active,
2125                pp->defer_queue.defer_bits, pp->dhfis_bits,
2126                pp->dmafis_bits, pp->last_issue_tag);
2127
2128        nv_swncq_fis_reinit(ap);
2129
2130        if (lack_dhfis) {
2131                qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2132                nv_swncq_issue_atacmd(ap, qc);
2133                return 0;
2134        }
2135
2136        if (pp->defer_queue.defer_bits) {
2137                /* send deferral queue command */
2138                qc = nv_swncq_qc_from_dq(ap);
2139                WARN_ON(qc == NULL);
2140                nv_swncq_issue_atacmd(ap, qc);
2141        }
2142
2143        return 0;
2144}
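
/*
 * Editor's note: the done_mask computation in nv_swncq_sdbfis() relies
 * on the fact that a tag still set in pp->qc_active but already cleared
 * from SActive by the device must have completed; XOR-ing the two masks
 * therefore yields exactly the finished tags (assuming the device never
 * sets a tag the driver did not issue).
 */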
2145
2146static inline u32 nv_swncq_tag(struct ata_port *ap)
2147{
2148        struct nv_swncq_port_priv *pp = ap->private_data;
2149        u32 tag;
2150
2151        tag = readb(pp->tag_block) >> 2;
2152        return (tag & 0x1f);
2153}
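
/*
 * Editor's note: the hardware apparently reports the tag of the command
 * that sent the DMA Setup FIS in bits 2..6 of the tag register, hence
 * the ">> 2" and "& 0x1f" above.
 */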
2154
2155static void nv_swncq_dmafis(struct ata_port *ap)
2156{
2157        struct ata_queued_cmd *qc;
2158        unsigned int rw;
2159        u8 dmactl;
2160        u32 tag;
2161        struct nv_swncq_port_priv *pp = ap->private_data;
2162
2163        __ata_bmdma_stop(ap);
2164        tag = nv_swncq_tag(ap);
2165
2166        DPRINTK("dma setup tag 0x%x\n", tag);
2167        qc = ata_qc_from_tag(ap, tag);
2168
2169        if (unlikely(!qc))
2170                return;
2171
2172        rw = qc->tf.flags & ATA_TFLAG_WRITE;
2173
2174        /* load PRD table addr. */
2175        iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->hw_tag,
2176                  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2177
2178        /* specify data direction, triple-check start bit is clear */
2179        dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2180        dmactl &= ~ATA_DMA_WR;
2181        if (!rw)
2182                dmactl |= ATA_DMA_WR;
2183
2184        iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2185}
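
/*
 * Editor's note: the direction bit is from the DMA engine's point of
 * view.  ATA_DMA_WR means "write to memory", so it is set for device
 * reads (!rw) and cleared for device writes, which is why the test in
 * nv_swncq_dmafis() looks inverted at first glance.
 */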
2186
2187static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2188{
2189        struct nv_swncq_port_priv *pp = ap->private_data;
2190        struct ata_queued_cmd *qc;
2191        struct ata_eh_info *ehi = &ap->link.eh_info;
2192        u32 serror;
2193        u8 ata_stat;
2194
2195        ata_stat = ap->ops->sff_check_status(ap);
2196        nv_swncq_irq_clear(ap, fis);
2197        if (!fis)
2198                return;
2199
2200        if (ap->pflags & ATA_PFLAG_FROZEN)
2201                return;
2202
2203        if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
2204                nv_swncq_hotplug(ap, fis);
2205                return;
2206        }
2207
2208        if (!pp->qc_active)
2209                return;
2210
2211        if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
2212                return;
2213        ap->ops->scr_write(&ap->link, SCR_ERROR, serror);
2214
2215        if (ata_stat & ATA_ERR) {
2216                ata_ehi_clear_desc(ehi);
2217                ata_ehi_push_desc(ehi, "ATA error, fis:0x%X", fis);
2218                ehi->err_mask |= AC_ERR_DEV;
2219                ehi->serror |= serror;
2220                ehi->action |= ATA_EH_RESET;
2221                ata_port_freeze(ap);
2222                return;
2223        }
2224
2225        if (fis & NV_SWNCQ_IRQ_BACKOUT) {
2226                /* A backout IRQ means the device backed out of the
2227                 * command; the driver must reissue it some time later.
2228                 */
2229                pp->ncq_flags |= ncq_saw_backout;
2230        }
2231
2232        if (fis & NV_SWNCQ_IRQ_SDBFIS) {
2233                pp->ncq_flags |= ncq_saw_sdb;
2234                DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
2235                        "dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2236                        ap->print_id, pp->qc_active, pp->dhfis_bits,
2237                        pp->dmafis_bits, readl(pp->sactive_block));
2238                if (nv_swncq_sdbfis(ap) < 0)
2239                        goto irq_error;
2240        }
2241
2242        if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
2243                /* The interrupt indicates the new command
2244                 * was transmitted correctly to the drive.
2245                 */
2246                pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
2247                pp->ncq_flags |= ncq_saw_d2h;
2248                if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
2249                        ata_ehi_push_desc(ehi, "illegal fis transaction");
2250                        ehi->err_mask |= AC_ERR_HSM;
2251                        ehi->action |= ATA_EH_RESET;
2252                        goto irq_error;
2253                }
2254
2255                if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
2256                    !(pp->ncq_flags & ncq_saw_dmas)) {
2257                        ata_stat = ap->ops->sff_check_status(ap);
2258                        if (ata_stat & ATA_BUSY)
2259                                goto irq_exit;
2260
2261                        if (pp->defer_queue.defer_bits) {
2262                                DPRINTK("send next command\n");
2263                                qc = nv_swncq_qc_from_dq(ap);
2264                                nv_swncq_issue_atacmd(ap, qc);
2265                        }
2266                }
2267        }
2268
2269        if (fis & NV_SWNCQ_IRQ_DMASETUP) {
2270                /* program the DMA controller with the appropriate PRD table
2271                 * and start the DMA transfer for the requested command.
2272                 */
2273                pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2274                pp->ncq_flags |= ncq_saw_dmas;
2275                nv_swncq_dmafis(ap);
2276        }
2277
2278irq_exit:
2279        return;
2280irq_error:
2281        ata_ehi_push_desc(ehi, "fis:0x%x", fis);
2282        ata_port_freeze(ap);
2283        return;
2284}
2285
2286static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2287{
2288        struct ata_host *host = dev_instance;
2289        unsigned int i;
2290        unsigned int handled = 0;
2291        unsigned long flags;
2292        u32 irq_stat;
2293
2294        spin_lock_irqsave(&host->lock, flags);
2295
2296        irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2297
2298        for (i = 0; i < host->n_ports; i++) {
2299                struct ata_port *ap = host->ports[i];
2300
2301                if (ap->link.sactive) {
2302                        nv_swncq_host_interrupt(ap, (u16)irq_stat);
2303                        handled = 1;
2304                } else {
2305                        if (irq_stat)   /* preserve the hotplug bits */
2306                                nv_swncq_irq_clear(ap, 0xfff0);
2307
2308                        handled += nv_host_intr(ap, (u8)irq_stat);
2309                }
2310                irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2311        }
2312
2313        spin_unlock_irqrestore(&host->lock, flags);
2314
2315        return IRQ_RETVAL(handled);
2316}
2317
2318static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2319{
2320        const struct ata_port_info *ppi[] = { NULL, NULL };
2321        struct nv_pi_priv *ipriv;
2322        struct ata_host *host;
2323        struct nv_host_priv *hpriv;
2324        int rc;
2325        u32 bar;
2326        void __iomem *base;
2327        unsigned long type = ent->driver_data;
2328
2329        // Make sure this is a SATA controller by checking that all standard
2330        // BARs are populated (NVIDIA SATA controllers always have six BARs).
2331        // Otherwise, it's an IDE controller and we ignore it.
2332        for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
2333                if (pci_resource_start(pdev, bar) == 0)
2334                        return -ENODEV;
2335
2336        ata_print_version_once(&pdev->dev, DRV_VERSION);
2337
2338        rc = pcim_enable_device(pdev);
2339        if (rc)
2340                return rc;
2341
2342        /* determine type and allocate host */
2343        if (type == CK804 && adma_enabled) {
2344                dev_notice(&pdev->dev, "Using ADMA mode\n");
2345                type = ADMA;
2346        } else if (type == MCP5x && swncq_enabled) {
2347                dev_notice(&pdev->dev, "Using SWNCQ mode\n");
2348                type = SWNCQ;
2349        }
2350
2351        ppi[0] = &nv_port_info[type];
2352        ipriv = ppi[0]->private_data;
2353        rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
2354        if (rc)
2355                return rc;
2356
2357        hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2358        if (!hpriv)
2359                return -ENOMEM;
2360        hpriv->type = type;
2361        host->private_data = hpriv;
2362
2363        /* request and iomap NV_MMIO_BAR */
2364        rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2365        if (rc)
2366                return rc;
2367
2368        /* configure SCR access */
2369        base = host->iomap[NV_MMIO_BAR];
2370        host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2371        host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
2372
2373        /* enable SATA space for CK804 */
2374        if (type >= CK804) {
2375                u8 regval;
2376
2377                pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2378                regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2379                pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2380        }
2381
2382        /* init ADMA */
2383        if (type == ADMA) {
2384                rc = nv_adma_host_init(host);
2385                if (rc)
2386                        return rc;
2387        } else if (type == SWNCQ)
2388                nv_swncq_host_init(host);
2389
2390        if (msi_enabled) {
2391                dev_notice(&pdev->dev, "Using MSI\n");
2392                pci_enable_msi(pdev);
2393        }
2394
2395        pci_set_master(pdev);
2396        return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
2397}
2398
2399#ifdef CONFIG_PM_SLEEP
2400static int nv_pci_device_resume(struct pci_dev *pdev)
2401{
2402        struct ata_host *host = pci_get_drvdata(pdev);
2403        struct nv_host_priv *hpriv = host->private_data;
2404        int rc;
2405
2406        rc = ata_pci_device_do_resume(pdev);
2407        if (rc)
2408                return rc;
2409
2410        if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2411                if (hpriv->type >= CK804) {
2412                        u8 regval;
2413
2414                        pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2415                        regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2416                        pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2417                }
2418                if (hpriv->type == ADMA) {
2419                        u32 tmp32;
2420                        struct nv_adma_port_priv *pp;
2421                        /* enable/disable ADMA on the ports appropriately */
2422                        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2423
2424                        pp = host->ports[0]->private_data;
2425                        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2426                                tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2427                                           NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2428                        else
2429                                tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
2430                                           NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2431                        pp = host->ports[1]->private_data;
2432                        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2433                                tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
2434                                           NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2435                        else
2436                                tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
2437                                           NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2438
2439                        pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2440                }
2441        }
2442
2443        ata_host_resume(host);
2444
2445        return 0;
2446}
2447#endif
2448
2449static void nv_ck804_host_stop(struct ata_host *host)
2450{
2451        struct pci_dev *pdev = to_pci_dev(host->dev);
2452        u8 regval;
2453
2454        /* disable SATA space for CK804 */
2455        pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2456        regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2457        pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2458}
2459
2460static void nv_adma_host_stop(struct ata_host *host)
2461{
2462        struct pci_dev *pdev = to_pci_dev(host->dev);
2463        u32 tmp32;
2464
2465        /* disable ADMA on the ports */
2466        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2467        tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2468                   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2469                   NV_MCP_SATA_CFG_20_PORT1_EN |
2470                   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2471
2472        pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2473
2474        nv_ck804_host_stop(host);
2475}
2476
2477module_pci_driver(nv_pci_driver);
2478
2479module_param_named(adma, adma_enabled, bool, 0444);
2480MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
2481module_param_named(swncq, swncq_enabled, bool, 0444);
2482MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
2483module_param_named(msi, msi_enabled, bool, 0444);
2484MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");
2485