linux/drivers/ata/sata_nv.c
/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/driver-api/libata.rst
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a similar
 *  fashion to other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME                        "sata_nv"
#define DRV_VERSION                     "3.5"

#define NV_ADMA_DMA_BOUNDARY            0xffffffffUL

enum {
        NV_MMIO_BAR                     = 5,

        NV_PORTS                        = 2,
        NV_PIO_MASK                     = ATA_PIO4,
        NV_MWDMA_MASK                   = ATA_MWDMA2,
        NV_UDMA_MASK                    = ATA_UDMA6,
        NV_PORT0_SCR_REG_OFFSET         = 0x00,
        NV_PORT1_SCR_REG_OFFSET         = 0x40,

        /* INT_STATUS/ENABLE */
        NV_INT_STATUS                   = 0x10,
        NV_INT_ENABLE                   = 0x11,
        NV_INT_STATUS_CK804             = 0x440,
        NV_INT_ENABLE_CK804             = 0x441,

        /* INT_STATUS/ENABLE bits */
        NV_INT_DEV                      = 0x01,
        NV_INT_PM                       = 0x02,
        NV_INT_ADDED                    = 0x04,
        NV_INT_REMOVED                  = 0x08,

        NV_INT_PORT_SHIFT               = 4,    /* each port occupies 4 bits */

        NV_INT_ALL                      = 0x0f,
        NV_INT_MASK                     = NV_INT_DEV |
                                          NV_INT_ADDED | NV_INT_REMOVED,

        /* INT_CONFIG */
        NV_INT_CONFIG                   = 0x12,
        NV_INT_CONFIG_METHD             = 0x01, // 0 = INT, 1 = SMI

        // For PCI config register 20
        NV_MCP_SATA_CFG_20              = 0x50,
        NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
        NV_MCP_SATA_CFG_20_PORT0_EN     = (1 << 17),
        NV_MCP_SATA_CFG_20_PORT1_EN     = (1 << 16),
        NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
        NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),

        NV_ADMA_MAX_CPBS                = 32,
        NV_ADMA_CPB_SZ                  = 128,
        NV_ADMA_APRD_SZ                 = 16,
        NV_ADMA_SGTBL_LEN               = (1024 - NV_ADMA_CPB_SZ) /
                                           NV_ADMA_APRD_SZ,
        NV_ADMA_SGTBL_TOTAL_LEN         = NV_ADMA_SGTBL_LEN + 5,
        NV_ADMA_SGTBL_SZ                = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
        NV_ADMA_PORT_PRIV_DMA_SZ        = NV_ADMA_MAX_CPBS *
                                           (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

        /* BAR5 offset to ADMA general registers */
        NV_ADMA_GEN                     = 0x400,
        NV_ADMA_GEN_CTL                 = 0x00,
        NV_ADMA_NOTIFIER_CLEAR          = 0x30,

        /* BAR5 offset to ADMA ports */
        NV_ADMA_PORT                    = 0x480,

        /* size of ADMA port register space */
        NV_ADMA_PORT_SIZE               = 0x100,

        /* ADMA port registers */
        NV_ADMA_CTL                     = 0x40,
        NV_ADMA_CPB_COUNT               = 0x42,
        NV_ADMA_NEXT_CPB_IDX            = 0x43,
        NV_ADMA_STAT                    = 0x44,
        NV_ADMA_CPB_BASE_LOW            = 0x48,
        NV_ADMA_CPB_BASE_HIGH           = 0x4C,
        NV_ADMA_APPEND                  = 0x50,
        NV_ADMA_NOTIFIER                = 0x68,
        NV_ADMA_NOTIFIER_ERROR          = 0x6C,

        /* NV_ADMA_CTL register bits */
        NV_ADMA_CTL_HOTPLUG_IEN         = (1 << 0),
        NV_ADMA_CTL_CHANNEL_RESET       = (1 << 5),
        NV_ADMA_CTL_GO                  = (1 << 7),
        NV_ADMA_CTL_AIEN                = (1 << 8),
        NV_ADMA_CTL_READ_NON_COHERENT   = (1 << 11),
        NV_ADMA_CTL_WRITE_NON_COHERENT  = (1 << 12),

        /* CPB response flag bits */
        NV_CPB_RESP_DONE                = (1 << 0),
        NV_CPB_RESP_ATA_ERR             = (1 << 3),
        NV_CPB_RESP_CMD_ERR             = (1 << 4),
        NV_CPB_RESP_CPB_ERR             = (1 << 7),

        /* CPB control flag bits */
        NV_CPB_CTL_CPB_VALID            = (1 << 0),
        NV_CPB_CTL_QUEUE                = (1 << 1),
        NV_CPB_CTL_APRD_VALID           = (1 << 2),
        NV_CPB_CTL_IEN                  = (1 << 3),
        NV_CPB_CTL_FPDMA                = (1 << 4),

        /* APRD flags */
        NV_APRD_WRITE                   = (1 << 1),
        NV_APRD_END                     = (1 << 2),
        NV_APRD_CONT                    = (1 << 3),

        /* NV_ADMA_STAT flags */
        NV_ADMA_STAT_TIMEOUT            = (1 << 0),
        NV_ADMA_STAT_HOTUNPLUG          = (1 << 1),
        NV_ADMA_STAT_HOTPLUG            = (1 << 2),
        NV_ADMA_STAT_CPBERR             = (1 << 4),
        NV_ADMA_STAT_SERROR             = (1 << 5),
        NV_ADMA_STAT_CMD_COMPLETE       = (1 << 6),
        NV_ADMA_STAT_IDLE               = (1 << 8),
        NV_ADMA_STAT_LEGACY             = (1 << 9),
        NV_ADMA_STAT_STOPPED            = (1 << 10),
        NV_ADMA_STAT_DONE               = (1 << 12),
        NV_ADMA_STAT_ERR                = NV_ADMA_STAT_CPBERR |
                                          NV_ADMA_STAT_TIMEOUT,

        /* port flags */
        NV_ADMA_PORT_REGISTER_MODE      = (1 << 0),
        NV_ADMA_ATAPI_SETUP_COMPLETE    = (1 << 1),

        /* MCP55 reg offset */
        NV_CTL_MCP55                    = 0x400,
        NV_INT_STATUS_MCP55             = 0x440,
        NV_INT_ENABLE_MCP55             = 0x444,
        NV_NCQ_REG_MCP55                = 0x448,

        /* MCP55 */
        NV_INT_ALL_MCP55                = 0xffff,
        NV_INT_PORT_SHIFT_MCP55         = 16,   /* each port occupies 16 bits */
        NV_INT_MASK_MCP55               = NV_INT_ALL_MCP55 & 0xfffd,

        /* SWNCQ enable bits */
        NV_CTL_PRI_SWNCQ                = 0x02,
        NV_CTL_SEC_SWNCQ                = 0x04,

        /* SW NCQ status bits */
        NV_SWNCQ_IRQ_DEV                = (1 << 0),
        NV_SWNCQ_IRQ_PM                 = (1 << 1),
        NV_SWNCQ_IRQ_ADDED              = (1 << 2),
        NV_SWNCQ_IRQ_REMOVED            = (1 << 3),

        NV_SWNCQ_IRQ_BACKOUT            = (1 << 4),
        NV_SWNCQ_IRQ_SDBFIS             = (1 << 5),
        NV_SWNCQ_IRQ_DHREGFIS           = (1 << 6),
        NV_SWNCQ_IRQ_DMASETUP           = (1 << 7),

        NV_SWNCQ_IRQ_HOTPLUG            = NV_SWNCQ_IRQ_ADDED |
                                          NV_SWNCQ_IRQ_REMOVED,

};

/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
        __le64                  addr;
        __le32                  len;
        u8                      flags;
        u8                      packet_len;
        __le16                  reserved;
};

enum nv_adma_regbits {
        CMDEND  = (1 << 15),            /* end of command list */
        WNB     = (1 << 14),            /* wait-not-BSY */
        IGN     = (1 << 13),            /* ignore this entry */
        CS1n    = (1 << (4 + 8)),       /* std. PATA signals follow... */
        DA2     = (1 << (2 + 8)),
        DA1     = (1 << (1 + 8)),
        DA0     = (1 << (0 + 8)),
};

/*
 * ADMA Command Parameter Block
 *
 * The first 5 SG segments are stored inside the Command Parameter Block
 * itself.  If there are more than 5 segments, the remainder are stored in
 * a separate memory area indicated by next_aprd.
 */
struct nv_adma_cpb {
        u8                      resp_flags;    /* 0 */
        u8                      reserved1;     /* 1 */
        u8                      ctl_flags;     /* 2 */
        /* len is length of taskfile in 64 bit words */
        u8                      len;           /* 3 */
        u8                      tag;           /* 4 */
        u8                      next_cpb_idx;  /* 5 */
        __le16                  reserved2;     /* 6-7 */
        __le16                  tf[12];        /* 8-31 */
        struct nv_adma_prd      aprd[5];       /* 32-111 */
        __le64                  next_aprd;     /* 112-119 */
        __le64                  reserved3;     /* 120-127 */
};


struct nv_adma_port_priv {
        struct nv_adma_cpb      *cpb;
        dma_addr_t              cpb_dma;
        struct nv_adma_prd      *aprd;
        dma_addr_t              aprd_dma;
        void __iomem            *ctl_block;
        void __iomem            *gen_block;
        void __iomem            *notifier_clear_block;
        u64                     adma_dma_mask;
        u8                      flags;
        int                     last_issue_ncq;
};

struct nv_host_priv {
        unsigned long           type;
};

struct defer_queue {
        u32             defer_bits;
        unsigned int    head;
        unsigned int    tail;
        unsigned int    tag[ATA_MAX_QUEUE];
};

enum ncq_saw_flag_list {
        ncq_saw_d2h     = (1U << 0),
        ncq_saw_dmas    = (1U << 1),
        ncq_saw_sdb     = (1U << 2),
        ncq_saw_backout = (1U << 3),
};

struct nv_swncq_port_priv {
        struct ata_bmdma_prd *prd;       /* our SG list */
        dma_addr_t      prd_dma;         /* and its DMA mapping */
        void __iomem    *sactive_block;
        void __iomem    *irq_block;
        void __iomem    *tag_block;
        u32             qc_active;

        unsigned int    last_issue_tag;

        /* FIFO circular queue to store deferred commands */
        struct defer_queue defer_queue;

        /* for NCQ interrupt analysis */
        u32             dhfis_bits;
        u32             dmafis_bits;
        u32             sdbfis_bits;

        unsigned int    ncq_flags;
};


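/*
 * The ADMA general control register carries one interrupt bit per port:
 * bit 19 for port 0 and bit 31 for port 1, i.e. 19 + 12 * port.
 */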
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))

static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);

static int nv_hardreset(struct ata_link *link, unsigned int *class,
                        unsigned long deadline);
static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
static void nv_adma_freeze(struct ata_port *ap);
static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);

static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
static int nv_swncq_slave_config(struct scsi_device *sdev);
static int nv_swncq_port_start(struct ata_port *ap);
static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_swncq_port_resume(struct ata_port *ap);
#endif

enum nv_host_type
{
        GENERIC,
        NFORCE2,
        NFORCE3 = NFORCE2,      /* NF2 == NF3 as far as sata_nv is concerned */
        CK804,
        ADMA,
        MCP5x,
        SWNCQ,
};

static const struct pci_device_id nv_pci_tbl[] = {
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

        { } /* terminate list */
};

static struct pci_driver nv_pci_driver = {
        .name                   = DRV_NAME,
        .id_table               = nv_pci_tbl,
        .probe                  = nv_init_one,
#ifdef CONFIG_PM_SLEEP
        .suspend                = ata_pci_device_suspend,
        .resume                 = nv_pci_device_resume,
#endif
        .remove                 = ata_pci_remove_one,
};

static struct scsi_host_template nv_sht = {
        ATA_BMDMA_SHT(DRV_NAME),
};

static struct scsi_host_template nv_adma_sht = {
        ATA_NCQ_SHT(DRV_NAME),
        .can_queue              = NV_ADMA_MAX_CPBS,
        .sg_tablesize           = NV_ADMA_SGTBL_TOTAL_LEN,
        .dma_boundary           = NV_ADMA_DMA_BOUNDARY,
        .slave_configure        = nv_adma_slave_config,
};

static struct scsi_host_template nv_swncq_sht = {
        ATA_NCQ_SHT(DRV_NAME),
        .can_queue              = ATA_MAX_QUEUE,
        .sg_tablesize           = LIBATA_MAX_PRD,
        .dma_boundary           = ATA_DMA_BOUNDARY,
        .slave_configure        = nv_swncq_slave_config,
};

/*
 * NV SATA controllers have various problems with the hardreset
 * protocol depending on the specific controller and device.
 *
 * GENERIC:
 *
 *  bko11195 reports that the link doesn't come online after hardreset
 *  on generic nv's and there have been several other similar reports
 *  on linux-ide.
 *
 *  bko12351#c23 reports that warmplug on MCP61 doesn't work with
 *  softreset.
 *
 * NF2/3:
 *
 *  bko3352 reports that nf2/3 controllers can't determine the device
 *  signature reliably after hardreset.  The following thread reports
 *  detection failure on cold boot with the standard debouncing timing.
 *
 *  http://thread.gmane.org/gmane.linux.ide/34098
 *
 *  bko12176 reports that hardreset fails to bring up the link during
 *  boot on nf2.
 *
 * CK804:
 *
 *  For initial probing after boot and hot plugging, hardreset mostly
 *  works fine on CK804 but curiously, reprobing on the initial port
 *  by rescanning or rmmod/insmod fails to acquire the initial D2H Reg
 *  FIS in a somewhat nondeterministic way.
 *
 * SWNCQ:
 *
 *  bko12351 reports that when SWNCQ is enabled, hardreset is required
 *  for hotplug to work, yet hardreset can't report a proper signature,
 *  which suggests that mcp5x is closer to nf2 as far as reset
 *  quirkiness is concerned.
 *
 *  bko12703 reports that boot probing fails for an Intel SSD with
 *  hardreset.  The link fails to come online.  Softreset works fine.
 *
 * The failures are varied but the following patterns seem to hold for
 * all flavors.
 *
 * - Softreset during boot always works.
 *
 * - Hardreset during boot sometimes fails to bring up the link on
 *   certain combinations and device signature acquisition is
 *   unreliable.
 *
 * - Hardreset is often necessary after hotplug.
 *
 * So, preferring softreset for boot probing and error handling (as
 * hardreset might bring down the link) but using hardreset for
 * post-boot probing should work around the above issues in most
 * cases.  Define nv_hardreset() which only kicks in for post-boot
 * probing and use it for all variants.
 */
static struct ata_port_operations nv_generic_ops = {
        .inherits               = &ata_bmdma_port_ops,
        .lost_interrupt         = ATA_OP_NULL,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
        .hardreset              = nv_hardreset,
};

static struct ata_port_operations nv_nf2_ops = {
        .inherits               = &nv_generic_ops,
        .freeze                 = nv_nf2_freeze,
        .thaw                   = nv_nf2_thaw,
};

static struct ata_port_operations nv_ck804_ops = {
        .inherits               = &nv_generic_ops,
        .freeze                 = nv_ck804_freeze,
        .thaw                   = nv_ck804_thaw,
        .host_stop              = nv_ck804_host_stop,
};

static struct ata_port_operations nv_adma_ops = {
        .inherits               = &nv_ck804_ops,

        .check_atapi_dma        = nv_adma_check_atapi_dma,
        .sff_tf_read            = nv_adma_tf_read,
        .qc_defer               = ata_std_qc_defer,
        .qc_prep                = nv_adma_qc_prep,
        .qc_issue               = nv_adma_qc_issue,
        .sff_irq_clear          = nv_adma_irq_clear,

        .freeze                 = nv_adma_freeze,
        .thaw                   = nv_adma_thaw,
        .error_handler          = nv_adma_error_handler,
        .post_internal_cmd      = nv_adma_post_internal_cmd,

        .port_start             = nv_adma_port_start,
        .port_stop              = nv_adma_port_stop,
#ifdef CONFIG_PM
        .port_suspend           = nv_adma_port_suspend,
        .port_resume            = nv_adma_port_resume,
#endif
        .host_stop              = nv_adma_host_stop,
};

static struct ata_port_operations nv_swncq_ops = {
        .inherits               = &nv_generic_ops,

        .qc_defer               = ata_std_qc_defer,
        .qc_prep                = nv_swncq_qc_prep,
        .qc_issue               = nv_swncq_qc_issue,

        .freeze                 = nv_mcp55_freeze,
        .thaw                   = nv_mcp55_thaw,
        .error_handler          = nv_swncq_error_handler,

#ifdef CONFIG_PM
        .port_suspend           = nv_swncq_port_suspend,
        .port_resume            = nv_swncq_port_resume,
#endif
        .port_start             = nv_swncq_port_start,
};

struct nv_pi_priv {
        irq_handler_t                   irq_handler;
        struct scsi_host_template       *sht;
};

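/*
 * NV_PI_PRIV() yields a pointer to an anonymous compound literal, letting
 * each ata_port_info entry below carry its interrupt handler and SCSI
 * host template in ->private_data.
 */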
#define NV_PI_PRIV(_irq_handler, _sht) \
        &(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }

static const struct ata_port_info nv_port_info[] = {
        /* generic */
        {
                .flags          = ATA_FLAG_SATA,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_generic_ops,
                .private_data   = NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
        },
        /* nforce2/3 */
        {
                .flags          = ATA_FLAG_SATA,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_nf2_ops,
                .private_data   = NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
        },
        /* ck804 */
        {
                .flags          = ATA_FLAG_SATA,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_ck804_ops,
                .private_data   = NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
        },
        /* ADMA */
        {
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NCQ,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_adma_ops,
                .private_data   = NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
        },
        /* MCP5x */
        {
                .flags          = ATA_FLAG_SATA,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_generic_ops,
                .private_data   = NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
        },
        /* SWNCQ */
        {
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NCQ,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_swncq_ops,
                .private_data   = NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
        },
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static bool adma_enabled;
static bool swncq_enabled = true;
static bool msi_enabled;

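/*
 * Put the port back into legacy register mode: wait (up to 20 polls of
 * 50ns each) for the ADMA engine to go idle, clear the GO bit, then wait
 * for the LEGACY status bit to assert.
 */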
static void nv_adma_register_mode(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp, status;
        int count = 0;

        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
                return;

        status = readw(mmio + NV_ADMA_STAT);
        while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_warn(ap, "timeout waiting for ADMA IDLE, stat=0x%hx\n",
                              status);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

        count = 0;
        status = readw(mmio + NV_ADMA_STAT);
        while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_warn(ap,
                              "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
                              status);

        pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

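/*
 * Switch the port into ADMA mode: set the GO bit and poll until the
 * LEGACY status bit clears and IDLE asserts.  Must not be called once
 * ATAPI setup has pinned the port to the legacy interface.
 */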
static void nv_adma_mode(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp, status;
        int count = 0;

        if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
                return;

        WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

        status = readw(mmio + NV_ADMA_STAT);
        while (((status & NV_ADMA_STAT_LEGACY) ||
              !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_warn(ap,
                        "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
                        status);

        pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}

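/*
 * Adjust DMA restrictions per device: ATAPI devices must go through the
 * legacy interface, which only supports 32-bit DMA, so the segment
 * boundary, sg table size and DMA mask are tightened and ADMA is disabled
 * for the port; otherwise the full ADMA limits are (re)applied.
 */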
static int nv_adma_slave_config(struct scsi_device *sdev)
{
        struct ata_port *ap = ata_shost_to_port(sdev->host);
        struct nv_adma_port_priv *pp = ap->private_data;
        struct nv_adma_port_priv *port0, *port1;
        struct scsi_device *sdev0, *sdev1;
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        unsigned long segment_boundary, flags;
        unsigned short sg_tablesize;
        int rc;
        int adma_enable;
        u32 current_reg, new_reg, config_mask;

        rc = ata_scsi_slave_config(sdev);

        if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
                /* Not a proper libata device, ignore */
                return rc;

        spin_lock_irqsave(ap->lock, flags);

        if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
                /*
                 * NVIDIA reports that ADMA mode does not support ATAPI commands.
                 * Therefore ATAPI commands are sent through the legacy interface.
                 * However, the legacy interface only supports 32-bit DMA.
                 * Restrict DMA parameters as required by the legacy interface
                 * when an ATAPI device is connected.
                 */
                segment_boundary = ATA_DMA_BOUNDARY;
                /* Subtract 1 since an extra entry may be needed for padding, see
                   libata-scsi.c */
                sg_tablesize = LIBATA_MAX_PRD - 1;

                /* Since the legacy DMA engine is in use, we need to disable ADMA
                   on the port. */
                adma_enable = 0;
                nv_adma_register_mode(ap);
        } else {
                segment_boundary = NV_ADMA_DMA_BOUNDARY;
                sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
                adma_enable = 1;
        }

        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

        if (ap->port_no == 1)
                config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
                              NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
        else
                config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
                              NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

        if (adma_enable) {
                new_reg = current_reg | config_mask;
                pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
        } else {
                new_reg = current_reg & ~config_mask;
                pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
        }

        if (current_reg != new_reg)
                pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

        port0 = ap->host->ports[0]->private_data;
        port1 = ap->host->ports[1]->private_data;
        sdev0 = ap->host->ports[0]->link.device[0].sdev;
        sdev1 = ap->host->ports[1]->link.device[0].sdev;
        if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
            (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
                /*
                 * We have to set the DMA mask to 32-bit if either port is in
                 * ATAPI mode, since they are on the same PCI device which is
                 * used for DMA mapping.  If we set the mask we also need to set
                 * the bounce limit on both ports to ensure that the block
                 * layer doesn't feed addresses that cause DMA mapping to
                 * choke.  If either SCSI device is not allocated yet, it's OK
                 * since that port will discover its correct setting when it
                 * does get allocated.
                 * Note: Setting the 32-bit mask should not fail.
                 */
                if (sdev0)
                        blk_queue_bounce_limit(sdev0->request_queue,
                                               ATA_DMA_MASK);
                if (sdev1)
                        blk_queue_bounce_limit(sdev1->request_queue,
                                               ATA_DMA_MASK);

                dma_set_mask(&pdev->dev, ATA_DMA_MASK);
        } else {
                /* This shouldn't fail as it was set to this value before */
                dma_set_mask(&pdev->dev, pp->adma_dma_mask);
                if (sdev0)
                        blk_queue_bounce_limit(sdev0->request_queue,
                                               pp->adma_dma_mask);
                if (sdev1)
                        blk_queue_bounce_limit(sdev1->request_queue,
                                               pp->adma_dma_mask);
        }

        blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
        blk_queue_max_segments(sdev->request_queue, sg_tablesize);
        ata_port_info(ap,
                      "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
                      (unsigned long long)*ap->host->dev->dma_mask,
                      segment_boundary, sg_tablesize);

        spin_unlock_irqrestore(ap->lock, flags);

        return rc;
}

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;
        return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
        /* Other than when internal or pass-through commands are executed,
           the only time this function will be called in ADMA mode will be
           if a command fails. In the failure case we don't care about going
           into register mode with ADMA commands pending, as the commands will
           all shortly be aborted anyway. We assume that NCQ commands are not
           issued via passthrough, which is the only way that switching into
           ADMA mode could abort outstanding commands. */
        nv_adma_register_mode(ap);

        ata_sff_tf_read(ap, tf);
}

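/*
 * Encode a taskfile as up to 12 little-endian CPB entries.  Each entry
 * carries the shadow register address in bits 8-12 and the data byte in
 * bits 0-7; the CMDEND/WNB/IGN control bits sit above them.  Unused slots
 * are padded with IGN and the command entry terminates the list with
 * CMDEND.
 */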
static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
        unsigned int idx = 0;

        if (tf->flags & ATA_TFLAG_ISADDR) {
                if (tf->flags & ATA_TFLAG_LBA48) {
                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature | WNB);
                        cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->feature);
                } else
                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->feature | WNB);

                cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->lbal);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->lbam);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->lbah);
        }

        if (tf->flags & ATA_TFLAG_DEVICE)
                cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

        cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

        while (idx < 12)
                cpb[idx++] = cpu_to_le16(IGN);

        return idx;
}

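/*
 * Examine one CPB's response flags.  Returns 1 if the command completed,
 * 0 if it is still in flight, and -1 after kicking off EH (freeze or
 * abort) on an error.
 */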
static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        u8 flags = pp->cpb[cpb_num].resp_flags;

        VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

        if (unlikely(force_err ||
                     flags & (NV_CPB_RESP_ATA_ERR |
                              NV_CPB_RESP_CMD_ERR |
                              NV_CPB_RESP_CPB_ERR))) {
                struct ata_eh_info *ehi = &ap->link.eh_info;
                int freeze = 0;

                ata_ehi_clear_desc(ehi);
                __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
                if (flags & NV_CPB_RESP_ATA_ERR) {
                        ata_ehi_push_desc(ehi, "ATA error");
                        ehi->err_mask |= AC_ERR_DEV;
                } else if (flags & NV_CPB_RESP_CMD_ERR) {
                        ata_ehi_push_desc(ehi, "CMD error");
                        ehi->err_mask |= AC_ERR_DEV;
                } else if (flags & NV_CPB_RESP_CPB_ERR) {
                        ata_ehi_push_desc(ehi, "CPB error");
                        ehi->err_mask |= AC_ERR_SYSTEM;
                        freeze = 1;
                } else {
                        /* notifier error, but no error in CPB flags? */
                        ata_ehi_push_desc(ehi, "unknown");
                        ehi->err_mask |= AC_ERR_OTHER;
                        freeze = 1;
                }
                /* Kill all commands. EH will determine what actually failed. */
                if (freeze)
                        ata_port_freeze(ap);
                else
                        ata_port_abort(ap);
                return -1;
        }

        if (likely(flags & NV_CPB_RESP_DONE))
                return 1;
        return 0;
}

static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
        struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);

        /* freeze if hotplugged */
        if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
                ata_port_freeze(ap);
                return 1;
        }

        /* bail out if not our interrupt */
        if (!(irq_stat & NV_INT_DEV))
                return 0;

        /* DEV interrupt w/ no active qc? */
        if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
                ata_sff_check_status(ap);
                return 1;
        }

        /* handle interrupt */
        return ata_bmdma_port_intr(ap, qc);
}

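/*
 * Top-level ADMA interrupt handler.  For each port: fall back to the
 * legacy handler when ADMA is disabled (ATAPI) or the port is in register
 * mode, otherwise read and clear the notifier and status registers,
 * freeze the port on hotplug/timeout/SError conditions, and complete any
 * finished CPBs via ata_qc_complete_multiple().
 */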
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        int i, handled = 0;
        u32 notifier_clears[2];

        spin_lock(&host->lock);

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];
                struct nv_adma_port_priv *pp = ap->private_data;
                void __iomem *mmio = pp->ctl_block;
                u16 status;
                u32 gen_ctl;
                u32 notifier, notifier_error;

                notifier_clears[i] = 0;

                /* if ADMA is disabled, use standard ata interrupt handler */
                if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
                        u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
                                >> (NV_INT_PORT_SHIFT * i);
                        handled += nv_host_intr(ap, irq_stat);
                        continue;
                }

                /* if in ATA register mode, check for standard interrupts */
                if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
                        u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
                                >> (NV_INT_PORT_SHIFT * i);
                        if (ata_tag_valid(ap->link.active_tag))
                                /* NV_INT_DEV indication seems unreliable
                                   at times at least in ADMA mode. Force it
                                   on always when a command is active, to
                                   prevent losing interrupts. */
                                irq_stat |= NV_INT_DEV;
                        handled += nv_host_intr(ap, irq_stat);
                }

                notifier = readl(mmio + NV_ADMA_NOTIFIER);
                notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
                notifier_clears[i] = notifier | notifier_error;

                gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

                if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
                    !notifier_error)
                        /* Nothing to do */
                        continue;

                status = readw(mmio + NV_ADMA_STAT);

                /*
                 * Clear status. Ensure the controller sees the
                 * clearing before we start looking at any of the CPB
                 * statuses, so that any CPB completions after this
                 * point in the handler will raise another interrupt.
                 */
                writew(status, mmio + NV_ADMA_STAT);
                readw(mmio + NV_ADMA_STAT); /* flush posted write */
                rmb();

                handled++; /* irq handled if we got here */

                /* freeze if hotplugged or controller error */
                if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
                                       NV_ADMA_STAT_HOTUNPLUG |
                                       NV_ADMA_STAT_TIMEOUT |
                                       NV_ADMA_STAT_SERROR))) {
                        struct ata_eh_info *ehi = &ap->link.eh_info;

                        ata_ehi_clear_desc(ehi);
                        __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
                        if (status & NV_ADMA_STAT_TIMEOUT) {
                                ehi->err_mask |= AC_ERR_SYSTEM;
                                ata_ehi_push_desc(ehi, "timeout");
                        } else if (status & NV_ADMA_STAT_HOTPLUG) {
                                ata_ehi_hotplugged(ehi);
                                ata_ehi_push_desc(ehi, "hotplug");
                        } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
                                ata_ehi_hotplugged(ehi);
                                ata_ehi_push_desc(ehi, "hot unplug");
                        } else if (status & NV_ADMA_STAT_SERROR) {
                                /* let EH analyze SError and figure out cause */
                                ata_ehi_push_desc(ehi, "SError");
                        } else
                                ata_ehi_push_desc(ehi, "unknown");
                        ata_port_freeze(ap);
                        continue;
                }

                if (status & (NV_ADMA_STAT_DONE |
                              NV_ADMA_STAT_CPBERR |
                              NV_ADMA_STAT_CMD_COMPLETE)) {
                        u32 check_commands = notifier_clears[i];
                        u32 done_mask = 0;
                        int pos, rc;

                        if (status & NV_ADMA_STAT_CPBERR) {
                                /* check all active commands */
                                if (ata_tag_valid(ap->link.active_tag))
                                        check_commands = 1 <<
                                                ap->link.active_tag;
                                else
                                        check_commands = ap->link.sactive;
                        }

                        /* check CPBs for completed commands */
                        while ((pos = ffs(check_commands))) {
                                pos--;
                                rc = nv_adma_check_cpb(ap, pos,
                                                notifier_error & (1 << pos));
                                if (rc > 0)
                                        done_mask |= 1 << pos;
                                else if (unlikely(rc < 0))
                                        check_commands = 0;
                                check_commands &= ~(1 << pos);
                        }
                        ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
                }
        }

        if (notifier_clears[0] || notifier_clears[1]) {
                /* Note: Both notifier clear registers must be written
                   if either is set, even if one is zero, according to NVIDIA. */
                struct nv_adma_port_priv *pp = host->ports[0]->private_data;
                writel(notifier_clears[0], pp->notifier_clear_block);
                pp = host->ports[1]->private_data;
                writel(notifier_clears[1], pp->notifier_clear_block);
        }

        spin_unlock(&host->lock);

        return IRQ_RETVAL(handled);
}

static void nv_adma_freeze(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        nv_ck804_freeze(ap);

        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
                return;

        /* clear any outstanding CK804 notifications */
        writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
                ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

        /* Disable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
                mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
}

static void nv_adma_thaw(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        nv_ck804_thaw(ap);

        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
                return;

        /* Enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
                mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
}

static void nv_adma_irq_clear(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u32 notifier_clears[2];

        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
                ata_bmdma_irq_clear(ap);
                return;
        }

        /* clear any outstanding CK804 notifications */
        writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
                ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

        /* clear ADMA status */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* clear notifiers - note both ports need to be written with
           something even though we are only clearing on one */
        if (ap->port_no == 0) {
                notifier_clears[0] = 0xFFFFFFFF;
                notifier_clears[1] = 0;
        } else {
                notifier_clears[0] = 0;
                notifier_clears[1] = 0xFFFFFFFF;
        }
        pp = ap->host->ports[0]->private_data;
        writel(notifier_clears[0], pp->notifier_clear_block);
        pp = ap->host->ports[1]->private_data;
        writel(notifier_clears[1], pp->notifier_clear_block);
}

static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;

        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
                ata_bmdma_post_internal_cmd(qc);
}

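/*
 * Allocate and wire up per-port ADMA state.  The DMA mask is held at
 * 32 bits while the legacy PRD/pad buffers are allocated, then raised to
 * 64 bits for the CPB/APRD area: 32 CPBs of 128 bytes each, followed by
 * one APRD table per tag.
 */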
static int nv_adma_port_start(struct ata_port *ap)
{
        struct device *dev = ap->host->dev;
        struct nv_adma_port_priv *pp;
        int rc;
        void *mem;
        dma_addr_t mem_dma;
        void __iomem *mmio;
        struct pci_dev *pdev = to_pci_dev(dev);
        u16 tmp;

        VPRINTK("ENTER\n");

        /* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
           pad buffers */
        rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
        if (rc)
                return rc;
        rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
        if (rc)
                return rc;

        /* we might fallback to bmdma, allocate bmdma resources */
        rc = ata_bmdma_port_start(ap);
        if (rc)
                return rc;

        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
        if (!pp)
                return -ENOMEM;

        mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
               ap->port_no * NV_ADMA_PORT_SIZE;
        pp->ctl_block = mmio;
        pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
        pp->notifier_clear_block = pp->gen_block +
               NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

        /* Now that the legacy PRD and padding buffer are allocated we can
           safely raise the DMA mask to allocate the CPB/APRD table.
           These are allowed to fail since we store the value that ends up
           being used to set as the bounce limit in slave_config later if
           needed. */
        dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
        pp->adma_dma_mask = *dev->dma_mask;

        mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
                                  &mem_dma, GFP_KERNEL);
        if (!mem)
                return -ENOMEM;
        memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

        /*
         * First item in chunk of DMA memory:
         * 128-byte command parameter block (CPB)
         * one for each command tag
         */
        pp->cpb     = mem;
        pp->cpb_dma = mem_dma;

        writel(mem_dma & 0xFFFFFFFF,    mmio + NV_ADMA_CPB_BASE_LOW);
        writel((mem_dma >> 16) >> 16,   mmio + NV_ADMA_CPB_BASE_HIGH);

        mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
        mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

        /*
         * Second item: block of ADMA_SGTBL_LEN s/g entries
         */
        pp->aprd = mem;
        pp->aprd_dma = mem_dma;

        ap->private_data = pp;

        /* clear any outstanding interrupt conditions */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* initialize port variables */
        pp->flags = NV_ADMA_PORT_REGISTER_MODE;

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* clear GO for register mode, enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
                NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
        udelay(1);
        writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */

        return 0;
}

static void nv_adma_port_stop(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;

        VPRINTK("ENTER\n");
        writew(0, mmio + NV_ADMA_CTL);
}

#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;

        /* Go to register mode - clears GO */
        nv_adma_register_mode(ap);

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* disable interrupt, shut down port */
        writew(0, mmio + NV_ADMA_CTL);

        return 0;
}

static int nv_adma_port_resume(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        /* set CPB block location */
        writel(pp->cpb_dma & 0xFFFFFFFF,        mmio + NV_ADMA_CPB_BASE_LOW);
        writel((pp->cpb_dma >> 16) >> 16,       mmio + NV_ADMA_CPB_BASE_HIGH);

        /* clear any outstanding interrupt conditions */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* initialize port variables */
        pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* clear GO for register mode, enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
                NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
        udelay(1);
        writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */

        return 0;
}
#endif

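/*
 * Point the libata taskfile accessors at the ADMA register space, where
 * the shadow registers are spaced four bytes apart.
 */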
static void nv_adma_setup_port(struct ata_port *ap)
{
        void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
        struct ata_ioports *ioport = &ap->ioaddr;

        VPRINTK("ENTER\n");

        mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;

        ioport->cmd_addr        = mmio;
        ioport->data_addr       = mmio + (ATA_REG_DATA * 4);
        ioport->error_addr      =
        ioport->feature_addr    = mmio + (ATA_REG_ERR * 4);
        ioport->nsect_addr      = mmio + (ATA_REG_NSECT * 4);
        ioport->lbal_addr       = mmio + (ATA_REG_LBAL * 4);
        ioport->lbam_addr       = mmio + (ATA_REG_LBAM * 4);
        ioport->lbah_addr       = mmio + (ATA_REG_LBAH * 4);
        ioport->device_addr     = mmio + (ATA_REG_DEVICE * 4);
        ioport->status_addr     =
        ioport->command_addr    = mmio + (ATA_REG_STATUS * 4);
        ioport->altstatus_addr  =
        ioport->ctl_addr        = mmio + 0x20;
}

static int nv_adma_host_init(struct ata_host *host)
{
        struct pci_dev *pdev = to_pci_dev(host->dev);
        unsigned int i;
        u32 tmp32;

        VPRINTK("ENTER\n");

        /* enable ADMA on the ports */
        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
        tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
                 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
                 NV_MCP_SATA_CFG_20_PORT1_EN |
                 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

        pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

        for (i = 0; i < host->n_ports; i++)
                nv_adma_setup_port(host->ports[i]);

        return 0;
}

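/*
 * Fill one APRD from an SG segment: 64-bit address, byte length and
 * flags.  NV_APRD_END marks the final segment; every other segment gets
 * NV_APRD_CONT except segment 4, the last in-CPB slot, whose continuation
 * is expressed through the CPB's next_aprd pointer instead.
 */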
1329static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1330                              struct scatterlist *sg,
1331                              int idx,
1332                              struct nv_adma_prd *aprd)
1333{
1334        u8 flags = 0;
1335        if (qc->tf.flags & ATA_TFLAG_WRITE)
1336                flags |= NV_APRD_WRITE;
1337        if (idx == qc->n_elem - 1)
1338                flags |= NV_APRD_END;
1339        else if (idx != 4)
1340                flags |= NV_APRD_CONT;
1341
1342        aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
1343        aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1344        aprd->flags = flags;
1345        aprd->packet_len = 0;
1346}
1347
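/* Build the scatter/gather list for a CPB: the first five entries
 * live in the CPB's inline aprd[] array, and any remainder spills
 * into this tag's slot of the external APRD table linked via
 * next_aprd.
 */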
1348static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1349{
1350        struct nv_adma_port_priv *pp = qc->ap->private_data;
1351        struct nv_adma_prd *aprd;
1352        struct scatterlist *sg;
1353        unsigned int si;
1354
1355        VPRINTK("ENTER\n");
1356
1357        for_each_sg(qc->sg, sg, qc->n_elem, si) {
1358                aprd = (si < 5) ? &cpb->aprd[si] :
1359                               &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
1360                nv_adma_fill_aprd(qc, sg, si, aprd);
1361        }
1362        if (si > 5)
1363                cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
1364        else
1365                cpb->next_aprd = cpu_to_le64(0);
1366}
1367
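/* Decide whether a command must bypass the ADMA engine: returns 1
 * for legacy register mode (ATAPI setup complete or polled command),
 * 0 when ADMA can handle it (DMA-mapped or interrupt-driven no-data).
 */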
1368static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1369{
1370        struct nv_adma_port_priv *pp = qc->ap->private_data;
1371
1372        /* ADMA engine can only be used for non-ATAPI DMA commands,
1373           or interrupt-driven no-data commands. */
1374        if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1375           (qc->tf.flags & ATA_TFLAG_POLLING))
1376                return 1;
1377
1378        if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1379           (qc->tf.protocol == ATA_PROT_NODATA))
1380                return 0;
1381
1382        return 1;
1383}
1384
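/* Construct the CPB for a command.  Ordering matters here: resp_flags
 * is parked at DONE and ctl_flags cleared (with write barriers) before
 * the body is filled in, so the controller can never fetch a
 * half-built CPB that is already marked valid.
 */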
1385static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1386{
1387        struct nv_adma_port_priv *pp = qc->ap->private_data;
1388        struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
1389        u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1390                       NV_CPB_CTL_IEN;
1391
1392        if (nv_adma_use_reg_mode(qc)) {
1393                BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1394                        (qc->flags & ATA_QCFLAG_DMAMAP));
1395                nv_adma_register_mode(qc->ap);
1396                ata_bmdma_qc_prep(qc);
1397                return;
1398        }
1399
1400        cpb->resp_flags = NV_CPB_RESP_DONE;
1401        wmb();
1402        cpb->ctl_flags = 0;
1403        wmb();
1404
1405        cpb->len                = 3;
1406        cpb->tag                = qc->tag;
1407        cpb->next_cpb_idx       = 0;
1408
1409        /* turn on NCQ flags for NCQ commands */
1410        if (qc->tf.protocol == ATA_PROT_NCQ)
1411                ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1412
1413        VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1414
1415        nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1416
1417        if (qc->flags & ATA_QCFLAG_DMAMAP) {
1418                nv_adma_fill_sg(qc, cpb);
1419                ctl_flags |= NV_CPB_CTL_APRD_VALID;
1420        } else
1421                memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1422
1423        /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
1424           until we are finished filling in all of the contents */
1425        wmb();
1426        cpb->ctl_flags = ctl_flags;
1427        wmb();
1428        cpb->resp_flags = 0;
1429}
1430
1431static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1432{
1433        struct nv_adma_port_priv *pp = qc->ap->private_data;
1434        void __iomem *mmio = pp->ctl_block;
1435        int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
1436
1437        VPRINTK("ENTER\n");
1438
1439        /* We can't handle result taskfile with NCQ commands, since
1440           retrieving the taskfile switches us out of ADMA mode and would abort
1441           existing commands. */
1442        if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
1443                     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
1444                ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n");
1445                return AC_ERR_SYSTEM;
1446        }
1447
1448        if (nv_adma_use_reg_mode(qc)) {
1449                /* use ATA register mode */
1450                VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
1451                BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1452                        (qc->flags & ATA_QCFLAG_DMAMAP));
1453                nv_adma_register_mode(qc->ap);
1454                return ata_bmdma_qc_issue(qc);
1455        } else
1456                nv_adma_mode(qc->ap);
1457
1458        /* write append register, command tag in lower 8 bits
1459           and (number of cpbs to append -1) in top 8 bits */
1460        wmb();
1461
1462        if (curr_ncq != pp->last_issue_ncq) {
1463                /* Seems to need some delay before switching between NCQ and
1464                   non-NCQ commands, else we get command timeouts and such. */
1465                udelay(20);
1466                pp->last_issue_ncq = curr_ncq;
1467        }
1468
1469        writew(qc->tag, mmio + NV_ADMA_APPEND);
1470
1471        DPRINTK("Issued tag %u\n", qc->tag);
1472
1473        return 0;
1474}
1475
1476static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1477{
1478        struct ata_host *host = dev_instance;
1479        unsigned int i;
1480        unsigned int handled = 0;
1481        unsigned long flags;
1482
1483        spin_lock_irqsave(&host->lock, flags);
1484
1485        for (i = 0; i < host->n_ports; i++) {
1486                struct ata_port *ap = host->ports[i];
1487                struct ata_queued_cmd *qc;
1488
1489                qc = ata_qc_from_tag(ap, ap->link.active_tag);
1490                if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
1491                        handled += ata_bmdma_port_intr(ap, qc);
1492                } else {
1493                        /*
1494                         * No command pending?  Clear the interrupt status
1495                         * anyway, in case an interrupt is still pending.
1496                         */
1497                        ap->ops->sff_check_status(ap);
1498                }
1499        }
1500
1501        spin_unlock_irqrestore(&host->lock, flags);
1502
1503        return IRQ_RETVAL(handled);
1504}
1505
1506static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1507{
1508        int i, handled = 0;
1509
1510        for (i = 0; i < host->n_ports; i++) {
1511                handled += nv_host_intr(host->ports[i], irq_stat);
1512                irq_stat >>= NV_INT_PORT_SHIFT;
1513        }
1514
1515        return IRQ_RETVAL(handled);
1516}
1517
1518static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1519{
1520        struct ata_host *host = dev_instance;
1521        u8 irq_stat;
1522        irqreturn_t ret;
1523
1524        spin_lock(&host->lock);
1525        irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1526        ret = nv_do_interrupt(host, irq_stat);
1527        spin_unlock(&host->lock);
1528
1529        return ret;
1530}
1531
1532static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1533{
1534        struct ata_host *host = dev_instance;
1535        u8 irq_stat;
1536        irqreturn_t ret;
1537
1538        spin_lock(&host->lock);
1539        irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1540        ret = nv_do_interrupt(host, irq_stat);
1541        spin_unlock(&host->lock);
1542
1543        return ret;
1544}
1545
1546static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1547{
1548        if (sc_reg > SCR_CONTROL)
1549                return -EINVAL;
1550
1551        *val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
1552        return 0;
1553}
1554
1555static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1556{
1557        if (sc_reg > SCR_CONTROL)
1558                return -EINVAL;
1559
1560        iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
1561        return 0;
1562}
1563
1564static int nv_hardreset(struct ata_link *link, unsigned int *class,
1565                        unsigned long deadline)
1566{
1567        struct ata_eh_context *ehc = &link->eh_context;
1568
1569        /* Do hardreset iff it's post-boot probing; see the
1570         * comment above the port ops for details.
1571         */
1572        if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
1573            !ata_dev_enabled(link->device))
1574                sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
1575                                    NULL, NULL);
1576        else {
1577                const unsigned long *timing = sata_ehc_deb_timing(ehc);
1578                int rc;
1579
1580                if (!(ehc->i.flags & ATA_EHI_QUIET))
1581                        ata_link_info(link,
1582                                      "nv: skipping hardreset on occupied port\n");
1583
1584                /* make sure the link is online */
1585                rc = sata_link_resume(link, timing, deadline);
1586                /* whine about phy resume failure but proceed */
1587                if (rc && rc != -EOPNOTSUPP)
1588                        ata_link_warn(link, "failed to resume link (errno=%d)\n",
1589                                      rc);
1590        }
1591
1592        /* device signature acquisition is unreliable */
1593        return -EAGAIN;
1594}
1595
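/* nf2/3 freeze/thaw: each port owns a 4-bit field in the INT_ENABLE
 * register, which sits alongside port 0's SCR block.  Freezing masks
 * the field; thawing clears any latched status bits (write-to-clear,
 * presumably) and re-enables NV_INT_MASK.
 */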
1596static void nv_nf2_freeze(struct ata_port *ap)
1597{
1598        void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1599        int shift = ap->port_no * NV_INT_PORT_SHIFT;
1600        u8 mask;
1601
1602        mask = ioread8(scr_addr + NV_INT_ENABLE);
1603        mask &= ~(NV_INT_ALL << shift);
1604        iowrite8(mask, scr_addr + NV_INT_ENABLE);
1605}
1606
1607static void nv_nf2_thaw(struct ata_port *ap)
1608{
1609        void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1610        int shift = ap->port_no * NV_INT_PORT_SHIFT;
1611        u8 mask;
1612
1613        iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1614
1615        mask = ioread8(scr_addr + NV_INT_ENABLE);
1616        mask |= (NV_INT_MASK << shift);
1617        iowrite8(mask, scr_addr + NV_INT_ENABLE);
1618}
1619
1620static void nv_ck804_freeze(struct ata_port *ap)
1621{
1622        void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1623        int shift = ap->port_no * NV_INT_PORT_SHIFT;
1624        u8 mask;
1625
1626        mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1627        mask &= ~(NV_INT_ALL << shift);
1628        writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1629}
1630
1631static void nv_ck804_thaw(struct ata_port *ap)
1632{
1633        void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1634        int shift = ap->port_no * NV_INT_PORT_SHIFT;
1635        u8 mask;
1636
1637        writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1638
1639        mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1640        mask |= (NV_INT_MASK << shift);
1641        writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1642}
1643
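/* MCP55 uses 32-bit interrupt status/enable registers with a wider
 * per-port field (NV_INT_PORT_SHIFT_MCP55), but the freeze/thaw
 * logic mirrors the CK804 variants above.
 */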
1644static void nv_mcp55_freeze(struct ata_port *ap)
1645{
1646        void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1647        int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1648        u32 mask;
1649
1650        writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1651
1652        mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1653        mask &= ~(NV_INT_ALL_MCP55 << shift);
1654        writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1655}
1656
1657static void nv_mcp55_thaw(struct ata_port *ap)
1658{
1659        void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1660        int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1661        u32 mask;
1662
1663        writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1664
1665        mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1666        mask |= (NV_INT_MASK_MCP55 << shift);
1667        writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1668}
1669
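/* ADMA error handling: dump the controller and CPB state for any
 * active tags, drop back to register mode, invalidate every CPB so
 * nothing stale gets executed, and pulse CHANNEL_RESET before handing
 * off to the standard BMDMA error handler.
 */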
1670static void nv_adma_error_handler(struct ata_port *ap)
1671{
1672        struct nv_adma_port_priv *pp = ap->private_data;
1673        if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1674                void __iomem *mmio = pp->ctl_block;
1675                int i;
1676                u16 tmp;
1677
1678                if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
1679                        u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1680                        u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1681                        u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1682                        u32 status = readw(mmio + NV_ADMA_STAT);
1683                        u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1684                        u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
1685
1686                        ata_port_err(ap,
1687                                "EH in ADMA mode, notifier 0x%X "
1688                                "notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1689                                "next cpb count 0x%X next cpb idx 0x%x\n",
1690                                notifier, notifier_error, gen_ctl, status,
1691                                cpb_count, next_cpb_idx);
1692
1693                        for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
1694                                struct nv_adma_cpb *cpb = &pp->cpb[i];
1695                                if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
1696                                    ap->link.sactive & (1 << i))
1697                                        ata_port_err(ap,
1698                                                "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1699                                                i, cpb->ctl_flags, cpb->resp_flags);
1700                        }
1701                }
1702
1703                /* Push us back into port register mode for error handling. */
1704                nv_adma_register_mode(ap);
1705
1706                /* Mark all of the CPBs as invalid to prevent them from
1707                   being executed */
1708                for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1709                        pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1710
1711                /* clear CPB fetch count */
1712                writew(0, mmio + NV_ADMA_CPB_COUNT);
1713
1714                /* Reset channel */
1715                tmp = readw(mmio + NV_ADMA_CTL);
1716                writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1717                readw(mmio + NV_ADMA_CTL);      /* flush posted write */
1718                udelay(1);
1719                writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1720                readw(mmio + NV_ADMA_CTL);      /* flush posted write */
1721        }
1722
1723        ata_bmdma_error_handler(ap);
1724}
1725
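/* The SWNCQ defer queue is a small ring of tags plus a defer_bits
 * bitmap; commands that arrive while another NCQ command is in
 * flight are parked here and reissued one at a time.
 */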
1726static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1727{
1728        struct nv_swncq_port_priv *pp = ap->private_data;
1729        struct defer_queue *dq = &pp->defer_queue;
1730
1731        /* warn if the queue is full */
1732        WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1733        dq->defer_bits |= (1 << qc->tag);
1734        dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
1735}
1736
1737static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1738{
1739        struct nv_swncq_port_priv *pp = ap->private_data;
1740        struct defer_queue *dq = &pp->defer_queue;
1741        unsigned int tag;
1742
1743        if (dq->head == dq->tail)       /* empty queue */
1744                return NULL;
1745
1746        tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1747        dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1748        WARN_ON(!(dq->defer_bits & (1 << tag)));
1749        dq->defer_bits &= ~(1 << tag);
1750
1751        return ata_qc_from_tag(ap, tag);
1752}
1753
1754static void nv_swncq_fis_reinit(struct ata_port *ap)
1755{
1756        struct nv_swncq_port_priv *pp = ap->private_data;
1757
1758        pp->dhfis_bits = 0;
1759        pp->dmafis_bits = 0;
1760        pp->sdbfis_bits = 0;
1761        pp->ncq_flags = 0;
1762}
1763
1764static void nv_swncq_pp_reinit(struct ata_port *ap)
1765{
1766        struct nv_swncq_port_priv *pp = ap->private_data;
1767        struct defer_queue *dq = &pp->defer_queue;
1768
1769        dq->head = 0;
1770        dq->tail = 0;
1771        dq->defer_bits = 0;
1772        pp->qc_active = 0;
1773        pp->last_issue_tag = ATA_TAG_POISON;
1774        nv_swncq_fis_reinit(ap);
1775}
1776
1777static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1778{
1779        struct nv_swncq_port_priv *pp = ap->private_data;
1780
1781        writew(fis, pp->irq_block);
1782}
1783
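/* Stop the BMDMA engine for a port.  ata_bmdma_stop() only looks at
 * qc->ap, so a throwaway qc on the stack is sufficient here.
 */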
1784static void __ata_bmdma_stop(struct ata_port *ap)
1785{
1786        struct ata_queued_cmd qc;
1787
1788        qc.ap = ap;
1789        ata_bmdma_stop(&qc);
1790}
1791
1792static void nv_swncq_ncq_stop(struct ata_port *ap)
1793{
1794        struct nv_swncq_port_priv *pp = ap->private_data;
1795        unsigned int i;
1796        u32 sactive;
1797        u32 done_mask;
1798
1799        ata_port_err(ap, "EH in SWNCQ mode, QC: qc_active 0x%X sactive 0x%X\n",
1800                     ap->qc_active, ap->link.sactive);
1801        ata_port_err(ap,
1802                "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n  "
1803                "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1804                pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1805                pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1806
1807        ata_port_err(ap, "ATA_REG 0x%X ERR_REG 0x%X\n",
1808                     ap->ops->sff_check_status(ap),
1809                     ioread8(ap->ioaddr.error_addr));
1810
1811        sactive = readl(pp->sactive_block);
1812        done_mask = pp->qc_active ^ sactive;
1813
1814        ata_port_err(ap, "tag : dhfis dmafis sdbfis sactive\n");
1815        for (i = 0; i < ATA_MAX_QUEUE; i++) {
1816                u8 err = 0;
1817                if (pp->qc_active & (1 << i))
1818                        err = 0;
1819                else if (done_mask & (1 << i))
1820                        err = 1;
1821                else
1822                        continue;
1823
1824                ata_port_err(ap,
1825                             "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1826                             (pp->dhfis_bits >> i) & 0x1,
1827                             (pp->dmafis_bits >> i) & 0x1,
1828                             (pp->sdbfis_bits >> i) & 0x1,
1829                             (sactive >> i) & 0x1,
1830                             (err ? "error! tag doesn't exist" : " "));
1831        }
1832
1833        nv_swncq_pp_reinit(ap);
1834        ap->ops->sff_irq_clear(ap);
1835        __ata_bmdma_stop(ap);
1836        nv_swncq_irq_clear(ap, 0xffff);
1837}
1838
1839static void nv_swncq_error_handler(struct ata_port *ap)
1840{
1841        struct ata_eh_context *ehc = &ap->link.eh_context;
1842
1843        if (ap->link.sactive) {
1844                nv_swncq_ncq_stop(ap);
1845                ehc->i.action |= ATA_EH_RESET;
1846        }
1847
1848        ata_bmdma_error_handler(ap);
1849}
1850
1851#ifdef CONFIG_PM
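/* SWNCQ suspend/resume: quiesce by clearing and masking the MCP55
 * interrupt registers and toggling the per-port SWNCQ enable bits in
 * NV_CTL_MCP55.  The 0x00fd00fd value written on resume appears to be
 * the per-port set of SWNCQ interrupt sources; hardware docs are not
 * public, so this simply mirrors nv_swncq_host_init() below.
 */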
1852static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1853{
1854        void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1855        u32 tmp;
1856
1857        /* clear irq */
1858        writel(~0, mmio + NV_INT_STATUS_MCP55);
1859
1860        /* disable irq */
1861        writel(0, mmio + NV_INT_ENABLE_MCP55);
1862
1863        /* disable swncq */
1864        tmp = readl(mmio + NV_CTL_MCP55);
1865        tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1866        writel(tmp, mmio + NV_CTL_MCP55);
1867
1868        return 0;
1869}
1870
1871static int nv_swncq_port_resume(struct ata_port *ap)
1872{
1873        void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1874        u32 tmp;
1875
1876        /* clear irq */
1877        writel(~0, mmio + NV_INT_STATUS_MCP55);
1878
1879        /* enable irq */
1880        writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1881
1882        /* enable swncq */
1883        tmp = readl(mmio + NV_CTL_MCP55);
1884        writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1885
1886        return 0;
1887}
1888#endif
1889
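/* One-time SWNCQ host setup.  Clearing bit 7 of config register 0x7f
 * presumably disables the "ECO 398" chip erratum workaround (no
 * public documentation); SWNCQ is then enabled on both ports and the
 * 0x00fd00fd interrupt sources are unmasked.
 */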
1890static void nv_swncq_host_init(struct ata_host *host)
1891{
1892        u32 tmp;
1893        void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1894        struct pci_dev *pdev = to_pci_dev(host->dev);
1895        u8 regval;
1896
1897        /* disable ECO 398 */
1898        pci_read_config_byte(pdev, 0x7f, &regval);
1899        regval &= ~(1 << 7);
1900        pci_write_config_byte(pdev, 0x7f, regval);
1901
1902        /* enable swncq */
1903        tmp = readl(mmio + NV_CTL_MCP55);
1904        VPRINTK("HOST_CTL:0x%X\n", tmp);
1905        writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1906
1907        /* enable irq intr */
1908        tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1909        VPRINTK("HOST_ENABLE:0x%X\n", tmp);
1910        writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1911
1912        /*  clear port irq */
1913        writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1914}
1915
1916static int nv_swncq_slave_config(struct scsi_device *sdev)
1917{
1918        struct ata_port *ap = ata_shost_to_port(sdev->host);
1919        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1920        struct ata_device *dev;
1921        int rc;
1922        u8 rev;
1923        u8 check_maxtor = 0;
1924        unsigned char model_num[ATA_ID_PROD_LEN + 1];
1925
1926        rc = ata_scsi_slave_config(sdev);
1927        if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1928                /* Not a proper libata device, ignore */
1929                return rc;
1930
1931        dev = &ap->link.device[sdev->id];
1932        if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1933                return rc;
1934
1935        /* if MCP51 and Maxtor, then disable ncq */
1936        if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1937                pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1938                check_maxtor = 1;
1939
1940        /* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1941        if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1942                pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1943                pci_read_config_byte(pdev, 0x8, &rev);
1944                if (rev <= 0xa2)
1945                        check_maxtor = 1;
1946        }
1947
1948        if (!check_maxtor)
1949                return rc;
1950
1951        ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1952
1953        if (strncmp(model_num, "Maxtor", 6) == 0) {
1954                ata_scsi_change_queue_depth(sdev, 1);
1955                ata_dev_notice(dev, "Disabling SWNCQ mode (depth %x)\n",
1956                               sdev->queue_depth);
1957        }
1958
1959        return rc;
1960}
1961
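/* Per-port SWNCQ setup: BMDMA resources are still needed for the
 * non-NCQ fallback path, and one PRD table per possible tag is carved
 * out of a single coherent allocation (ATA_PRD_TBL_SZ * ATA_MAX_QUEUE).
 */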
1962static int nv_swncq_port_start(struct ata_port *ap)
1963{
1964        struct device *dev = ap->host->dev;
1965        void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1966        struct nv_swncq_port_priv *pp;
1967        int rc;
1968
1969        /* we might fall back to bmdma, allocate bmdma resources */
1970        rc = ata_bmdma_port_start(ap);
1971        if (rc)
1972                return rc;
1973
1974        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1975        if (!pp)
1976                return -ENOMEM;
1977
1978        pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1979                                      &pp->prd_dma, GFP_KERNEL);
1980        if (!pp->prd)
1981                return -ENOMEM;
1982        memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
1983
1984        ap->private_data = pp;
1985        pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
1986        pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
1987        pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
1988
1989        return 0;
1990}
1991
1992static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1993{
1994        if (qc->tf.protocol != ATA_PROT_NCQ) {
1995                ata_bmdma_qc_prep(qc);
1996                return;
1997        }
1998
1999        if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2000                return;
2001
2002        nv_swncq_fill_sg(qc);
2003}
2004
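/* Build the BMDMA PRD table for this tag.  Entries may not cross a
 * 64KB boundary, so each scatterlist segment is split at 0x10000-byte
 * edges; a length field of 0 encodes a full 64KB.
 */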
2005static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
2006{
2007        struct ata_port *ap = qc->ap;
2008        struct scatterlist *sg;
2009        struct nv_swncq_port_priv *pp = ap->private_data;
2010        struct ata_bmdma_prd *prd;
2011        unsigned int si, idx;
2012
2013        prd = pp->prd + ATA_MAX_PRD * qc->tag;
2014
2015        idx = 0;
2016        for_each_sg(qc->sg, sg, qc->n_elem, si) {
2017                u32 addr, offset;
2018                u32 sg_len, len;
2019
2020                addr = (u32)sg_dma_address(sg);
2021                sg_len = sg_dma_len(sg);
2022
2023                while (sg_len) {
2024                        offset = addr & 0xffff;
2025                        len = sg_len;
2026                        if ((offset + sg_len) > 0x10000)
2027                                len = 0x10000 - offset;
2028
2029                        prd[idx].addr = cpu_to_le32(addr);
2030                        prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2031
2032                        idx++;
2033                        sg_len -= len;
2034                        addr += len;
2035                }
2036        }
2037
2038        prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2039}
2040
2041static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
2042                                          struct ata_queued_cmd *qc)
2043{
2044        struct nv_swncq_port_priv *pp = ap->private_data;
2045
2046        if (qc == NULL)
2047                return 0;
2048
2049        DPRINTK("Enter\n");
2050
2051        writel((1 << qc->tag), pp->sactive_block);
2052        pp->last_issue_tag = qc->tag;
2053        pp->dhfis_bits &= ~(1 << qc->tag);
2054        pp->dmafis_bits &= ~(1 << qc->tag);
2055        pp->qc_active |= (0x1 << qc->tag);
2056
2057        ap->ops->sff_tf_load(ap, &qc->tf);       /* load tf registers */
2058        ap->ops->sff_exec_command(ap, &qc->tf);
2059
2060        DPRINTK("Issued tag %u\n", qc->tag);
2061
2062        return 0;
2063}
2064
2065static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2066{
2067        struct ata_port *ap = qc->ap;
2068        struct nv_swncq_port_priv *pp = ap->private_data;
2069
2070        if (qc->tf.protocol != ATA_PROT_NCQ)
2071                return ata_bmdma_qc_issue(qc);
2072
2073        DPRINTK("Enter\n");
2074
2075        if (!pp->qc_active)
2076                nv_swncq_issue_atacmd(ap, qc);
2077        else
2078                nv_swncq_qc_to_dq(ap, qc);      /* add qc to defer queue */
2079
2080        return 0;
2081}
2082
2083static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2084{
2085        u32 serror;
2086        struct ata_eh_info *ehi = &ap->link.eh_info;
2087
2088        ata_ehi_clear_desc(ehi);
2089
2090        /* AHCI needs SError cleared; otherwise, it might lock up */
2091        sata_scr_read(&ap->link, SCR_ERROR, &serror);
2092        sata_scr_write(&ap->link, SCR_ERROR, serror);
2093
2094        /* analyze @fis */
2095        if (fis & NV_SWNCQ_IRQ_ADDED)
2096                ata_ehi_push_desc(ehi, "hot plug");
2097        else if (fis & NV_SWNCQ_IRQ_REMOVED)
2098                ata_ehi_push_desc(ehi, "hot unplug");
2099
2100        ata_ehi_hotplugged(ehi);
2101
2102        /* okay, let's hand over to EH */
2103        ehi->serror |= serror;
2104
2105        ata_port_freeze(ap);
2106}
2107
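/* Handle a Set Device Bits FIS: tags whose SActive bits the device
 * has cleared (pp->qc_active XOR the live SActive register) are
 * complete.  A command that never received its D2H register FIS must
 * be reissued; otherwise the next deferred command is kicked off.
 */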
2108static int nv_swncq_sdbfis(struct ata_port *ap)
2109{
2110        struct ata_queued_cmd *qc;
2111        struct nv_swncq_port_priv *pp = ap->private_data;
2112        struct ata_eh_info *ehi = &ap->link.eh_info;
2113        u32 sactive;
2114        u32 done_mask;
2115        u8 host_stat;
2116        u8 lack_dhfis = 0;
2117
2118        host_stat = ap->ops->bmdma_status(ap);
2119        if (unlikely(host_stat & ATA_DMA_ERR)) {
2120                /* error when transferring data to/from memory */
2121                ata_ehi_clear_desc(ehi);
2122                ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2123                ehi->err_mask |= AC_ERR_HOST_BUS;
2124                ehi->action |= ATA_EH_RESET;
2125                return -EINVAL;
2126        }
2127
2128        ap->ops->sff_irq_clear(ap);
2129        __ata_bmdma_stop(ap);
2130
2131        sactive = readl(pp->sactive_block);
2132        done_mask = pp->qc_active ^ sactive;
2133
2134        pp->qc_active &= ~done_mask;
2135        pp->dhfis_bits &= ~done_mask;
2136        pp->dmafis_bits &= ~done_mask;
2137        pp->sdbfis_bits |= done_mask;
2138        ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
2139
2140        if (!ap->qc_active) {
2141                DPRINTK("over\n");
2142                nv_swncq_pp_reinit(ap);
2143                return 0;
2144        }
2145
2146        if (pp->qc_active & pp->dhfis_bits)
2147                return 0;
2148
2149        if ((pp->ncq_flags & ncq_saw_backout) ||
2150            (pp->qc_active ^ pp->dhfis_bits))
2151                /* if the controller failed to receive a Device-to-Host
2152                 * register FIS, the driver must reissue the command.
2153                 */
2154                lack_dhfis = 1;
2155
2156        DPRINTK("id 0x%x QC: qc_active 0x%x, "
2157                "SWNCQ:qc_active 0x%X defer_bits %X "
2158                "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2159                ap->print_id, ap->qc_active, pp->qc_active,
2160                pp->defer_queue.defer_bits, pp->dhfis_bits,
2161                pp->dmafis_bits, pp->last_issue_tag);
2162
2163        nv_swncq_fis_reinit(ap);
2164
2165        if (lack_dhfis) {
2166                qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2167                nv_swncq_issue_atacmd(ap, qc);
2168                return 0;
2169        }
2170
2171        if (pp->defer_queue.defer_bits) {
2172                /* send deferral queue command */
2173                qc = nv_swncq_qc_from_dq(ap);
2174                WARN_ON(qc == NULL);
2175                nv_swncq_issue_atacmd(ap, qc);
2176        }
2177
2178        return 0;
2179}
2180
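/* The MCP55 tag register reports the tag from the DMA Setup FIS in
 * bits 2..6; nv_swncq_dmafis() uses it to pick which per-tag PRD
 * table to load into the BMDMA engine.
 */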
2181static inline u32 nv_swncq_tag(struct ata_port *ap)
2182{
2183        struct nv_swncq_port_priv *pp = ap->private_data;
2184        u32 tag;
2185
2186        tag = readb(pp->tag_block) >> 2;
2187        return (tag & 0x1f);
2188}
2189
2190static void nv_swncq_dmafis(struct ata_port *ap)
2191{
2192        struct ata_queued_cmd *qc;
2193        unsigned int rw;
2194        u8 dmactl;
2195        u32 tag;
2196        struct nv_swncq_port_priv *pp = ap->private_data;
2197
2198        __ata_bmdma_stop(ap);
2199        tag = nv_swncq_tag(ap);
2200
2201        DPRINTK("dma setup tag 0x%x\n", tag);
2202        qc = ata_qc_from_tag(ap, tag);
2203
2204        if (unlikely(!qc))
2205                return;
2206
2207        rw = qc->tf.flags & ATA_TFLAG_WRITE;
2208
2209        /* load PRD table addr. */
2210        iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
2211                  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2212
2213        /* specify data direction, triple-check start bit is clear */
2214        dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2215        dmactl &= ~ATA_DMA_WR;
2216        if (!rw)
2217                dmactl |= ATA_DMA_WR;
2218
2219        iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2220}
2221
2222static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2223{
2224        struct nv_swncq_port_priv *pp = ap->private_data;
2225        struct ata_queued_cmd *qc;
2226        struct ata_eh_info *ehi = &ap->link.eh_info;
2227        u32 serror;
2228        u8 ata_stat;
2229
2230        ata_stat = ap->ops->sff_check_status(ap);
2231        nv_swncq_irq_clear(ap, fis);
2232        if (!fis)
2233                return;
2234
2235        if (ap->pflags & ATA_PFLAG_FROZEN)
2236                return;
2237
2238        if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
2239                nv_swncq_hotplug(ap, fis);
2240                return;
2241        }
2242
2243        if (!pp->qc_active)
2244                return;
2245
2246        if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
2247                return;
2248        ap->ops->scr_write(&ap->link, SCR_ERROR, serror);
2249
2250        if (ata_stat & ATA_ERR) {
2251                ata_ehi_clear_desc(ehi);
2252                ata_ehi_push_desc(ehi, "ATA error. fis:0x%X", fis);
2253                ehi->err_mask |= AC_ERR_DEV;
2254                ehi->serror |= serror;
2255                ehi->action |= ATA_EH_RESET;
2256                ata_port_freeze(ap);
2257                return;
2258        }
2259
2260        if (fis & NV_SWNCQ_IRQ_BACKOUT) {
2261                /* On a backout interrupt the driver must issue
2262                 * the command again some time later.
2263                 */
2264                pp->ncq_flags |= ncq_saw_backout;
2265        }
2266
2267        if (fis & NV_SWNCQ_IRQ_SDBFIS) {
2268                pp->ncq_flags |= ncq_saw_sdb;
2269                DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
2270                        "dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2271                        ap->print_id, pp->qc_active, pp->dhfis_bits,
2272                        pp->dmafis_bits, readl(pp->sactive_block));
2273                if (nv_swncq_sdbfis(ap) < 0)
2274                        goto irq_error;
2275        }
2276
2277        if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
2278                /* The interrupt indicates the new command
2279                 * was transmitted correctly to the drive.
2280                 */
2281                pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
2282                pp->ncq_flags |= ncq_saw_d2h;
2283                if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
2284                        ata_ehi_push_desc(ehi, "illegal fis transaction");
2285                        ehi->err_mask |= AC_ERR_HSM;
2286                        ehi->action |= ATA_EH_RESET;
2287                        goto irq_error;
2288                }
2289
2290                if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
2291                    !(pp->ncq_flags & ncq_saw_dmas)) {
2292                        ata_stat = ap->ops->sff_check_status(ap);
2293                        if (ata_stat & ATA_BUSY)
2294                                goto irq_exit;
2295
2296                        if (pp->defer_queue.defer_bits) {
2297                                DPRINTK("send next command\n");
2298                                qc = nv_swncq_qc_from_dq(ap);
2299                                nv_swncq_issue_atacmd(ap, qc);
2300                        }
2301                }
2302        }
2303
2304        if (fis & NV_SWNCQ_IRQ_DMASETUP) {
2305                /* program the dma controller with appropriate PRD buffers
2306                 * and start the DMA transfer for the requested command.
2307                 */
2308                pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2309                pp->ncq_flags |= ncq_saw_dmas;
2310                nv_swncq_dmafis(ap);
2311        }
2312
2313irq_exit:
2314        return;
2315irq_error:
2316        ata_ehi_push_desc(ehi, "fis:0x%x", fis);
2317        ata_port_freeze(ap);
2318        return;
2319}
2320
2321static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2322{
2323        struct ata_host *host = dev_instance;
2324        unsigned int i;
2325        unsigned int handled = 0;
2326        unsigned long flags;
2327        u32 irq_stat;
2328
2329        spin_lock_irqsave(&host->lock, flags);
2330
2331        irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2332
2333        for (i = 0; i < host->n_ports; i++) {
2334                struct ata_port *ap = host->ports[i];
2335
2336                if (ap->link.sactive) {
2337                        nv_swncq_host_interrupt(ap, (u16)irq_stat);
2338                        handled = 1;
2339                } else {
2340                        if (irq_stat)   /* preserve the hotplug bits */
2341                                nv_swncq_irq_clear(ap, 0xfff0);
2342
2343                        handled += nv_host_intr(ap, (u8)irq_stat);
2344                }
2345                irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2346        }
2347
2348        spin_unlock_irqrestore(&host->lock, flags);
2349
2350        return IRQ_RETVAL(handled);
2351}
2352
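/* PCI probe.  Beyond the usual BMDMA host bring-up this selects the
 * programming interface (ADMA on CK804, SWNCQ on MCP5x, otherwise
 * plain BMDMA), maps BAR 5 for the SATA-specific registers, enables
 * the SATA register space on CK804+ parts, and runs the
 * interface-specific host init.
 */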
2353static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2354{
2355        const struct ata_port_info *ppi[] = { NULL, NULL };
2356        struct nv_pi_priv *ipriv;
2357        struct ata_host *host;
2358        struct nv_host_priv *hpriv;
2359        int rc;
2360        u32 bar;
2361        void __iomem *base;
2362        unsigned long type = ent->driver_data;
2363
2364        // Make sure this is a SATA controller by counting the number of bars
2365        // (NVIDIA SATA controllers will always have six bars).  Otherwise,
2366        // it's an IDE controller and we ignore it.
2367        for (bar = 0; bar < 6; bar++)
2368                if (pci_resource_start(pdev, bar) == 0)
2369                        return -ENODEV;
2370
2371        ata_print_version_once(&pdev->dev, DRV_VERSION);
2372
2373        rc = pcim_enable_device(pdev);
2374        if (rc)
2375                return rc;
2376
2377        /* determine type and allocate host */
2378        if (type == CK804 && adma_enabled) {
2379                dev_notice(&pdev->dev, "Using ADMA mode\n");
2380                type = ADMA;
2381        } else if (type == MCP5x && swncq_enabled) {
2382                dev_notice(&pdev->dev, "Using SWNCQ mode\n");
2383                type = SWNCQ;
2384        }
2385
2386        ppi[0] = &nv_port_info[type];
2387        ipriv = ppi[0]->private_data;
2388        rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
2389        if (rc)
2390                return rc;
2391
2392        hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2393        if (!hpriv)
2394                return -ENOMEM;
2395        hpriv->type = type;
2396        host->private_data = hpriv;
2397
2398        /* request and iomap NV_MMIO_BAR */
2399        rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2400        if (rc)
2401                return rc;
2402
2403        /* configure SCR access */
2404        base = host->iomap[NV_MMIO_BAR];
2405        host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2406        host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
2407
2408        /* enable SATA space for CK804 */
2409        if (type >= CK804) {
2410                u8 regval;
2411
2412                pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2413                regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2414                pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2415        }
2416
2417        /* init ADMA */
2418        if (type == ADMA) {
2419                rc = nv_adma_host_init(host);
2420                if (rc)
2421                        return rc;
2422        } else if (type == SWNCQ)
2423                nv_swncq_host_init(host);
2424
2425        if (msi_enabled) {
2426                dev_notice(&pdev->dev, "Using MSI\n");
2427                pci_enable_msi(pdev);
2428        }
2429
2430        pci_set_master(pdev);
2431        return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
2432}
2433
2434#ifdef CONFIG_PM_SLEEP
2435static int nv_pci_device_resume(struct pci_dev *pdev)
2436{
2437        struct ata_host *host = pci_get_drvdata(pdev);
2438        struct nv_host_priv *hpriv = host->private_data;
2439        int rc;
2440
2441        rc = ata_pci_device_do_resume(pdev);
2442        if (rc)
2443                return rc;
2444
2445        if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2446                if (hpriv->type >= CK804) {
2447                        u8 regval;
2448
2449                        pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2450                        regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2451                        pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2452                }
2453                if (hpriv->type == ADMA) {
2454                        u32 tmp32;
2455                        struct nv_adma_port_priv *pp;
2456                        /* enable/disable ADMA on the ports appropriately */
2457                        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2458
2459                        pp = host->ports[0]->private_data;
2460                        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2461                                tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2462                                           NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2463                        else
2464                                tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
2465                                           NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2466                        pp = host->ports[1]->private_data;
2467                        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2468                                tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
2469                                           NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2470                        else
2471                                tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
2472                                           NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2473
2474                        pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2475                }
2476        }
2477
2478        ata_host_resume(host);
2479
2480        return 0;
2481}
2482#endif
2483
2484static void nv_ck804_host_stop(struct ata_host *host)
2485{
2486        struct pci_dev *pdev = to_pci_dev(host->dev);
2487        u8 regval;
2488
2489        /* disable SATA space for CK804 */
2490        pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2491        regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2492        pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2493}
2494
2495static void nv_adma_host_stop(struct ata_host *host)
2496{
2497        struct pci_dev *pdev = to_pci_dev(host->dev);
2498        u32 tmp32;
2499
2500        /* disable ADMA on the ports */
2501        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2502        tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2503                   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2504                   NV_MCP_SATA_CFG_20_PORT1_EN |
2505                   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2506
2507        pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2508
2509        nv_ck804_host_stop(host);
2510}
2511
2512module_pci_driver(nv_pci_driver);
2513
2514module_param_named(adma, adma_enabled, bool, 0444);
2515MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
2516module_param_named(swncq, swncq_enabled, bool, 0444);
2517MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
2518module_param_named(msi, msi_enabled, bool, 0444);
2519MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");
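/* Example usage of the module parameters above (hypothetical values),
 * enabling ADMA and MSI at load time:
 *
 *   modprobe sata_nv adma=1 msi=1
 */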
2520