linux/drivers/ata/libata-sff.c
   1/*
   2 *  libata-sff.c - helper library for PCI IDE BMDMA
   3 *
   4 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
   5 *                  Please ALWAYS copy linux-ide@vger.kernel.org
   6 *                  on emails.
   7 *
   8 *  Copyright 2003-2006 Red Hat, Inc.  All rights reserved.
   9 *  Copyright 2003-2006 Jeff Garzik
  10 *
  11 *
  12 *  This program is free software; you can redistribute it and/or modify
  13 *  it under the terms of the GNU General Public License as published by
  14 *  the Free Software Foundation; either version 2, or (at your option)
  15 *  any later version.
  16 *
  17 *  This program is distributed in the hope that it will be useful,
  18 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  19 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  20 *  GNU General Public License for more details.
  21 *
  22 *  You should have received a copy of the GNU General Public License
  23 *  along with this program; see the file COPYING.  If not, write to
  24 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
  25 *
  26 *
  27 *  libata documentation is available via 'make {ps|pdf}docs',
  28 *  as Documentation/DocBook/libata.*
  29 *
  30 *  Hardware documentation available from http://www.t13.org/ and
  31 *  http://www.sata-io.org/
  32 *
  33 */
  34
  35#include <linux/kernel.h>
  36#include <linux/pci.h>
  37#include <linux/libata.h>
  38#include <linux/highmem.h>
  39
  40#include "libata.h"
  41
  42const struct ata_port_operations ata_sff_port_ops = {
  43        .inherits               = &ata_base_port_ops,
  44
  45        .qc_prep                = ata_sff_qc_prep,
  46        .qc_issue               = ata_sff_qc_issue,
  47        .qc_fill_rtf            = ata_sff_qc_fill_rtf,
  48
  49        .freeze                 = ata_sff_freeze,
  50        .thaw                   = ata_sff_thaw,
  51        .prereset               = ata_sff_prereset,
  52        .softreset              = ata_sff_softreset,
  53        .hardreset              = sata_sff_hardreset,
  54        .postreset              = ata_sff_postreset,
  55        .drain_fifo             = ata_sff_drain_fifo,
  56        .error_handler          = ata_sff_error_handler,
  57        .post_internal_cmd      = ata_sff_post_internal_cmd,
  58
  59        .sff_dev_select         = ata_sff_dev_select,
  60        .sff_check_status       = ata_sff_check_status,
  61        .sff_tf_load            = ata_sff_tf_load,
  62        .sff_tf_read            = ata_sff_tf_read,
  63        .sff_exec_command       = ata_sff_exec_command,
  64        .sff_data_xfer          = ata_sff_data_xfer,
  65        .sff_irq_on             = ata_sff_irq_on,
  66        .sff_irq_clear          = ata_sff_irq_clear,
  67
  68        .lost_interrupt         = ata_sff_lost_interrupt,
  69
  70        .port_start             = ata_sff_port_start,
  71};
  72EXPORT_SYMBOL_GPL(ata_sff_port_ops);
  73
  74const struct ata_port_operations ata_bmdma_port_ops = {
  75        .inherits               = &ata_sff_port_ops,
  76
  77        .mode_filter            = ata_bmdma_mode_filter,
  78
  79        .bmdma_setup            = ata_bmdma_setup,
  80        .bmdma_start            = ata_bmdma_start,
  81        .bmdma_stop             = ata_bmdma_stop,
  82        .bmdma_status           = ata_bmdma_status,
  83};
  84EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
  85
  86const struct ata_port_operations ata_bmdma32_port_ops = {
  87        .inherits               = &ata_bmdma_port_ops,
  88
  89        .sff_data_xfer          = ata_sff_data_xfer32,
  90        .port_start             = ata_sff_port_start32,
  91};
  92EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
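/*
 * The three ops tables above act as templates: a low-level driver normally
 * chains onto one of them via .inherits and overrides only the hooks its
 * hardware needs; the remaining slots are resolved from the inherited table
 * when the host is registered.  A minimal sketch, kept in a comment -- the
 * pata_example_* names are hypothetical:
 *
 *	static struct ata_port_operations pata_example_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.set_piomode	= pata_example_set_piomode,
 *	};
 */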
  93
  94/**
  95 *      ata_fill_sg - Fill PCI IDE PRD table
  96 *      @qc: Metadata associated with taskfile to be transferred
  97 *
  98 *      Fill PCI IDE PRD (scatter-gather) table with segments
  99 *      associated with the current disk command.
 100 *
 101 *      LOCKING:
 102 *      spin_lock_irqsave(host lock)
 103 *
 104 */
 105static void ata_fill_sg(struct ata_queued_cmd *qc)
 106{
 107        struct ata_port *ap = qc->ap;
 108        struct scatterlist *sg;
 109        unsigned int si, pi;
 110
 111        pi = 0;
 112        for_each_sg(qc->sg, sg, qc->n_elem, si) {
 113                u32 addr, offset;
 114                u32 sg_len, len;
 115
 116                /* determine if physical DMA addr spans 64K boundary.
 117                 * Note h/w doesn't support 64-bit, so we unconditionally
 118                 * truncate dma_addr_t to u32.
 119                 */
 120                addr = (u32) sg_dma_address(sg);
 121                sg_len = sg_dma_len(sg);
 122
 123                while (sg_len) {
 124                        offset = addr & 0xffff;
 125                        len = sg_len;
 126                        if ((offset + sg_len) > 0x10000)
 127                                len = 0x10000 - offset;
 128
 129                        ap->prd[pi].addr = cpu_to_le32(addr);
 130                        ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
 131                        VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
 132
 133                        pi++;
 134                        sg_len -= len;
 135                        addr += len;
 136                }
 137        }
 138
 139        ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
 140}
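/*
 * Worked example of the 64K boundary split above (hypothetical values):
 * a 4 KiB segment at bus address 0x0000f800 crosses the 64K boundary at
 * 0x00010000 and is therefore emitted as two PRD entries:
 *
 *	PRD[0] = (0x0000f800, 0x0800)
 *	PRD[1] = (0x00010000, 0x0800)
 *
 * ATA_PRD_EOT is then OR'd into the flags_len word of the last entry.
 */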
 141
 142/**
 143 *      ata_fill_sg_dumb - Fill PCI IDE PRD table
 144 *      @qc: Metadata associated with taskfile to be transferred
 145 *
 146 *      Fill PCI IDE PRD (scatter-gather) table with segments
 147 *      associated with the current disk command. Perform the fill
 148 *      so that we avoid writing any length 64K records for
 149 *      controllers that don't follow the spec.
 150 *
 151 *      LOCKING:
 152 *      spin_lock_irqsave(host lock)
 153 *
 154 */
 155static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
 156{
 157        struct ata_port *ap = qc->ap;
 158        struct scatterlist *sg;
 159        unsigned int si, pi;
 160
 161        pi = 0;
 162        for_each_sg(qc->sg, sg, qc->n_elem, si) {
 163                u32 addr, offset;
 164                u32 sg_len, len, blen;
 165
 166                /* determine if physical DMA addr spans 64K boundary.
 167                 * Note h/w doesn't support 64-bit, so we unconditionally
 168                 * truncate dma_addr_t to u32.
 169                 */
 170                addr = (u32) sg_dma_address(sg);
 171                sg_len = sg_dma_len(sg);
 172
 173                while (sg_len) {
 174                        offset = addr & 0xffff;
 175                        len = sg_len;
 176                        if ((offset + sg_len) > 0x10000)
 177                                len = 0x10000 - offset;
 178
 179                        blen = len & 0xffff;
 180                        ap->prd[pi].addr = cpu_to_le32(addr);
 181                        if (blen == 0) {
 182                                /* Some PATA chipsets like the CS5530 can't
 183                                   cope with 0x0000 meaning 64K as the spec
 184                                   says */
 185                                ap->prd[pi].flags_len = cpu_to_le32(0x8000);
 186                                blen = 0x8000;
 187                                ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
 188                        }
 189                        ap->prd[pi].flags_len = cpu_to_le32(blen);
 190                        VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
 191
 192                        pi++;
 193                        sg_len -= len;
 194                        addr += len;
 195                }
 196        }
 197
 198        ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
 199}
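/*
 * Worked example of the zero-length workaround above (hypothetical values):
 * a full 64 KiB segment at bus address 0x00020000 would need a length field
 * of 0x0000, which the spec defines as 64K but which chipsets like the
 * CS5530 mishandle.  ata_fill_sg_dumb() therefore splits it into two 32 KiB
 * entries:
 *
 *	PRD[0] = (0x00020000, 0x8000)
 *	PRD[1] = (0x00028000, 0x8000)
 */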
 200
 201/**
 202 *      ata_sff_qc_prep - Prepare taskfile for submission
 203 *      @qc: Metadata associated with taskfile to be prepared
 204 *
 205 *      Prepare ATA taskfile for submission.
 206 *
 207 *      LOCKING:
 208 *      spin_lock_irqsave(host lock)
 209 */
 210void ata_sff_qc_prep(struct ata_queued_cmd *qc)
 211{
 212        if (!(qc->flags & ATA_QCFLAG_DMAMAP))
 213                return;
 214
 215        ata_fill_sg(qc);
 216}
 217EXPORT_SYMBOL_GPL(ata_sff_qc_prep);
 218
 219/**
 220 *      ata_sff_dumb_qc_prep - Prepare taskfile for submission
 221 *      @qc: Metadata associated with taskfile to be prepared
 222 *
 223 *      Prepare ATA taskfile for submission.
 224 *
 225 *      LOCKING:
 226 *      spin_lock_irqsave(host lock)
 227 */
 228void ata_sff_dumb_qc_prep(struct ata_queued_cmd *qc)
 229{
 230        if (!(qc->flags & ATA_QCFLAG_DMAMAP))
 231                return;
 232
 233        ata_fill_sg_dumb(qc);
 234}
 235EXPORT_SYMBOL_GPL(ata_sff_dumb_qc_prep);
 236
 237/**
 238 *      ata_sff_check_status - Read device status reg & clear interrupt
 239 *      @ap: port where the device is
 240 *
 241 *      Reads ATA taskfile status register for currently-selected device
 242 *      and return its value. This also clears pending interrupts
 243 *      from this device
 244 *
 245 *      LOCKING:
 246 *      Inherited from caller.
 247 */
 248u8 ata_sff_check_status(struct ata_port *ap)
 249{
 250        return ioread8(ap->ioaddr.status_addr);
 251}
 252EXPORT_SYMBOL_GPL(ata_sff_check_status);
 253
 254/**
 255 *      ata_sff_altstatus - Read device alternate status reg
 256 *      @ap: port where the device is
 257 *
 258 *      Reads ATA taskfile alternate status register for
 259 *      currently-selected device and return its value.
 260 *
 261 *      Note: may NOT be used as the check_altstatus() entry in
 262 *      ata_port_operations.
 263 *
 264 *      LOCKING:
 265 *      Inherited from caller.
 266 */
 267static u8 ata_sff_altstatus(struct ata_port *ap)
 268{
 269        if (ap->ops->sff_check_altstatus)
 270                return ap->ops->sff_check_altstatus(ap);
 271
 272        return ioread8(ap->ioaddr.altstatus_addr);
 273}
 274
 275/**
 276 *      ata_sff_irq_status - Check if the device is busy
 277 *      @ap: port where the device is
 278 *
 279 *      Determine if the port is currently busy. Uses altstatus
 280 *      if available in order to avoid clearing shared IRQ status
  281 *      when finding an IRQ source. Fortunately for us, devices
  282 *      without a ctl register don't share interrupt lines.
 283 *
 284 *      LOCKING:
 285 *      Inherited from caller.
 286 */
 287static u8 ata_sff_irq_status(struct ata_port *ap)
 288{
 289        u8 status;
 290
 291        if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
 292                status = ata_sff_altstatus(ap);
 293                /* Not us: We are busy */
 294                if (status & ATA_BUSY)
 295                        return status;
 296        }
 297        /* Clear INTRQ latch */
 298        status = ap->ops->sff_check_status(ap);
 299        return status;
 300}
 301
 302/**
 303 *      ata_sff_sync - Flush writes
 304 *      @ap: Port to wait for.
 305 *
 306 *      CAUTION:
 307 *      If we have an mmio device with no ctl and no altstatus
 308 *      method this will fail. No such devices are known to exist.
 309 *
 310 *      LOCKING:
 311 *      Inherited from caller.
 312 */
 313
 314static void ata_sff_sync(struct ata_port *ap)
 315{
 316        if (ap->ops->sff_check_altstatus)
 317                ap->ops->sff_check_altstatus(ap);
 318        else if (ap->ioaddr.altstatus_addr)
 319                ioread8(ap->ioaddr.altstatus_addr);
 320}
 321
 322/**
 323 *      ata_sff_pause           -       Flush writes and wait 400nS
 324 *      @ap: Port to pause for.
 325 *
 326 *      CAUTION:
 327 *      If we have an mmio device with no ctl and no altstatus
 328 *      method this will fail. No such devices are known to exist.
 329 *
 330 *      LOCKING:
 331 *      Inherited from caller.
 332 */
 333
 334void ata_sff_pause(struct ata_port *ap)
 335{
 336        ata_sff_sync(ap);
 337        ndelay(400);
 338}
 339EXPORT_SYMBOL_GPL(ata_sff_pause);
 340
 341/**
 342 *      ata_sff_dma_pause       -       Pause before commencing DMA
 343 *      @ap: Port to pause for.
 344 *
 345 *      Perform I/O fencing and ensure sufficient cycle delays occur
 346 *      for the HDMA1:0 transition
 347 */
 348
 349void ata_sff_dma_pause(struct ata_port *ap)
 350{
 351        if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
 352                /* An altstatus read will cause the needed delay without
 353                   messing up the IRQ status */
 354                ata_sff_altstatus(ap);
 355                return;
 356        }
 357        /* There are no DMA controllers without ctl. BUG here to ensure
 358           we never violate the HDMA1:0 transition timing and risk
 359           corruption. */
 360        BUG();
 361}
 362EXPORT_SYMBOL_GPL(ata_sff_dma_pause);
 363
 364/**
 365 *      ata_sff_busy_sleep - sleep until BSY clears, or timeout
 366 *      @ap: port containing status register to be polled
 367 *      @tmout_pat: impatience timeout in msecs
 368 *      @tmout: overall timeout in msecs
 369 *
 370 *      Sleep until ATA Status register bit BSY clears,
 371 *      or a timeout occurs.
 372 *
 373 *      LOCKING:
 374 *      Kernel thread context (may sleep).
 375 *
 376 *      RETURNS:
 377 *      0 on success, -errno otherwise.
 378 */
 379int ata_sff_busy_sleep(struct ata_port *ap,
 380                       unsigned long tmout_pat, unsigned long tmout)
 381{
 382        unsigned long timer_start, timeout;
 383        u8 status;
 384
 385        status = ata_sff_busy_wait(ap, ATA_BUSY, 300);
 386        timer_start = jiffies;
 387        timeout = ata_deadline(timer_start, tmout_pat);
 388        while (status != 0xff && (status & ATA_BUSY) &&
 389               time_before(jiffies, timeout)) {
 390                msleep(50);
 391                status = ata_sff_busy_wait(ap, ATA_BUSY, 3);
 392        }
 393
 394        if (status != 0xff && (status & ATA_BUSY))
 395                ata_port_printk(ap, KERN_WARNING,
 396                                "port is slow to respond, please be patient "
 397                                "(Status 0x%x)\n", status);
 398
 399        timeout = ata_deadline(timer_start, tmout);
 400        while (status != 0xff && (status & ATA_BUSY) &&
 401               time_before(jiffies, timeout)) {
 402                msleep(50);
 403                status = ap->ops->sff_check_status(ap);
 404        }
 405
 406        if (status == 0xff)
 407                return -ENODEV;
 408
 409        if (status & ATA_BUSY) {
 410                ata_port_printk(ap, KERN_ERR, "port failed to respond "
 411                                "(%lu secs, Status 0x%x)\n",
 412                                DIV_ROUND_UP(tmout, 1000), status);
 413                return -EBUSY;
 414        }
 415
 416        return 0;
 417}
 418EXPORT_SYMBOL_GPL(ata_sff_busy_sleep);
 419
 420static int ata_sff_check_ready(struct ata_link *link)
 421{
 422        u8 status = link->ap->ops->sff_check_status(link->ap);
 423
 424        return ata_check_ready(status);
 425}
 426
 427/**
 428 *      ata_sff_wait_ready - sleep until BSY clears, or timeout
 429 *      @link: SFF link to wait ready status for
 430 *      @deadline: deadline jiffies for the operation
 431 *
 432 *      Sleep until ATA Status register bit BSY clears, or timeout
 433 *      occurs.
 434 *
 435 *      LOCKING:
 436 *      Kernel thread context (may sleep).
 437 *
 438 *      RETURNS:
 439 *      0 on success, -errno otherwise.
 440 */
 441int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline)
 442{
 443        return ata_wait_ready(link, deadline, ata_sff_check_ready);
 444}
 445EXPORT_SYMBOL_GPL(ata_sff_wait_ready);
 446
 447/**
 448 *      ata_sff_dev_select - Select device 0/1 on ATA bus
 449 *      @ap: ATA channel to manipulate
 450 *      @device: ATA device (numbered from zero) to select
 451 *
 452 *      Use the method defined in the ATA specification to
 453 *      make either device 0, or device 1, active on the
 454 *      ATA channel.  Works with both PIO and MMIO.
 455 *
 456 *      May be used as the dev_select() entry in ata_port_operations.
 457 *
 458 *      LOCKING:
 459 *      caller.
 460 */
 461void ata_sff_dev_select(struct ata_port *ap, unsigned int device)
 462{
 463        u8 tmp;
 464
 465        if (device == 0)
 466                tmp = ATA_DEVICE_OBS;
 467        else
 468                tmp = ATA_DEVICE_OBS | ATA_DEV1;
 469
 470        iowrite8(tmp, ap->ioaddr.device_addr);
 471        ata_sff_pause(ap);      /* needed; also flushes, for mmio */
 472}
 473EXPORT_SYMBOL_GPL(ata_sff_dev_select);
 474
 475/**
 476 *      ata_dev_select - Select device 0/1 on ATA bus
 477 *      @ap: ATA channel to manipulate
 478 *      @device: ATA device (numbered from zero) to select
 479 *      @wait: non-zero to wait for Status register BSY bit to clear
 480 *      @can_sleep: non-zero if context allows sleeping
 481 *
 482 *      Use the method defined in the ATA specification to
 483 *      make either device 0, or device 1, active on the
 484 *      ATA channel.
 485 *
 486 *      This is a high-level version of ata_sff_dev_select(), which
 487 *      additionally provides the services of inserting the proper
 488 *      pauses and status polling, where needed.
 489 *
 490 *      LOCKING:
 491 *      caller.
 492 */
 493void ata_dev_select(struct ata_port *ap, unsigned int device,
 494                           unsigned int wait, unsigned int can_sleep)
 495{
 496        if (ata_msg_probe(ap))
 497                ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
 498                                "device %u, wait %u\n", device, wait);
 499
 500        if (wait)
 501                ata_wait_idle(ap);
 502
 503        ap->ops->sff_dev_select(ap, device);
 504
 505        if (wait) {
 506                if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
 507                        msleep(150);
 508                ata_wait_idle(ap);
 509        }
 510}
 511
 512/**
 513 *      ata_sff_irq_on - Enable interrupts on a port.
 514 *      @ap: Port on which interrupts are enabled.
 515 *
 516 *      Enable interrupts on a legacy IDE device using MMIO or PIO,
 517 *      wait for idle, clear any pending interrupts.
 518 *
 519 *      LOCKING:
 520 *      Inherited from caller.
 521 */
 522u8 ata_sff_irq_on(struct ata_port *ap)
 523{
 524        struct ata_ioports *ioaddr = &ap->ioaddr;
 525        u8 tmp;
 526
 527        ap->ctl &= ~ATA_NIEN;
 528        ap->last_ctl = ap->ctl;
 529
 530        if (ioaddr->ctl_addr)
 531                iowrite8(ap->ctl, ioaddr->ctl_addr);
 532        tmp = ata_wait_idle(ap);
 533
 534        ap->ops->sff_irq_clear(ap);
 535
 536        return tmp;
 537}
 538EXPORT_SYMBOL_GPL(ata_sff_irq_on);
 539
 540/**
 541 *      ata_sff_irq_clear - Clear PCI IDE BMDMA interrupt.
 542 *      @ap: Port associated with this ATA transaction.
 543 *
 544 *      Clear interrupt and error flags in DMA status register.
 545 *
 546 *      May be used as the irq_clear() entry in ata_port_operations.
 547 *
 548 *      LOCKING:
 549 *      spin_lock_irqsave(host lock)
 550 */
 551void ata_sff_irq_clear(struct ata_port *ap)
 552{
 553        void __iomem *mmio = ap->ioaddr.bmdma_addr;
 554
 555        if (!mmio)
 556                return;
 557
 558        iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
 559}
 560EXPORT_SYMBOL_GPL(ata_sff_irq_clear);
 561
 562/**
 563 *      ata_sff_tf_load - send taskfile registers to host controller
 564 *      @ap: Port to which output is sent
 565 *      @tf: ATA taskfile register set
 566 *
 567 *      Outputs ATA taskfile to standard ATA host controller.
 568 *
 569 *      LOCKING:
 570 *      Inherited from caller.
 571 */
 572void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
 573{
 574        struct ata_ioports *ioaddr = &ap->ioaddr;
 575        unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
 576
 577        if (tf->ctl != ap->last_ctl) {
 578                if (ioaddr->ctl_addr)
 579                        iowrite8(tf->ctl, ioaddr->ctl_addr);
 580                ap->last_ctl = tf->ctl;
 581                ata_wait_idle(ap);
 582        }
 583
 584        if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
 585                WARN_ON_ONCE(!ioaddr->ctl_addr);
 586                iowrite8(tf->hob_feature, ioaddr->feature_addr);
 587                iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
 588                iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
 589                iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
 590                iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
 591                VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
 592                        tf->hob_feature,
 593                        tf->hob_nsect,
 594                        tf->hob_lbal,
 595                        tf->hob_lbam,
 596                        tf->hob_lbah);
 597        }
 598
 599        if (is_addr) {
 600                iowrite8(tf->feature, ioaddr->feature_addr);
 601                iowrite8(tf->nsect, ioaddr->nsect_addr);
 602                iowrite8(tf->lbal, ioaddr->lbal_addr);
 603                iowrite8(tf->lbam, ioaddr->lbam_addr);
 604                iowrite8(tf->lbah, ioaddr->lbah_addr);
 605                VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
 606                        tf->feature,
 607                        tf->nsect,
 608                        tf->lbal,
 609                        tf->lbam,
 610                        tf->lbah);
 611        }
 612
 613        if (tf->flags & ATA_TFLAG_DEVICE) {
 614                iowrite8(tf->device, ioaddr->device_addr);
 615                VPRINTK("device 0x%X\n", tf->device);
 616        }
 617
 618        ata_wait_idle(ap);
 619}
 620EXPORT_SYMBOL_GPL(ata_sff_tf_load);
 621
 622/**
 623 *      ata_sff_tf_read - input device's ATA taskfile shadow registers
 624 *      @ap: Port from which input is read
 625 *      @tf: ATA taskfile register set for storing input
 626 *
 627 *      Reads ATA taskfile registers for currently-selected device
 628 *      into @tf. Assumes the device has a fully SFF compliant task file
  629 *      layout and behaviour. If your device does not (e.g. has a different
  630 *      status method) then you will need to provide a replacement tf_read.
 631 *
 632 *      LOCKING:
 633 *      Inherited from caller.
 634 */
 635void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
 636{
 637        struct ata_ioports *ioaddr = &ap->ioaddr;
 638
 639        tf->command = ata_sff_check_status(ap);
 640        tf->feature = ioread8(ioaddr->error_addr);
 641        tf->nsect = ioread8(ioaddr->nsect_addr);
 642        tf->lbal = ioread8(ioaddr->lbal_addr);
 643        tf->lbam = ioread8(ioaddr->lbam_addr);
 644        tf->lbah = ioread8(ioaddr->lbah_addr);
 645        tf->device = ioread8(ioaddr->device_addr);
 646
 647        if (tf->flags & ATA_TFLAG_LBA48) {
 648                if (likely(ioaddr->ctl_addr)) {
 649                        iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
 650                        tf->hob_feature = ioread8(ioaddr->error_addr);
 651                        tf->hob_nsect = ioread8(ioaddr->nsect_addr);
 652                        tf->hob_lbal = ioread8(ioaddr->lbal_addr);
 653                        tf->hob_lbam = ioread8(ioaddr->lbam_addr);
 654                        tf->hob_lbah = ioread8(ioaddr->lbah_addr);
 655                        iowrite8(tf->ctl, ioaddr->ctl_addr);
 656                        ap->last_ctl = tf->ctl;
 657                } else
 658                        WARN_ON_ONCE(1);
 659        }
 660}
 661EXPORT_SYMBOL_GPL(ata_sff_tf_read);
 662
 663/**
 664 *      ata_sff_exec_command - issue ATA command to host controller
 665 *      @ap: port to which command is being issued
 666 *      @tf: ATA taskfile register set
 667 *
 668 *      Issues ATA command, with proper synchronization with interrupt
 669 *      handler / other threads.
 670 *
 671 *      LOCKING:
 672 *      spin_lock_irqsave(host lock)
 673 */
 674void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
 675{
 676        DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
 677
 678        iowrite8(tf->command, ap->ioaddr.command_addr);
 679        ata_sff_pause(ap);
 680}
 681EXPORT_SYMBOL_GPL(ata_sff_exec_command);
 682
 683/**
 684 *      ata_tf_to_host - issue ATA taskfile to host controller
 685 *      @ap: port to which command is being issued
 686 *      @tf: ATA taskfile register set
 687 *
 688 *      Issues ATA taskfile register set to ATA host controller,
 689 *      with proper synchronization with interrupt handler and
 690 *      other threads.
 691 *
 692 *      LOCKING:
 693 *      spin_lock_irqsave(host lock)
 694 */
 695static inline void ata_tf_to_host(struct ata_port *ap,
 696                                  const struct ata_taskfile *tf)
 697{
 698        ap->ops->sff_tf_load(ap, tf);
 699        ap->ops->sff_exec_command(ap, tf);
 700}
 701
 702/**
 703 *      ata_sff_data_xfer - Transfer data by PIO
 704 *      @dev: device to target
 705 *      @buf: data buffer
 706 *      @buflen: buffer length
 707 *      @rw: read/write
 708 *
 709 *      Transfer data from/to the device data register by PIO.
 710 *
 711 *      LOCKING:
 712 *      Inherited from caller.
 713 *
 714 *      RETURNS:
 715 *      Bytes consumed.
 716 */
 717unsigned int ata_sff_data_xfer(struct ata_device *dev, unsigned char *buf,
 718                               unsigned int buflen, int rw)
 719{
 720        struct ata_port *ap = dev->link->ap;
 721        void __iomem *data_addr = ap->ioaddr.data_addr;
 722        unsigned int words = buflen >> 1;
 723
 724        /* Transfer multiple of 2 bytes */
 725        if (rw == READ)
 726                ioread16_rep(data_addr, buf, words);
 727        else
 728                iowrite16_rep(data_addr, buf, words);
 729
 730        /* Transfer trailing byte, if any. */
 731        if (unlikely(buflen & 0x01)) {
 732                unsigned char pad[2];
 733
 734                /* Point buf to the tail of buffer */
 735                buf += buflen - 1;
 736
 737                /*
 738                 * Use io*16_rep() accessors here as well to avoid pointlessly
 739                 * swapping bytes to and fro on the big endian machines...
 740                 */
 741                if (rw == READ) {
 742                        ioread16_rep(data_addr, pad, 1);
 743                        *buf = pad[0];
 744                } else {
 745                        pad[0] = *buf;
 746                        iowrite16_rep(data_addr, pad, 1);
 747                }
 748                words++;
 749        }
 750
 751        return words << 1;
 752}
 753EXPORT_SYMBOL_GPL(ata_sff_data_xfer);
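/*
 * Note on odd-length transfers (hypothetical values): for buflen = 513 the
 * rep accessor above moves 256 16-bit words (512 bytes) and the final byte
 * is bounced through the 2-byte pad[] buffer, so the device data register is
 * always cycled an even number of bytes and the function returns 514.
 */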
 754
 755/**
 756 *      ata_sff_data_xfer32 - Transfer data by PIO
 757 *      @dev: device to target
 758 *      @buf: data buffer
 759 *      @buflen: buffer length
 760 *      @rw: read/write
 761 *
 762 *      Transfer data from/to the device data register by PIO using 32bit
 763 *      I/O operations.
 764 *
 765 *      LOCKING:
 766 *      Inherited from caller.
 767 *
 768 *      RETURNS:
 769 *      Bytes consumed.
 770 */
 771
 772unsigned int ata_sff_data_xfer32(struct ata_device *dev, unsigned char *buf,
 773                               unsigned int buflen, int rw)
 774{
 775        struct ata_port *ap = dev->link->ap;
 776        void __iomem *data_addr = ap->ioaddr.data_addr;
 777        unsigned int words = buflen >> 2;
 778        int slop = buflen & 3;
 779        
 780        if (!(ap->pflags & ATA_PFLAG_PIO32))
 781                return ata_sff_data_xfer(dev, buf, buflen, rw);
 782
 783        /* Transfer multiple of 4 bytes */
 784        if (rw == READ)
 785                ioread32_rep(data_addr, buf, words);
 786        else
 787                iowrite32_rep(data_addr, buf, words);
 788
 789        /* Transfer trailing bytes, if any */
 790        if (unlikely(slop)) {
 791                unsigned char pad[4];
 792
 793                /* Point buf to the tail of buffer */
 794                buf += buflen - slop;
 795
 796                /*
 797                 * Use io*_rep() accessors here as well to avoid pointlessly
 798                 * swapping bytes to and fro on the big endian machines...
 799                 */
 800                if (rw == READ) {
 801                        if (slop < 3)
 802                                ioread16_rep(data_addr, pad, 1);
 803                        else
 804                                ioread32_rep(data_addr, pad, 1);
 805                        memcpy(buf, pad, slop);
 806                } else {
 807                        memcpy(pad, buf, slop);
 808                        if (slop < 3)
 809                                iowrite16_rep(data_addr, pad, 1);
 810                        else
 811                                iowrite32_rep(data_addr, pad, 1);
 812                }
 813        }
 814        return (buflen + 1) & ~1;
 815}
 816EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);
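/*
 * Note on the slop handling above (hypothetical values): for buflen = 7 the
 * 32-bit path moves one dword (4 bytes), leaving slop = 3, which is bounced
 * through a single 32-bit access via pad[]; the function reports
 * (7 + 1) & ~1 = 8 bytes consumed, so the device is again cycled in even
 * amounts even though only 7 bytes of the caller's buffer are touched.
 */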
 817
 818/**
 819 *      ata_sff_data_xfer_noirq - Transfer data by PIO
 820 *      @dev: device to target
 821 *      @buf: data buffer
 822 *      @buflen: buffer length
 823 *      @rw: read/write
 824 *
 825 *      Transfer data from/to the device data register by PIO. Do the
 826 *      transfer with interrupts disabled.
 827 *
 828 *      LOCKING:
 829 *      Inherited from caller.
 830 *
 831 *      RETURNS:
 832 *      Bytes consumed.
 833 */
 834unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
 835                                     unsigned int buflen, int rw)
 836{
 837        unsigned long flags;
 838        unsigned int consumed;
 839
 840        local_irq_save(flags);
 841        consumed = ata_sff_data_xfer(dev, buf, buflen, rw);
 842        local_irq_restore(flags);
 843
 844        return consumed;
 845}
 846EXPORT_SYMBOL_GPL(ata_sff_data_xfer_noirq);
 847
 848/**
 849 *      ata_pio_sector - Transfer a sector of data.
 850 *      @qc: Command on going
 851 *
 852 *      Transfer qc->sect_size bytes of data from/to the ATA device.
 853 *
 854 *      LOCKING:
 855 *      Inherited from caller.
 856 */
 857static void ata_pio_sector(struct ata_queued_cmd *qc)
 858{
 859        int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
 860        struct ata_port *ap = qc->ap;
 861        struct page *page;
 862        unsigned int offset;
 863        unsigned char *buf;
 864
 865        if (qc->curbytes == qc->nbytes - qc->sect_size)
 866                ap->hsm_task_state = HSM_ST_LAST;
 867
 868        page = sg_page(qc->cursg);
 869        offset = qc->cursg->offset + qc->cursg_ofs;
 870
 871        /* get the current page and offset */
 872        page = nth_page(page, (offset >> PAGE_SHIFT));
 873        offset %= PAGE_SIZE;
 874
 875        DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
 876
 877        if (PageHighMem(page)) {
 878                unsigned long flags;
 879
 880                /* FIXME: use a bounce buffer */
 881                local_irq_save(flags);
 882                buf = kmap_atomic(page, KM_IRQ0);
 883
 884                /* do the actual data transfer */
 885                ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
 886                                       do_write);
 887
 888                kunmap_atomic(buf, KM_IRQ0);
 889                local_irq_restore(flags);
 890        } else {
 891                buf = page_address(page);
 892                ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
 893                                       do_write);
 894        }
 895
 896        qc->curbytes += qc->sect_size;
 897        qc->cursg_ofs += qc->sect_size;
 898
 899        if (qc->cursg_ofs == qc->cursg->length) {
 900                qc->cursg = sg_next(qc->cursg);
 901                qc->cursg_ofs = 0;
 902        }
 903}
 904
 905/**
 906 *      ata_pio_sectors - Transfer one or many sectors.
 907 *      @qc: Command on going
 908 *
 909 *      Transfer one or many sectors of data from/to the
 910 *      ATA device for the DRQ request.
 911 *
 912 *      LOCKING:
 913 *      Inherited from caller.
 914 */
 915static void ata_pio_sectors(struct ata_queued_cmd *qc)
 916{
 917        if (is_multi_taskfile(&qc->tf)) {
 918                /* READ/WRITE MULTIPLE */
 919                unsigned int nsect;
 920
 921                WARN_ON_ONCE(qc->dev->multi_count == 0);
 922
 923                nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
 924                            qc->dev->multi_count);
 925                while (nsect--)
 926                        ata_pio_sector(qc);
 927        } else
 928                ata_pio_sector(qc);
 929
 930        ata_sff_sync(qc->ap); /* flush */
 931}
 932
 933/**
 934 *      atapi_send_cdb - Write CDB bytes to hardware
 935 *      @ap: Port to which ATAPI device is attached.
 936 *      @qc: Taskfile currently active
 937 *
 938 *      When device has indicated its readiness to accept
 939 *      a CDB, this function is called.  Send the CDB.
 940 *
 941 *      LOCKING:
 942 *      caller.
 943 */
 944static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
 945{
 946        /* send SCSI cdb */
 947        DPRINTK("send cdb\n");
 948        WARN_ON_ONCE(qc->dev->cdb_len < 12);
 949
 950        ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
 951        ata_sff_sync(ap);
 952        /* FIXME: If the CDB is for DMA do we need to do the transition delay
 953           or is bmdma_start guaranteed to do it ? */
 954        switch (qc->tf.protocol) {
 955        case ATAPI_PROT_PIO:
 956                ap->hsm_task_state = HSM_ST;
 957                break;
 958        case ATAPI_PROT_NODATA:
 959                ap->hsm_task_state = HSM_ST_LAST;
 960                break;
 961        case ATAPI_PROT_DMA:
 962                ap->hsm_task_state = HSM_ST_LAST;
 963                /* initiate bmdma */
 964                ap->ops->bmdma_start(qc);
 965                break;
 966        }
 967}
 968
 969/**
 970 *      __atapi_pio_bytes - Transfer data from/to the ATAPI device.
 971 *      @qc: Command on going
 972 *      @bytes: number of bytes
 973 *
  974 *      Transfer data from/to the ATAPI device.
 975 *
 976 *      LOCKING:
 977 *      Inherited from caller.
 978 *
 979 */
 980static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
 981{
 982        int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
 983        struct ata_port *ap = qc->ap;
 984        struct ata_device *dev = qc->dev;
 985        struct ata_eh_info *ehi = &dev->link->eh_info;
 986        struct scatterlist *sg;
 987        struct page *page;
 988        unsigned char *buf;
 989        unsigned int offset, count, consumed;
 990
 991next_sg:
 992        sg = qc->cursg;
 993        if (unlikely(!sg)) {
 994                ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
 995                                  "buf=%u cur=%u bytes=%u",
 996                                  qc->nbytes, qc->curbytes, bytes);
 997                return -1;
 998        }
 999
1000        page = sg_page(sg);
1001        offset = sg->offset + qc->cursg_ofs;
1002
1003        /* get the current page and offset */
1004        page = nth_page(page, (offset >> PAGE_SHIFT));
1005        offset %= PAGE_SIZE;
1006
1007        /* don't overrun current sg */
1008        count = min(sg->length - qc->cursg_ofs, bytes);
1009
1010        /* don't cross page boundaries */
1011        count = min(count, (unsigned int)PAGE_SIZE - offset);
1012
1013        DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
1014
1015        if (PageHighMem(page)) {
1016                unsigned long flags;
1017
1018                /* FIXME: use bounce buffer */
1019                local_irq_save(flags);
1020                buf = kmap_atomic(page, KM_IRQ0);
1021
1022                /* do the actual data transfer */
1023                consumed = ap->ops->sff_data_xfer(dev,  buf + offset,
1024                                                                count, rw);
1025
1026                kunmap_atomic(buf, KM_IRQ0);
1027                local_irq_restore(flags);
1028        } else {
1029                buf = page_address(page);
1030                consumed = ap->ops->sff_data_xfer(dev,  buf + offset,
1031                                                                count, rw);
1032        }
1033
1034        bytes -= min(bytes, consumed);
1035        qc->curbytes += count;
1036        qc->cursg_ofs += count;
1037
1038        if (qc->cursg_ofs == sg->length) {
1039                qc->cursg = sg_next(qc->cursg);
1040                qc->cursg_ofs = 0;
1041        }
1042
1043        /*
1044         * There used to be a  WARN_ON_ONCE(qc->cursg && count != consumed);
1045         * Unfortunately __atapi_pio_bytes doesn't know enough to do the WARN
1046         * check correctly as it doesn't know if it is the last request being
1047         * made. Somebody should implement a proper sanity check.
1048         */
1049        if (bytes)
1050                goto next_sg;
1051        return 0;
1052}
1053
1054/**
1055 *      atapi_pio_bytes - Transfer data from/to the ATAPI device.
1056 *      @qc: Command on going
1057 *
 1058 *      Transfer data from/to the ATAPI device.
1059 *
1060 *      LOCKING:
1061 *      Inherited from caller.
1062 */
1063static void atapi_pio_bytes(struct ata_queued_cmd *qc)
1064{
1065        struct ata_port *ap = qc->ap;
1066        struct ata_device *dev = qc->dev;
1067        struct ata_eh_info *ehi = &dev->link->eh_info;
1068        unsigned int ireason, bc_lo, bc_hi, bytes;
1069        int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
1070
1071        /* Abuse qc->result_tf for temp storage of intermediate TF
1072         * here to save some kernel stack usage.
1073         * For normal completion, qc->result_tf is not relevant. For
1074         * error, qc->result_tf is later overwritten by ata_qc_complete().
1075         * So, the correctness of qc->result_tf is not affected.
1076         */
1077        ap->ops->sff_tf_read(ap, &qc->result_tf);
1078        ireason = qc->result_tf.nsect;
1079        bc_lo = qc->result_tf.lbam;
1080        bc_hi = qc->result_tf.lbah;
1081        bytes = (bc_hi << 8) | bc_lo;
1082
1083        /* shall be cleared to zero, indicating xfer of data */
1084        if (unlikely(ireason & (1 << 0)))
1085                goto atapi_check;
1086
1087        /* make sure transfer direction matches expected */
1088        i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
1089        if (unlikely(do_write != i_write))
1090                goto atapi_check;
1091
1092        if (unlikely(!bytes))
1093                goto atapi_check;
1094
1095        VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
1096
1097        if (unlikely(__atapi_pio_bytes(qc, bytes)))
1098                goto err_out;
1099        ata_sff_sync(ap); /* flush */
1100
1101        return;
1102
1103 atapi_check:
1104        ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
1105                          ireason, bytes);
1106 err_out:
1107        qc->err_mask |= AC_ERR_HSM;
1108        ap->hsm_task_state = HSM_ST_ERR;
1109}
1110
1111/**
1112 *      ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
1113 *      @ap: the target ata_port
1114 *      @qc: qc on going
1115 *
1116 *      RETURNS:
1117 *      1 if ok in workqueue, 0 otherwise.
1118 */
1119static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
1120                                                struct ata_queued_cmd *qc)
1121{
1122        if (qc->tf.flags & ATA_TFLAG_POLLING)
1123                return 1;
1124
1125        if (ap->hsm_task_state == HSM_ST_FIRST) {
1126                if (qc->tf.protocol == ATA_PROT_PIO &&
1127                   (qc->tf.flags & ATA_TFLAG_WRITE))
1128                    return 1;
1129
1130                if (ata_is_atapi(qc->tf.protocol) &&
1131                   !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1132                        return 1;
1133        }
1134
1135        return 0;
1136}
1137
1138/**
1139 *      ata_hsm_qc_complete - finish a qc running on standard HSM
1140 *      @qc: Command to complete
1141 *      @in_wq: 1 if called from workqueue, 0 otherwise
1142 *
1143 *      Finish @qc which is running on standard HSM.
1144 *
1145 *      LOCKING:
1146 *      If @in_wq is zero, spin_lock_irqsave(host lock).
1147 *      Otherwise, none on entry and grabs host lock.
1148 */
1149static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
1150{
1151        struct ata_port *ap = qc->ap;
1152        unsigned long flags;
1153
1154        if (ap->ops->error_handler) {
1155                if (in_wq) {
1156                        spin_lock_irqsave(ap->lock, flags);
1157
1158                        /* EH might have kicked in while host lock is
1159                         * released.
1160                         */
1161                        qc = ata_qc_from_tag(ap, qc->tag);
1162                        if (qc) {
1163                                if (likely(!(qc->err_mask & AC_ERR_HSM))) {
1164                                        ap->ops->sff_irq_on(ap);
1165                                        ata_qc_complete(qc);
1166                                } else
1167                                        ata_port_freeze(ap);
1168                        }
1169
1170                        spin_unlock_irqrestore(ap->lock, flags);
1171                } else {
1172                        if (likely(!(qc->err_mask & AC_ERR_HSM)))
1173                                ata_qc_complete(qc);
1174                        else
1175                                ata_port_freeze(ap);
1176                }
1177        } else {
1178                if (in_wq) {
1179                        spin_lock_irqsave(ap->lock, flags);
1180                        ap->ops->sff_irq_on(ap);
1181                        ata_qc_complete(qc);
1182                        spin_unlock_irqrestore(ap->lock, flags);
1183                } else
1184                        ata_qc_complete(qc);
1185        }
1186}
1187
1188/**
1189 *      ata_sff_hsm_move - move the HSM to the next state.
1190 *      @ap: the target ata_port
1191 *      @qc: qc on going
1192 *      @status: current device status
1193 *      @in_wq: 1 if called from workqueue, 0 otherwise
1194 *
1195 *      RETURNS:
1196 *      1 when poll next status needed, 0 otherwise.
1197 */
1198int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
1199                     u8 status, int in_wq)
1200{
1201        struct ata_eh_info *ehi = &ap->link.eh_info;
1202        unsigned long flags = 0;
1203        int poll_next;
1204
1205        WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
1206
1207        /* Make sure ata_sff_qc_issue() does not throw things
1208         * like DMA polling into the workqueue. Notice that
1209         * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
1210         */
1211        WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc));
1212
1213fsm_start:
1214        DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
1215                ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
1216
1217        switch (ap->hsm_task_state) {
1218        case HSM_ST_FIRST:
1219                /* Send first data block or PACKET CDB */
1220
1221                /* If polling, we will stay in the work queue after
1222                 * sending the data. Otherwise, interrupt handler
1223                 * takes over after sending the data.
1224                 */
1225                poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
1226
1227                /* check device status */
1228                if (unlikely((status & ATA_DRQ) == 0)) {
1229                        /* handle BSY=0, DRQ=0 as error */
1230                        if (likely(status & (ATA_ERR | ATA_DF)))
1231                                /* device stops HSM for abort/error */
1232                                qc->err_mask |= AC_ERR_DEV;
1233                        else {
1234                                /* HSM violation. Let EH handle this */
1235                                ata_ehi_push_desc(ehi,
1236                                        "ST_FIRST: !(DRQ|ERR|DF)");
1237                                qc->err_mask |= AC_ERR_HSM;
1238                        }
1239
1240                        ap->hsm_task_state = HSM_ST_ERR;
1241                        goto fsm_start;
1242                }
1243
1244                /* Device should not ask for data transfer (DRQ=1)
1245                 * when it finds something wrong.
1246                 * We ignore DRQ here and stop the HSM by
1247                 * changing hsm_task_state to HSM_ST_ERR and
1248                 * let the EH abort the command or reset the device.
1249                 */
1250                if (unlikely(status & (ATA_ERR | ATA_DF))) {
1251                        /* Some ATAPI tape drives forget to clear the ERR bit
1252                         * when doing the next command (mostly request sense).
1253                         * We ignore ERR here to workaround and proceed sending
1254                         * the CDB.
1255                         */
1256                        if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
1257                                ata_ehi_push_desc(ehi, "ST_FIRST: "
1258                                        "DRQ=1 with device error, "
1259                                        "dev_stat 0x%X", status);
1260                                qc->err_mask |= AC_ERR_HSM;
1261                                ap->hsm_task_state = HSM_ST_ERR;
1262                                goto fsm_start;
1263                        }
1264                }
1265
1266                /* Send the CDB (atapi) or the first data block (ata pio out).
1267                 * During the state transition, interrupt handler shouldn't
1268                 * be invoked before the data transfer is complete and
1269                 * hsm_task_state is changed. Hence, the following locking.
1270                 */
1271                if (in_wq)
1272                        spin_lock_irqsave(ap->lock, flags);
1273
1274                if (qc->tf.protocol == ATA_PROT_PIO) {
1275                        /* PIO data out protocol.
1276                         * send first data block.
1277                         */
1278
1279                        /* ata_pio_sectors() might change the state
1280                         * to HSM_ST_LAST. so, the state is changed here
1281                         * before ata_pio_sectors().
1282                         */
1283                        ap->hsm_task_state = HSM_ST;
1284                        ata_pio_sectors(qc);
1285                } else
1286                        /* send CDB */
1287                        atapi_send_cdb(ap, qc);
1288
1289                if (in_wq)
1290                        spin_unlock_irqrestore(ap->lock, flags);
1291
1292                /* if polling, ata_pio_task() handles the rest.
1293                 * otherwise, interrupt handler takes over from here.
1294                 */
1295                break;
1296
1297        case HSM_ST:
1298                /* complete command or read/write the data register */
1299                if (qc->tf.protocol == ATAPI_PROT_PIO) {
1300                        /* ATAPI PIO protocol */
1301                        if ((status & ATA_DRQ) == 0) {
1302                                /* No more data to transfer or device error.
1303                                 * Device error will be tagged in HSM_ST_LAST.
1304                                 */
1305                                ap->hsm_task_state = HSM_ST_LAST;
1306                                goto fsm_start;
1307                        }
1308
1309                        /* Device should not ask for data transfer (DRQ=1)
1310                         * when it finds something wrong.
1311                         * We ignore DRQ here and stop the HSM by
1312                         * changing hsm_task_state to HSM_ST_ERR and
1313                         * let the EH abort the command or reset the device.
1314                         */
1315                        if (unlikely(status & (ATA_ERR | ATA_DF))) {
1316                                ata_ehi_push_desc(ehi, "ST-ATAPI: "
1317                                        "DRQ=1 with device error, "
1318                                        "dev_stat 0x%X", status);
1319                                qc->err_mask |= AC_ERR_HSM;
1320                                ap->hsm_task_state = HSM_ST_ERR;
1321                                goto fsm_start;
1322                        }
1323
1324                        atapi_pio_bytes(qc);
1325
1326                        if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
1327                                /* bad ireason reported by device */
1328                                goto fsm_start;
1329
1330                } else {
1331                        /* ATA PIO protocol */
1332                        if (unlikely((status & ATA_DRQ) == 0)) {
1333                                /* handle BSY=0, DRQ=0 as error */
1334                                if (likely(status & (ATA_ERR | ATA_DF))) {
1335                                        /* device stops HSM for abort/error */
1336                                        qc->err_mask |= AC_ERR_DEV;
1337
1338                                        /* If diagnostic failed and this is
1339                                         * IDENTIFY, it's likely a phantom
1340                                         * device.  Mark hint.
1341                                         */
1342                                        if (qc->dev->horkage &
1343                                            ATA_HORKAGE_DIAGNOSTIC)
1344                                                qc->err_mask |=
1345                                                        AC_ERR_NODEV_HINT;
1346                                } else {
1347                                        /* HSM violation. Let EH handle this.
1348                                         * Phantom devices also trigger this
1349                                         * condition.  Mark hint.
1350                                         */
1351                                        ata_ehi_push_desc(ehi, "ST-ATA: "
1352                                                "DRQ=0 without device error, "
1353                                                "dev_stat 0x%X", status);
1354                                        qc->err_mask |= AC_ERR_HSM |
1355                                                        AC_ERR_NODEV_HINT;
1356                                }
1357
1358                                ap->hsm_task_state = HSM_ST_ERR;
1359                                goto fsm_start;
1360                        }
1361
1362                        /* For PIO reads, some devices may ask for
 1363                         * data transfer (DRQ=1) along with ERR=1.
1364                         * We respect DRQ here and transfer one
1365                         * block of junk data before changing the
1366                         * hsm_task_state to HSM_ST_ERR.
1367                         *
1368                         * For PIO writes, ERR=1 DRQ=1 doesn't make
1369                         * sense since the data block has been
1370                         * transferred to the device.
1371                         */
1372                        if (unlikely(status & (ATA_ERR | ATA_DF))) {
 1373                                /* data might be corrupted */
1374                                qc->err_mask |= AC_ERR_DEV;
1375
1376                                if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
1377                                        ata_pio_sectors(qc);
1378                                        status = ata_wait_idle(ap);
1379                                }
1380
1381                                if (status & (ATA_BUSY | ATA_DRQ)) {
1382                                        ata_ehi_push_desc(ehi, "ST-ATA: "
1383                                                "BUSY|DRQ persists on ERR|DF, "
1384                                                "dev_stat 0x%X", status);
1385                                        qc->err_mask |= AC_ERR_HSM;
1386                                }
1387
1388                                /* There are oddball controllers with
1389                                 * status register stuck at 0x7f and
1390                                 * lbal/m/h at zero which makes it
1391                                 * pass all other presence detection
1392                                 * mechanisms we have.  Set NODEV_HINT
1393                                 * for it.  Kernel bz#7241.
1394                                 */
1395                                if (status == 0x7f)
1396                                        qc->err_mask |= AC_ERR_NODEV_HINT;
1397
1398                                /* ata_pio_sectors() might change the
1399                                 * state to HSM_ST_LAST. so, the state
1400                                 * is changed after ata_pio_sectors().
1401                                 */
1402                                ap->hsm_task_state = HSM_ST_ERR;
1403                                goto fsm_start;
1404                        }
1405
1406                        ata_pio_sectors(qc);
1407
1408                        if (ap->hsm_task_state == HSM_ST_LAST &&
1409                            (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
1410                                /* all data read */
1411                                status = ata_wait_idle(ap);
1412                                goto fsm_start;
1413                        }
1414                }
1415
1416                poll_next = 1;
1417                break;
1418
1419        case HSM_ST_LAST:
1420                if (unlikely(!ata_ok(status))) {
1421                        qc->err_mask |= __ac_err_mask(status);
1422                        ap->hsm_task_state = HSM_ST_ERR;
1423                        goto fsm_start;
1424                }
1425
1426                /* no more data to transfer */
1427                DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
1428                        ap->print_id, qc->dev->devno, status);
1429
1430                WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));
1431
1432                ap->hsm_task_state = HSM_ST_IDLE;
1433
1434                /* complete taskfile transaction */
1435                ata_hsm_qc_complete(qc, in_wq);
1436
1437                poll_next = 0;
1438                break;
1439
1440        case HSM_ST_ERR:
1441                ap->hsm_task_state = HSM_ST_IDLE;
1442
1443                /* complete taskfile transaction */
1444                ata_hsm_qc_complete(qc, in_wq);
1445
1446                poll_next = 0;
1447                break;
1448        default:
1449                poll_next = 0;
1450                BUG();
1451        }
1452
1453        return poll_next;
1454}
1455EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
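/*
 * Informal sketch of the transitions handled above (not exhaustive):
 *
 *	HSM_ST_FIRST -> HSM_ST       first data block or ATAPI CDB sent
 *	HSM_ST       -> HSM_ST       one DRQ block transferred per pass
 *	HSM_ST       -> HSM_ST_LAST  all data moved, await final status
 *	HSM_ST_LAST  -> HSM_ST_IDLE  command completed via ata_hsm_qc_complete()
 *	any state    -> HSM_ST_ERR   device error or HSM violation; EH takes over
 */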
1456
1457void ata_pio_task(struct work_struct *work)
1458{
1459        struct ata_port *ap =
1460                container_of(work, struct ata_port, port_task.work);
1461        struct ata_queued_cmd *qc = ap->port_task_data;
1462        u8 status;
1463        int poll_next;
1464
1465fsm_start:
1466        WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
1467
1468        /*
1469         * This is purely heuristic.  This is a fast path.
1470         * Sometimes when we enter, BSY will be cleared in
1471         * a chk-status or two.  If not, the drive is probably seeking
1472         * or something.  Snooze for a couple msecs, then
1473         * chk-status again.  If still busy, queue delayed work.
1474         */
1475        status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
1476        if (status & ATA_BUSY) {
1477                msleep(2);
1478                status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
1479                if (status & ATA_BUSY) {
1480                        ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE);
1481                        return;
1482                }
1483        }
1484
1485        /* move the HSM */
1486        poll_next = ata_sff_hsm_move(ap, qc, status, 1);
1487
1488        /* another command or interrupt handler
1489         * may be running at this point.
1490         */
1491        if (poll_next)
1492                goto fsm_start;
1493}
1494
1495/**
1496 *      ata_sff_qc_issue - issue taskfile to device in proto-dependent manner
1497 *      @qc: command to issue to device
1498 *
1499 *      Using various libata functions and hooks, this function
1500 *      starts an ATA command.  ATA commands are grouped into
1501 *      classes called "protocols", and issuing each type of protocol
1502 *      is slightly different.
1503 *
1504 *      May be used as the qc_issue() entry in ata_port_operations.
1505 *
1506 *      LOCKING:
1507 *      spin_lock_irqsave(host lock)
1508 *
1509 *      RETURNS:
1510 *      Zero on success, AC_ERR_* mask on failure
1511 */
1512unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1513{
1514        struct ata_port *ap = qc->ap;
1515
1516        /* Use polling PIO if the LLD doesn't handle
1517         * interrupt-driven PIO and ATAPI CDB interrupts.
1518         */
1519        if (ap->flags & ATA_FLAG_PIO_POLLING) {
1520                switch (qc->tf.protocol) {
1521                case ATA_PROT_PIO:
1522                case ATA_PROT_NODATA:
1523                case ATAPI_PROT_PIO:
1524                case ATAPI_PROT_NODATA:
1525                        qc->tf.flags |= ATA_TFLAG_POLLING;
1526                        break;
1527                case ATAPI_PROT_DMA:
1528                        if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
1529                                /* see ata_dma_blacklisted() */
1530                                BUG();
1531                        break;
1532                default:
1533                        break;
1534                }
1535        }
1536
1537        /* select the device */
1538        ata_dev_select(ap, qc->dev->devno, 1, 0);
1539
1540        /* start the command */
1541        switch (qc->tf.protocol) {
1542        case ATA_PROT_NODATA:
1543                if (qc->tf.flags & ATA_TFLAG_POLLING)
1544                        ata_qc_set_polling(qc);
1545
1546                ata_tf_to_host(ap, &qc->tf);
1547                ap->hsm_task_state = HSM_ST_LAST;
1548
1549                if (qc->tf.flags & ATA_TFLAG_POLLING)
1550                        ata_pio_queue_task(ap, qc, 0);
1551
1552                break;
1553
1554        case ATA_PROT_DMA:
1555                WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
1556
1557                ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
1558                ap->ops->bmdma_setup(qc);           /* set up bmdma */
1559                ap->ops->bmdma_start(qc);           /* initiate bmdma */
1560                ap->hsm_task_state = HSM_ST_LAST;
1561                break;
1562
1563        case ATA_PROT_PIO:
1564                if (qc->tf.flags & ATA_TFLAG_POLLING)
1565                        ata_qc_set_polling(qc);
1566
1567                ata_tf_to_host(ap, &qc->tf);
1568
1569                if (qc->tf.flags & ATA_TFLAG_WRITE) {
1570                        /* PIO data out protocol */
1571                        ap->hsm_task_state = HSM_ST_FIRST;
1572                        ata_pio_queue_task(ap, qc, 0);
1573
1574                        /* always send the first data block using
1575                         * the ata_pio_task() codepath.
1576                         */
1577                } else {
1578                        /* PIO data in protocol */
1579                        ap->hsm_task_state = HSM_ST;
1580
1581                        if (qc->tf.flags & ATA_TFLAG_POLLING)
1582                                ata_pio_queue_task(ap, qc, 0);
1583
1584                        /* if polling, ata_pio_task() handles the rest.
1585                         * otherwise, interrupt handler takes over from here.
1586                         */
1587                }
1588
1589                break;
1590
1591        case ATAPI_PROT_PIO:
1592        case ATAPI_PROT_NODATA:
1593                if (qc->tf.flags & ATA_TFLAG_POLLING)
1594                        ata_qc_set_polling(qc);
1595
1596                ata_tf_to_host(ap, &qc->tf);
1597
1598                ap->hsm_task_state = HSM_ST_FIRST;
1599
1600                /* send cdb by polling if no cdb interrupt */
1601                if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
1602                    (qc->tf.flags & ATA_TFLAG_POLLING))
1603                        ata_pio_queue_task(ap, qc, 0);
1604                break;
1605
1606        case ATAPI_PROT_DMA:
1607                WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
1608
1609                ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
1610                ap->ops->bmdma_setup(qc);           /* set up bmdma */
1611                ap->hsm_task_state = HSM_ST_FIRST;
1612
1613                /* send cdb by polling if no cdb interrupt */
1614                if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1615                        ata_pio_queue_task(ap, qc, 0);
1616                break;
1617
1618        default:
1619                WARN_ON_ONCE(1);
1620                return AC_ERR_SYSTEM;
1621        }
1622
1623        return 0;
1624}
1625EXPORT_SYMBOL_GPL(ata_sff_qc_issue);
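
/*
 * Illustrative sketch (not part of libata, kept out of the build): how a
 * hypothetical LLD whose hardware cannot raise command-completion
 * interrupts would lean on ata_sff_qc_issue().  With ATA_FLAG_PIO_POLLING
 * set, PIO/NODATA commands get ATA_TFLAG_POLLING and are completed by
 * ata_pio_task() rather than the IRQ handler.  "example_port_ops",
 * "example_port_info" and the pio_mask value are assumptions.
 */
#if 0
static struct ata_port_operations example_port_ops = {
        .inherits       = &ata_sff_port_ops,    /* provides ata_sff_qc_issue */
};

static const struct ata_port_info example_port_info = {
        .flags          = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_POLLING,
        .pio_mask       = ATA_PIO4,
        .port_ops       = &example_port_ops,
};
#endif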
1626
1627/**
1628 *      ata_sff_qc_fill_rtf - fill result TF using ->sff_tf_read
1629 *      @qc: qc to fill result TF for
1630 *
1631 *      @qc is finished and result TF needs to be filled.  Fill it
1632 *      using ->sff_tf_read.
1633 *
1634 *      LOCKING:
1635 *      spin_lock_irqsave(host lock)
1636 *
1637 *      RETURNS:
1638 *      true indicating that result TF is successfully filled.
1639 */
1640bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
1641{
1642        qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf);
1643        return true;
1644}
1645EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);
1646
1647/**
1648 *      ata_sff_host_intr - Handle host interrupt for given (port, task)
1649 *      @ap: Port on which interrupt arrived (possibly...)
1650 *      @qc: Taskfile currently active in engine
1651 *
1652 *      Handle host interrupt for the given queued command.  DMA
1653 *      completion, interrupt-driven PIO and ATAPI CDB interrupts are
1654 *      handled here; polled commands are driven by ata_pio_task() instead.
1655 *
1656 *      LOCKING:
1657 *      spin_lock_irqsave(host lock)
1658 *
1659 *      RETURNS:
1660 *      One if interrupt was handled, zero if not (shared irq).
1661 */
1662unsigned int ata_sff_host_intr(struct ata_port *ap,
1663                                      struct ata_queued_cmd *qc)
1664{
1665        struct ata_eh_info *ehi = &ap->link.eh_info;
1666        u8 status, host_stat = 0;
1667
1668        VPRINTK("ata%u: protocol %d task_state %d\n",
1669                ap->print_id, qc->tf.protocol, ap->hsm_task_state);
1670
1671        /* Check whether we are expecting interrupt in this state */
1672        switch (ap->hsm_task_state) {
1673        case HSM_ST_FIRST:
1674                /* Some pre-ATAPI-4 devices assert INTRQ
1675                 * in this state when ready to receive CDB.
1676                 */
1677
1678                /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
1679                 * The flag is only set for ATAPI devices, so there is no
1680                 * need to check ata_is_atapi(qc->tf.protocol) again.
1681                 */
1682                if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1683                        goto idle_irq;
1684                break;
1685        case HSM_ST_LAST:
1686                if (qc->tf.protocol == ATA_PROT_DMA ||
1687                    qc->tf.protocol == ATAPI_PROT_DMA) {
1688                        /* check status of DMA engine */
1689                        host_stat = ap->ops->bmdma_status(ap);
1690                        VPRINTK("ata%u: host_stat 0x%X\n",
1691                                ap->print_id, host_stat);
1692
1693                        /* if it's not our irq... */
1694                        if (!(host_stat & ATA_DMA_INTR))
1695                                goto idle_irq;
1696
1697                        /* before we do anything else, clear DMA-Start bit */
1698                        ap->ops->bmdma_stop(qc);
1699
1700                        if (unlikely(host_stat & ATA_DMA_ERR)) {
1701                                /* error when transferring data to/from memory */
1702                                qc->err_mask |= AC_ERR_HOST_BUS;
1703                                ap->hsm_task_state = HSM_ST_ERR;
1704                        }
1705                }
1706                break;
1707        case HSM_ST:
1708                break;
1709        default:
1710                goto idle_irq;
1711        }
1712
1713
1714        /* check main status, clearing INTRQ if needed */
1715        status = ata_sff_irq_status(ap);
1716        if (status & ATA_BUSY)
1717                goto idle_irq;
1718
1719        /* ack bmdma irq events */
1720        ap->ops->sff_irq_clear(ap);
1721
1722        ata_sff_hsm_move(ap, qc, status, 0);
1723
1724        if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
1725                                       qc->tf.protocol == ATAPI_PROT_DMA))
1726                ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
1727
1728        return 1;       /* irq handled */
1729
1730idle_irq:
1731        ap->stats.idle_irq++;
1732
1733#ifdef ATA_IRQ_TRAP
1734        if ((ap->stats.idle_irq % 1000) == 0) {
1735                ap->ops->sff_check_status(ap);
1736                ap->ops->sff_irq_clear(ap);
1737                ata_port_printk(ap, KERN_WARNING, "irq trap\n");
1738                return 1;
1739        }
1740#endif
1741        return 0;       /* irq not handled */
1742}
1743EXPORT_SYMBOL_GPL(ata_sff_host_intr);
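
/*
 * Illustrative sketch (not part of libata, kept out of the build): a
 * hypothetical driver-private interrupt handler that consults a
 * chip-specific pending-IRQ register before handing the active command
 * to ata_sff_host_intr().  "example_irq_pending()" is an assumed helper
 * standing in for whatever the chipset provides.
 */
#if 0
static irqreturn_t example_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        unsigned int i, handled = 0;
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];
                struct ata_queued_cmd *qc;

                if (!ap || (ap->flags & ATA_FLAG_DISABLED))
                        continue;
                if (!example_irq_pending(ap))   /* chip-specific check */
                        continue;

                qc = ata_qc_from_tag(ap, ap->link.active_tag);
                if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
                        handled |= ata_sff_host_intr(ap, qc);
        }

        spin_unlock_irqrestore(&host->lock, flags);
        return IRQ_RETVAL(handled);
}
#endif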
1744
1745/**
1746 *      ata_sff_interrupt - Default ATA host interrupt handler
1747 *      @irq: irq line (unused)
1748 *      @dev_instance: pointer to our ata_host information structure
1749 *
1750 *      Default interrupt handler for PCI IDE devices.  Calls
1751 *      ata_sff_host_intr() for each port that is not disabled.
1752 *
1753 *      LOCKING:
1754 *      Obtains host lock during operation.
1755 *
1756 *      RETURNS:
1757 *      IRQ_NONE or IRQ_HANDLED.
1758 */
1759irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
1760{
1761        struct ata_host *host = dev_instance;
1762        unsigned int i;
1763        unsigned int handled = 0;
1764        unsigned long flags;
1765
1766        /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
1767        spin_lock_irqsave(&host->lock, flags);
1768
1769        for (i = 0; i < host->n_ports; i++) {
1770                struct ata_port *ap;
1771
1772                ap = host->ports[i];
1773                if (ap &&
1774                    !(ap->flags & ATA_FLAG_DISABLED)) {
1775                        struct ata_queued_cmd *qc;
1776
1777                        qc = ata_qc_from_tag(ap, ap->link.active_tag);
1778                        if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
1779                            (qc->flags & ATA_QCFLAG_ACTIVE))
1780                                handled |= ata_sff_host_intr(ap, qc);
1781                }
1782        }
1783
1784        spin_unlock_irqrestore(&host->lock, flags);
1785
1786        return IRQ_RETVAL(handled);
1787}
1788EXPORT_SYMBOL_GPL(ata_sff_interrupt);
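
/*
 * Illustrative sketch (not part of libata, kept out of the build): for a
 * host with a single native interrupt line, ata_sff_interrupt can be
 * passed straight to ata_host_activate().  "example_pio_sht" and the
 * caller supplying "irq" are assumptions for the sketch.
 */
#if 0
static struct scsi_host_template example_pio_sht = {
        ATA_PIO_SHT("example"),
};

static int example_activate(struct ata_host *host, int irq)
{
        return ata_host_activate(host, irq, ata_sff_interrupt,
                                 IRQF_SHARED, &example_pio_sht);
}
#endif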
1789
1790/**
1791 *      ata_sff_lost_interrupt  -       Check for an apparent lost interrupt
1792 *      @ap: port that appears to have timed out
1793 *
1794 *      Called from the libata error handlers when the core code suspects
1795 *      an interrupt has been lost. If it has, complete anything we can and
1796 *      then return. The interface must support altstatus for this faster
1797 *      recovery to occur.
1798 *
1799 *      Locking:
1800 *      Caller holds host lock
1801 */
1802
1803void ata_sff_lost_interrupt(struct ata_port *ap)
1804{
1805        u8 status;
1806        struct ata_queued_cmd *qc;
1807
1808        /* Only one outstanding command per SFF channel */
1809        qc = ata_qc_from_tag(ap, ap->link.active_tag);
1810        /* Check we have a live one.. */
1811        if (qc == NULL ||  !(qc->flags & ATA_QCFLAG_ACTIVE))
1812                return;
1813        /* We cannot lose an interrupt on a polled command */
1814        if (qc->tf.flags & ATA_TFLAG_POLLING)
1815                return;
1816        /* See if the controller thinks it is still busy - if so, the IRQ
1817           is not lost and the command is still in progress */
1818        status = ata_sff_altstatus(ap);
1819        if (status & ATA_BUSY)
1820                return;
1821
1822        /* There was a command running, we are no longer busy and we have
1823           no interrupt. */
1824        ata_port_printk(ap, KERN_WARNING, "lost interrupt (Status 0x%x)\n",
1825                                                                status);
1826        /* Run the host interrupt logic as if the interrupt had not been
1827           lost */
1828        ata_sff_host_intr(ap, qc);
1829}
1830EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt);
1831
1832/**
1833 *      ata_sff_freeze - Freeze SFF controller port
1834 *      @ap: port to freeze
1835 *
1836 *      Freeze SFF controller port.
1837 *
1838 *      LOCKING:
1839 *      Inherited from caller.
1840 */
1841void ata_sff_freeze(struct ata_port *ap)
1842{
1843        struct ata_ioports *ioaddr = &ap->ioaddr;
1844
1845        ap->ctl |= ATA_NIEN;
1846        ap->last_ctl = ap->ctl;
1847
1848        if (ioaddr->ctl_addr)
1849                iowrite8(ap->ctl, ioaddr->ctl_addr);
1850
1851        /* Under certain circumstances, some controllers raise IRQ on
1852         * ATA_NIEN manipulation.  Also, many controllers fail to mask
1853         * previously pending IRQ on ATA_NIEN assertion.  Clear it.
1854         */
1855        ap->ops->sff_check_status(ap);
1856
1857        ap->ops->sff_irq_clear(ap);
1858}
1859EXPORT_SYMBOL_GPL(ata_sff_freeze);
1860
1861/**
1862 *      ata_sff_thaw - Thaw SFF controller port
1863 *      @ap: port to thaw
1864 *
1865 *      Thaw SFF controller port.
1866 *
1867 *      LOCKING:
1868 *      Inherited from caller.
1869 */
1870void ata_sff_thaw(struct ata_port *ap)
1871{
1872        /* clear & re-enable interrupts */
1873        ap->ops->sff_check_status(ap);
1874        ap->ops->sff_irq_clear(ap);
1875        ap->ops->sff_irq_on(ap);
1876}
1877EXPORT_SYMBOL_GPL(ata_sff_thaw);
1878
1879/**
1880 *      ata_sff_prereset - prepare SFF link for reset
1881 *      @link: SFF link to be reset
1882 *      @deadline: deadline jiffies for the operation
1883 *
1884 *      SFF link @link is about to be reset.  Initialize it.  It first
1885 *      calls ata_std_prereset() and waits for !BSY if the port is
1886 *      being softreset.
1887 *
1888 *      LOCKING:
1889 *      Kernel thread context (may sleep)
1890 *
1891 *      RETURNS:
1892 *      0 on success, -errno otherwise.
1893 */
1894int ata_sff_prereset(struct ata_link *link, unsigned long deadline)
1895{
1896        struct ata_eh_context *ehc = &link->eh_context;
1897        int rc;
1898
1899        rc = ata_std_prereset(link, deadline);
1900        if (rc)
1901                return rc;
1902
1903        /* if we're about to do hardreset, nothing more to do */
1904        if (ehc->i.action & ATA_EH_HARDRESET)
1905                return 0;
1906
1907        /* wait for !BSY if we don't know that no device is attached */
1908        if (!ata_link_offline(link)) {
1909                rc = ata_sff_wait_ready(link, deadline);
1910                if (rc && rc != -ENODEV) {
1911                        ata_link_printk(link, KERN_WARNING, "device not ready "
1912                                        "(errno=%d), forcing hardreset\n", rc);
1913                        ehc->i.action |= ATA_EH_HARDRESET;
1914                }
1915        }
1916
1917        return 0;
1918}
1919EXPORT_SYMBOL_GPL(ata_sff_prereset);
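
/*
 * Illustrative sketch (not part of libata, kept out of the build): a
 * hypothetical driver prereset that runs a chip-specific readiness check
 * and then falls through to the stock SFF behaviour.
 * "example_chip_ready()" is an assumed helper.
 */
#if 0
static int example_prereset(struct ata_link *link, unsigned long deadline)
{
        struct ata_port *ap = link->ap;

        if (!example_chip_ready(ap))            /* chip-specific gating */
                return -ENODEV;

        return ata_sff_prereset(link, deadline);
}
#endif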
1920
1921/**
1922 *      ata_devchk - PATA device presence detection
1923 *      @ap: ATA channel to examine
1924 *      @device: Device to examine (starting at zero)
1925 *
1926 *      This technique was originally described in
1927 *      Hale Landis's ATADRVR (www.ata-atapi.com), and
1928 *      later found its way into the ATA/ATAPI spec.
1929 *
1930 *      Write a pattern to the ATA shadow registers,
1931 *      and if a device is present, it will respond by
1932 *      correctly storing and echoing back the
1933 *      ATA shadow register contents.
1934 *
1935 *      LOCKING:
1936 *      caller.
1937 */
1938static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
1939{
1940        struct ata_ioports *ioaddr = &ap->ioaddr;
1941        u8 nsect, lbal;
1942
1943        ap->ops->sff_dev_select(ap, device);
1944
1945        iowrite8(0x55, ioaddr->nsect_addr);
1946        iowrite8(0xaa, ioaddr->lbal_addr);
1947
1948        iowrite8(0xaa, ioaddr->nsect_addr);
1949        iowrite8(0x55, ioaddr->lbal_addr);
1950
1951        iowrite8(0x55, ioaddr->nsect_addr);
1952        iowrite8(0xaa, ioaddr->lbal_addr);
1953
1954        nsect = ioread8(ioaddr->nsect_addr);
1955        lbal = ioread8(ioaddr->lbal_addr);
1956
1957        if ((nsect == 0x55) && (lbal == 0xaa))
1958                return 1;       /* we found a device */
1959
1960        return 0;               /* nothing found */
1961}
1962
1963/**
1964 *      ata_sff_dev_classify - Parse returned ATA device signature
1965 *      @dev: ATA device to classify (starting at zero)
1966 *      @present: device seems present
1967 *      @r_err: Value of error register on completion
1968 *
1969 *      After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
1970 *      an ATA/ATAPI-defined set of values is placed in the ATA
1971 *      shadow registers, indicating the results of device detection
1972 *      and diagnostics.
1973 *
1974 *      Select the ATA device, and read the values from the ATA shadow
1975 *      registers.  Then parse according to the Error register value,
1976 *      and the spec-defined values examined by ata_dev_classify().
1977 *
1978 *      LOCKING:
1979 *      caller.
1980 *
1981 *      RETURNS:
1982 *      Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
1983 */
1984unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
1985                                  u8 *r_err)
1986{
1987        struct ata_port *ap = dev->link->ap;
1988        struct ata_taskfile tf;
1989        unsigned int class;
1990        u8 err;
1991
1992        ap->ops->sff_dev_select(ap, dev->devno);
1993
1994        memset(&tf, 0, sizeof(tf));
1995
1996        ap->ops->sff_tf_read(ap, &tf);
1997        err = tf.feature;
1998        if (r_err)
1999                *r_err = err;
2000
2001        /* see if device passed diags: continue and warn later */
2002        if (err == 0)
2003                /* diagnostic fail : do nothing _YET_ */
2004                dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
2005        else if (err == 1)
2006                /* do nothing */ ;
2007        else if ((dev->devno == 0) && (err == 0x81))
2008                /* do nothing */ ;
2009        else
2010                return ATA_DEV_NONE;
2011
2012        /* determine if device is ATA or ATAPI */
2013        class = ata_dev_classify(&tf);
2014
2015        if (class == ATA_DEV_UNKNOWN) {
2016                /* If the device failed diagnostic, it's likely to
2017                 * have reported incorrect device signature too.
2018                 * Assume ATA device if the device seems present but
2019                 * device signature is invalid with diagnostic
2020                 * failure.
2021                 */
2022                if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
2023                        class = ATA_DEV_ATA;
2024                else
2025                        class = ATA_DEV_NONE;
2026        } else if ((class == ATA_DEV_ATA) &&
2027                   (ap->ops->sff_check_status(ap) == 0))
2028                class = ATA_DEV_NONE;
2029
2030        return class;
2031}
2032EXPORT_SYMBOL_GPL(ata_sff_dev_classify);
2033
2034/**
2035 *      ata_sff_wait_after_reset - wait for devices to become ready after reset
2036 *      @link: SFF link which is just reset
2037 *      @devmask: mask of present devices
2038 *      @deadline: deadline jiffies for the operation
2039 *
2040 *      Wait for devices attached to SFF @link to become ready after
2041 *      reset.  A preceding 150ms wait avoids accessing the TF
2042 *      status register too early.
2043 *
2044 *      LOCKING:
2045 *      Kernel thread context (may sleep).
2046 *
2047 *      RETURNS:
2048 *      0 on success, -ENODEV if some or all of devices in @devmask
2049 *      don't seem to exist.  -errno on other errors.
2050 */
2051int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask,
2052                             unsigned long deadline)
2053{
2054        struct ata_port *ap = link->ap;
2055        struct ata_ioports *ioaddr = &ap->ioaddr;
2056        unsigned int dev0 = devmask & (1 << 0);
2057        unsigned int dev1 = devmask & (1 << 1);
2058        int rc, ret = 0;
2059
2060        msleep(ATA_WAIT_AFTER_RESET);
2061
2062        /* always check readiness of the master device */
2063        rc = ata_sff_wait_ready(link, deadline);
2064        /* -ENODEV means the odd clown forgot the D7 pulldown resistor
2065         * and TF status is 0xff, bail out on it too.
2066         */
2067        if (rc)
2068                return rc;
2069
2070        /* if device 1 was found in ata_devchk, wait for register
2071         * access briefly, then wait for BSY to clear.
2072         */
2073        if (dev1) {
2074                int i;
2075
2076                ap->ops->sff_dev_select(ap, 1);
2077
2078                /* Wait for register access.  Some ATAPI devices fail
2079                 * to set nsect/lbal after reset, so don't waste too
2080                 * much time on it.  We're going to wait for !BSY anyway.
2081                 */
2082                for (i = 0; i < 2; i++) {
2083                        u8 nsect, lbal;
2084
2085                        nsect = ioread8(ioaddr->nsect_addr);
2086                        lbal = ioread8(ioaddr->lbal_addr);
2087                        if ((nsect == 1) && (lbal == 1))
2088                                break;
2089                        msleep(50);     /* give drive a breather */
2090                }
2091
2092                rc = ata_sff_wait_ready(link, deadline);
2093                if (rc) {
2094                        if (rc != -ENODEV)
2095                                return rc;
2096                        ret = rc;
2097                }
2098        }
2099
2100        /* is all this really necessary? */
2101        ap->ops->sff_dev_select(ap, 0);
2102        if (dev1)
2103                ap->ops->sff_dev_select(ap, 1);
2104        if (dev0)
2105                ap->ops->sff_dev_select(ap, 0);
2106
2107        return ret;
2108}
2109EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset);
2110
2111static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
2112                             unsigned long deadline)
2113{
2114        struct ata_ioports *ioaddr = &ap->ioaddr;
2115
2116        DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
2117
2118        /* software reset.  causes dev0 to be selected */
2119        iowrite8(ap->ctl, ioaddr->ctl_addr);
2120        udelay(20);     /* FIXME: flush */
2121        iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2122        udelay(20);     /* FIXME: flush */
2123        iowrite8(ap->ctl, ioaddr->ctl_addr);
2124        ap->last_ctl = ap->ctl;
2125
2126        /* wait for the port to become ready */
2127        return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
2128}
2129
2130/**
2131 *      ata_sff_softreset - reset host port via ATA SRST
2132 *      @link: ATA link to reset
2133 *      @classes: resulting classes of attached devices
2134 *      @deadline: deadline jiffies for the operation
2135 *
2136 *      Reset host port using ATA SRST.
2137 *
2138 *      LOCKING:
2139 *      Kernel thread context (may sleep)
2140 *
2141 *      RETURNS:
2142 *      0 on success, -errno otherwise.
2143 */
2144int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
2145                      unsigned long deadline)
2146{
2147        struct ata_port *ap = link->ap;
2148        unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2149        unsigned int devmask = 0;
2150        int rc;
2151        u8 err;
2152
2153        DPRINTK("ENTER\n");
2154
2155        /* determine if device 0/1 are present */
2156        if (ata_devchk(ap, 0))
2157                devmask |= (1 << 0);
2158        if (slave_possible && ata_devchk(ap, 1))
2159                devmask |= (1 << 1);
2160
2161        /* select device 0 again */
2162        ap->ops->sff_dev_select(ap, 0);
2163
2164        /* issue bus reset */
2165        DPRINTK("about to softreset, devmask=%x\n", devmask);
2166        rc = ata_bus_softreset(ap, devmask, deadline);
2167        /* if link is occupied, -ENODEV too is an error */
2168        if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
2169                ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
2170                return rc;
2171        }
2172
2173        /* determine by signature whether we have ATA or ATAPI devices */
2174        classes[0] = ata_sff_dev_classify(&link->device[0],
2175                                          devmask & (1 << 0), &err);
2176        if (slave_possible && err != 0x81)
2177                classes[1] = ata_sff_dev_classify(&link->device[1],
2178                                                  devmask & (1 << 1), &err);
2179
2180        DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2181        return 0;
2182}
2183EXPORT_SYMBOL_GPL(ata_sff_softreset);
2184
2185/**
2186 *      sata_sff_hardreset - reset host port via SATA phy reset
2187 *      @link: link to reset
2188 *      @class: resulting class of attached device
2189 *      @deadline: deadline jiffies for the operation
2190 *
2191 *      SATA phy-reset host port using DET bits of SControl register,
2192 *      wait for !BSY and classify the attached device.
2193 *
2194 *      LOCKING:
2195 *      Kernel thread context (may sleep)
2196 *
2197 *      RETURNS:
2198 *      0 on success, -errno otherwise.
2199 */
2200int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
2201                       unsigned long deadline)
2202{
2203        struct ata_eh_context *ehc = &link->eh_context;
2204        const unsigned long *timing = sata_ehc_deb_timing(ehc);
2205        bool online;
2206        int rc;
2207
2208        rc = sata_link_hardreset(link, timing, deadline, &online,
2209                                 ata_sff_check_ready);
2210        if (online)
2211                *class = ata_sff_dev_classify(link->device, 1, NULL);
2212
2213        DPRINTK("EXIT, class=%u\n", *class);
2214        return rc;
2215}
2216EXPORT_SYMBOL_GPL(sata_sff_hardreset);
2217
2218/**
2219 *      ata_sff_postreset - SFF postreset callback
2220 *      @link: the target SFF ata_link
2221 *      @classes: classes of attached devices
2222 *
2223 *      This function is invoked after a successful reset.  It first
2224 *      calls ata_std_postreset() and performs SFF specific postreset
2225 *      processing.
2226 *
2227 *      LOCKING:
2228 *      Kernel thread context (may sleep)
2229 */
2230void ata_sff_postreset(struct ata_link *link, unsigned int *classes)
2231{
2232        struct ata_port *ap = link->ap;
2233
2234        ata_std_postreset(link, classes);
2235
2236        /* is double-select really necessary? */
2237        if (classes[0] != ATA_DEV_NONE)
2238                ap->ops->sff_dev_select(ap, 1);
2239        if (classes[1] != ATA_DEV_NONE)
2240                ap->ops->sff_dev_select(ap, 0);
2241
2242        /* bail out if no device is present */
2243        if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2244                DPRINTK("EXIT, no device\n");
2245                return;
2246        }
2247
2248        /* set up device control */
2249        if (ap->ioaddr.ctl_addr) {
2250                iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2251                ap->last_ctl = ap->ctl;
2252        }
2253}
2254EXPORT_SYMBOL_GPL(ata_sff_postreset);
2255
2256/**
2257 *      ata_sff_drain_fifo - Stock FIFO drain logic for SFF controllers
2258 *      @qc: command
2259 *
2260 *      Drain the FIFO and device of any stuck data following a command
2261 *      that failed to complete. In some cases this is necessary before a
2262 *      reset will recover the device.
2263 *
2264 */
2265
2266void ata_sff_drain_fifo(struct ata_queued_cmd *qc)
2267{
2268        int count;
2269        struct ata_port *ap;
2270
2271        /* We only need to flush incoming data when a command was running */
2272        if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
2273                return;
2274
2275        ap = qc->ap;
2276        /* Drain up to 64K of data before we give up this recovery method */
2277        for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ)
2278                                                && count < 32768; count++)
2279                ioread16(ap->ioaddr.data_addr);
2280
2281        /* Can become DEBUG later */
2282        if (count)
2283                ata_port_printk(ap, KERN_DEBUG,
2284                        "drained %d bytes to clear DRQ.\n", count);
2285
2286}
2287EXPORT_SYMBOL_GPL(ata_sff_drain_fifo);
2288
2289/**
2290 *      ata_sff_error_handler - Stock error handler for BMDMA controller
2291 *      @ap: port to handle error for
2292 *
2293 *      Stock error handler for SFF controller.  It can handle both
2294 *      PATA and SATA controllers.  Many controllers should be able to
2295 *      use this EH as-is or with some added handling before and
2296 *      after.
2297 *
2298 *      LOCKING:
2299 *      Kernel thread context (may sleep)
2300 */
2301void ata_sff_error_handler(struct ata_port *ap)
2302{
2303        ata_reset_fn_t softreset = ap->ops->softreset;
2304        ata_reset_fn_t hardreset = ap->ops->hardreset;
2305        struct ata_queued_cmd *qc;
2306        unsigned long flags;
2307        int thaw = 0;
2308
2309        qc = __ata_qc_from_tag(ap, ap->link.active_tag);
2310        if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
2311                qc = NULL;
2312
2313        /* reset PIO HSM and stop DMA engine */
2314        spin_lock_irqsave(ap->lock, flags);
2315
2316        ap->hsm_task_state = HSM_ST_IDLE;
2317
2318        if (ap->ioaddr.bmdma_addr &&
2319            qc && (qc->tf.protocol == ATA_PROT_DMA ||
2320                   qc->tf.protocol == ATAPI_PROT_DMA)) {
2321                u8 host_stat;
2322
2323                host_stat = ap->ops->bmdma_status(ap);
2324
2325                /* BMDMA controllers indicate host bus error by
2326                 * setting DMA_ERR bit and timing out.  As it wasn't
2327                 * really a timeout event, adjust error mask and
2328                 * cancel frozen state.
2329                 */
2330                if (qc->err_mask == AC_ERR_TIMEOUT
2331                                                && (host_stat & ATA_DMA_ERR)) {
2332                        qc->err_mask = AC_ERR_HOST_BUS;
2333                        thaw = 1;
2334                }
2335
2336                ap->ops->bmdma_stop(qc);
2337        }
2338
2339        ata_sff_sync(ap);               /* FIXME: We don't need this */
2340        ap->ops->sff_check_status(ap);
2341        ap->ops->sff_irq_clear(ap);
2342        /* We *MUST* do FIFO draining before we issue a reset as several
2343         * devices helpfully clear their internal state and will lock solid
2344         * if we touch the data port post reset. Pass qc in case anyone wants
2345         * to do different PIO/DMA recovery or has per-command fixups
2346         */
2347        if (ap->ops->drain_fifo)
2348                ap->ops->drain_fifo(qc);
2349
2350        spin_unlock_irqrestore(ap->lock, flags);
2351
2352        if (thaw)
2353                ata_eh_thaw_port(ap);
2354
2355        /* PIO and DMA engines have been stopped, perform recovery */
2356
2357        /* Ignore ata_sff_softreset if ctl isn't accessible and
2358         * built-in hardresets if SCR access isn't available.
2359         */
2360        if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr)
2361                softreset = NULL;
2362        if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link))
2363                hardreset = NULL;
2364
2365        ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,
2366                  ap->ops->postreset);
2367}
2368EXPORT_SYMBOL_GPL(ata_sff_error_handler);
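
/*
 * Illustrative sketch (not part of libata, kept out of the build): drivers
 * that need controller-specific cleanup typically wrap the stock handler
 * rather than replace it.  "example_chip_quiesce()" is an assumed helper.
 */
#if 0
static void example_error_handler(struct ata_port *ap)
{
        example_chip_quiesce(ap);               /* chip-specific cleanup */
        ata_sff_error_handler(ap);
}
#endif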
2369
2370/**
2371 *      ata_sff_post_internal_cmd - Stock post_internal_cmd for SFF controller
2372 *      @qc: internal command to clean up
2373 *
2374 *      LOCKING:
2375 *      Kernel thread context (may sleep)
2376 */
2377void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc)
2378{
2379        struct ata_port *ap = qc->ap;
2380        unsigned long flags;
2381
2382        spin_lock_irqsave(ap->lock, flags);
2383
2384        ap->hsm_task_state = HSM_ST_IDLE;
2385
2386        if (ap->ioaddr.bmdma_addr)
2387                ata_bmdma_stop(qc);
2388
2389        spin_unlock_irqrestore(ap->lock, flags);
2390}
2391EXPORT_SYMBOL_GPL(ata_sff_post_internal_cmd);
2392
2393/**
2394 *      ata_sff_port_start - Set port up for dma.
2395 *      @ap: Port to initialize
2396 *
2397 *      Called just after data structures for each port are
2398 *      initialized.  Allocates space for the PRD table if the
2399 *      device is a DMA-capable SFF controller.
2400 *
2401 *      May be used as the port_start() entry in ata_port_operations.
2402 *
2403 *      LOCKING:
2404 *      Inherited from caller.
2405 */
2406int ata_sff_port_start(struct ata_port *ap)
2407{
2408        if (ap->ioaddr.bmdma_addr)
2409                return ata_port_start(ap);
2410        return 0;
2411}
2412EXPORT_SYMBOL_GPL(ata_sff_port_start);
2413
2414/**
2415 *      ata_sff_port_start32 - Set port up for dma.
2416 *      @ap: Port to initialize
2417 *
2418 *      Called just after data structures for each port are
2419 *      initialized.  Allocates space for the PRD table if the
2420 *      device is a DMA-capable SFF controller.
2421 *
2422 *      May be used as the port_start() entry in ata_port_operations for
2423 *      devices that are capable of 32bit PIO.
2424 *
2425 *      LOCKING:
2426 *      Inherited from caller.
2427 */
2428int ata_sff_port_start32(struct ata_port *ap)
2429{
2430        ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
2431        if (ap->ioaddr.bmdma_addr)
2432                return ata_port_start(ap);
2433        return 0;
2434}
2435EXPORT_SYMBOL_GPL(ata_sff_port_start32);
2436
2437/**
2438 *      ata_sff_std_ports - initialize ioaddr with standard port offsets.
2439 *      @ioaddr: IO address structure to be initialized
2440 *
2441 *      Utility function which initializes data_addr, error_addr,
2442 *      feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
2443 *      device_addr, status_addr, and command_addr to standard offsets
2444 *      relative to cmd_addr.
2445 *
2446 *      Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
2447 */
2448void ata_sff_std_ports(struct ata_ioports *ioaddr)
2449{
2450        ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
2451        ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
2452        ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
2453        ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
2454        ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
2455        ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
2456        ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
2457        ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
2458        ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
2459        ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
2460}
2461EXPORT_SYMBOL_GPL(ata_sff_std_ports);
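
/*
 * Illustrative sketch (not part of libata, kept out of the build): typical
 * use of ata_sff_std_ports() from a driver's port setup.  Only cmd_addr
 * and ctl_addr/altstatus_addr are filled in by hand; the rest are derived.
 * "cmd_base" and "ctl_base" stand in for whatever iomapped addresses the
 * driver obtained.
 */
#if 0
static void example_setup_port(struct ata_port *ap, void __iomem *cmd_base,
                               void __iomem *ctl_base)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;

        ioaddr->cmd_addr = cmd_base;
        ioaddr->altstatus_addr = ctl_base;
        ioaddr->ctl_addr = ctl_base;
        ata_sff_std_ports(ioaddr);      /* derive the rest from cmd_addr */
}
#endif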
2462
2463unsigned long ata_bmdma_mode_filter(struct ata_device *adev,
2464                                    unsigned long xfer_mask)
2465{
2466        /* Filter out DMA modes if the device has been configured by
2467           the BIOS as PIO only */
2468
2469        if (adev->link->ap->ioaddr.bmdma_addr == NULL)
2470                xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2471        return xfer_mask;
2472}
2473EXPORT_SYMBOL_GPL(ata_bmdma_mode_filter);
2474
2475/**
2476 *      ata_bmdma_setup - Set up PCI IDE BMDMA transaction
2477 *      @qc: Info associated with this ATA transaction.
2478 *
2479 *      LOCKING:
2480 *      spin_lock_irqsave(host lock)
2481 */
2482void ata_bmdma_setup(struct ata_queued_cmd *qc)
2483{
2484        struct ata_port *ap = qc->ap;
2485        unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
2486        u8 dmactl;
2487
2488        /* load PRD table addr. */
2489        mb();   /* make sure PRD table writes are visible to controller */
2490        iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2491
2492        /* specify data direction, triple-check start bit is clear */
2493        dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2494        dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
2495        if (!rw)
2496                dmactl |= ATA_DMA_WR;
2497        iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2498
2499        /* issue r/w command */
2500        ap->ops->sff_exec_command(ap, &qc->tf);
2501}
2502EXPORT_SYMBOL_GPL(ata_bmdma_setup);
2503
2504/**
2505 *      ata_bmdma_start - Start a PCI IDE BMDMA transaction
2506 *      @qc: Info associated with this ATA transaction.
2507 *
2508 *      LOCKING:
2509 *      spin_lock_irqsave(host lock)
2510 */
2511void ata_bmdma_start(struct ata_queued_cmd *qc)
2512{
2513        struct ata_port *ap = qc->ap;
2514        u8 dmactl;
2515
2516        /* start host DMA transaction */
2517        dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2518        iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2519
2520        /* Strictly, one may wish to issue an ioread8() here, to
2521         * flush the mmio write.  However, control also passes
2522         * to the hardware at this point, and it will interrupt
2523         * us when we are to resume control.  So, in effect,
2524         * we don't care when the mmio write flushes.
2525         * Further, a read of the DMA status register _immediately_
2526         * following the write may not be what certain flaky hardware
2527         * is expecting, so I think it is best not to add a readb()
2528         * without first testing all the MMIO ATA cards/mobos.
2529         * Or maybe I'm just being paranoid.
2530         *
2531         * FIXME: The posting of this write means I/O starts are
2532         * unnecessarily delayed for MMIO
2533         */
2534}
2535EXPORT_SYMBOL_GPL(ata_bmdma_start);
2536
2537/**
2538 *      ata_bmdma_stop - Stop PCI IDE BMDMA transfer
2539 *      @qc: Command we are ending DMA for
2540 *
2541 *      Clears the ATA_DMA_START flag in the dma control register
2542 *
2543 *      May be used as the bmdma_stop() entry in ata_port_operations.
2544 *
2545 *      LOCKING:
2546 *      spin_lock_irqsave(host lock)
2547 */
2548void ata_bmdma_stop(struct ata_queued_cmd *qc)
2549{
2550        struct ata_port *ap = qc->ap;
2551        void __iomem *mmio = ap->ioaddr.bmdma_addr;
2552
2553        /* clear start/stop bit */
2554        iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
2555                 mmio + ATA_DMA_CMD);
2556
2557        /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
2558        ata_sff_dma_pause(ap);
2559}
2560EXPORT_SYMBOL_GPL(ata_bmdma_stop);
2561
2562/**
2563 *      ata_bmdma_status - Read PCI IDE BMDMA status
2564 *      @ap: Port associated with this ATA transaction.
2565 *
2566 *      Read and return BMDMA status register.
2567 *
2568 *      May be used as the bmdma_status() entry in ata_port_operations.
2569 *
2570 *      LOCKING:
2571 *      spin_lock_irqsave(host lock)
2572 */
2573u8 ata_bmdma_status(struct ata_port *ap)
2574{
2575        return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
2576}
2577EXPORT_SYMBOL_GPL(ata_bmdma_status);
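
/*
 * Illustrative sketch (not part of libata, kept out of the build): a
 * hypothetical bmdma_status override for a controller whose status
 * register carries undefined bits; it keeps only the architected bits and
 * otherwise defers to the stock helper.  The quirk itself is an assumption.
 */
#if 0
static u8 example_bmdma_status(struct ata_port *ap)
{
        return ata_bmdma_status(ap) &
                (ATA_DMA_INTR | ATA_DMA_ERR | ATA_DMA_ACTIVE);
}
#endif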
2578
2579/**
2580 *      ata_bus_reset - reset host port and associated ATA channel
2581 *      @ap: port to reset
2582 *
2583 *      This is typically the first time we actually start issuing
2584 *      commands to the ATA channel.  We wait for BSY to clear, then
2585 *      issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2586 *      result.  Determine what devices, if any, are on the channel
2587 *      by looking at the device 0/1 error register.  Look at the signature
2588 *      stored in each device's taskfile registers, to determine if
2589 *      the device is ATA or ATAPI.
2590 *
2591 *      LOCKING:
2592 *      PCI/etc. bus probe sem.
2593 *      Obtains host lock.
2594 *
2595 *      SIDE EFFECTS:
2596 *      Sets ATA_FLAG_DISABLED if bus reset fails.
2597 *
2598 *      DEPRECATED:
2599 *      This function is only for drivers which still use old EH and
2600 *      will be removed soon.
2601 */
2602void ata_bus_reset(struct ata_port *ap)
2603{
2604        struct ata_device *device = ap->link.device;
2605        struct ata_ioports *ioaddr = &ap->ioaddr;
2606        unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2607        u8 err;
2608        unsigned int dev0, dev1 = 0, devmask = 0;
2609        int rc;
2610
2611        DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
2612
2613        /* determine if device 0/1 are present */
2614        if (ap->flags & ATA_FLAG_SATA_RESET)
2615                dev0 = 1;
2616        else {
2617                dev0 = ata_devchk(ap, 0);
2618                if (slave_possible)
2619                        dev1 = ata_devchk(ap, 1);
2620        }
2621
2622        if (dev0)
2623                devmask |= (1 << 0);
2624        if (dev1)
2625                devmask |= (1 << 1);
2626
2627        /* select device 0 again */
2628        ap->ops->sff_dev_select(ap, 0);
2629
2630        /* issue bus reset */
2631        if (ap->flags & ATA_FLAG_SRST) {
2632                rc = ata_bus_softreset(ap, devmask,
2633                                       ata_deadline(jiffies, 40000));
2634                if (rc && rc != -ENODEV)
2635                        goto err_out;
2636        }
2637
2638        /*
2639         * determine by signature whether we have ATA or ATAPI devices
2640         */
2641        device[0].class = ata_sff_dev_classify(&device[0], dev0, &err);
2642        if ((slave_possible) && (err != 0x81))
2643                device[1].class = ata_sff_dev_classify(&device[1], dev1, &err);
2644
2645        /* is double-select really necessary? */
2646        if (device[1].class != ATA_DEV_NONE)
2647                ap->ops->sff_dev_select(ap, 1);
2648        if (device[0].class != ATA_DEV_NONE)
2649                ap->ops->sff_dev_select(ap, 0);
2650
2651        /* if no devices were detected, disable this port */
2652        if ((device[0].class == ATA_DEV_NONE) &&
2653            (device[1].class == ATA_DEV_NONE))
2654                goto err_out;
2655
2656        if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2657                /* set up device control for ATA_FLAG_SATA_RESET */
2658                iowrite8(ap->ctl, ioaddr->ctl_addr);
2659                ap->last_ctl = ap->ctl;
2660        }
2661
2662        DPRINTK("EXIT\n");
2663        return;
2664
2665err_out:
2666        ata_port_printk(ap, KERN_ERR, "disabling port\n");
2667        ata_port_disable(ap);
2668
2669        DPRINTK("EXIT\n");
2670}
2671EXPORT_SYMBOL_GPL(ata_bus_reset);
2672
2673#ifdef CONFIG_PCI
2674
2675/**
2676 *      ata_pci_bmdma_clear_simplex -   attempt to kick device out of simplex
2677 *      @pdev: PCI device
2678 *
2679 *      Some PCI ATA devices report simplex mode but in fact can be told to
2680 *      enter non simplex mode. This implements the necessary logic to
2681 *      perform the task on such devices. Calling it on other devices will
2682 *      have -undefined- behaviour.
2683 */
2684int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
2685{
2686        unsigned long bmdma = pci_resource_start(pdev, 4);
2687        u8 simplex;
2688
2689        if (bmdma == 0)
2690                return -ENOENT;
2691
2692        simplex = inb(bmdma + 0x02);
2693        outb(simplex & 0x60, bmdma + 0x02);
2694        simplex = inb(bmdma + 0x02);
2695        if (simplex & 0x80)
2696                return -EOPNOTSUPP;
2697        return 0;
2698}
2699EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
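
/*
 * Illustrative sketch (not part of libata, kept out of the build): a
 * hypothetical init-time fragment that tries to break a chipset out of
 * reported simplex mode; failure is non-fatal and merely leaves the host
 * operating in simplex.
 */
#if 0
static void example_fix_simplex(struct pci_dev *pdev)
{
        if (ata_pci_bmdma_clear_simplex(pdev))
                dev_printk(KERN_INFO, &pdev->dev,
                           "simplex mode could not be cleared\n");
}
#endif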
2700
2701/**
2702 *      ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
2703 *      @host: target ATA host
2704 *
2705 *      Acquire PCI BMDMA resources and initialize @host accordingly.
2706 *
2707 *      LOCKING:
2708 *      Inherited from calling layer (may sleep).
2709 *
2710 *      RETURNS:
2711 *      0 on success, -errno otherwise.
2712 */
2713int ata_pci_bmdma_init(struct ata_host *host)
2714{
2715        struct device *gdev = host->dev;
2716        struct pci_dev *pdev = to_pci_dev(gdev);
2717        int i, rc;
2718
2719        /* No BAR4 allocation: No DMA */
2720        if (pci_resource_start(pdev, 4) == 0)
2721                return 0;
2722
2723        /* TODO: If we get no DMA mask we should fall back to PIO */
2724        rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
2725        if (rc)
2726                return rc;
2727        rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
2728        if (rc)
2729                return rc;
2730
2731        /* request and iomap DMA region */
2732        rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
2733        if (rc) {
2734                dev_printk(KERN_ERR, gdev, "failed to request/iomap BAR4\n");
2735                return -ENOMEM;
2736        }
2737        host->iomap = pcim_iomap_table(pdev);
2738
2739        for (i = 0; i < 2; i++) {
2740                struct ata_port *ap = host->ports[i];
2741                void __iomem *bmdma = host->iomap[4] + 8 * i;
2742
2743                if (ata_port_is_dummy(ap))
2744                        continue;
2745
2746                ap->ioaddr.bmdma_addr = bmdma;
2747                if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
2748                    (ioread8(bmdma + 2) & 0x80))
2749                        host->flags |= ATA_HOST_SIMPLEX;
2750
2751                ata_port_desc(ap, "bmdma 0x%llx",
2752                    (unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
2753        }
2754
2755        return 0;
2756}
2757EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
2758
2759static int ata_resources_present(struct pci_dev *pdev, int port)
2760{
2761        int i;
2762
2763        /* Check that the PCI resources for this channel are enabled */
2764        port = port * 2;
2765        for (i = 0; i < 2; i++) {
2766                if (pci_resource_start(pdev, port + i) == 0 ||
2767                    pci_resource_len(pdev, port + i) == 0)
2768                        return 0;
2769        }
2770        return 1;
2771}
2772
2773/**
2774 *      ata_pci_sff_init_host - acquire native PCI ATA resources and init host
2775 *      @host: target ATA host
2776 *
2777 *      Acquire native PCI ATA resources for @host and initialize the
2778 *      first two ports of @host accordingly.  Ports marked dummy are
2779 *      skipped and allocation failure makes the port dummy.
2780 *
2781 *      Note that native PCI resources are valid even for legacy hosts
2782 *      as we fix up pdev resources array early in boot, so this
2783 *      function can be used for both native and legacy SFF hosts.
2784 *
2785 *      LOCKING:
2786 *      Inherited from calling layer (may sleep).
2787 *
2788 *      RETURNS:
2789 *      0 if at least one port is initialized, -ENODEV if no port is
2790 *      available.
2791 */
2792int ata_pci_sff_init_host(struct ata_host *host)
2793{
2794        struct device *gdev = host->dev;
2795        struct pci_dev *pdev = to_pci_dev(gdev);
2796        unsigned int mask = 0;
2797        int i, rc;
2798
2799        /* request, iomap BARs and init port addresses accordingly */
2800        for (i = 0; i < 2; i++) {
2801                struct ata_port *ap = host->ports[i];
2802                int base = i * 2;
2803                void __iomem * const *iomap;
2804
2805                if (ata_port_is_dummy(ap))
2806                        continue;
2807
2808                /* Discard disabled ports.  Some controllers show
2809                 * their unused channels this way.  Disabled ports are
2810                 * made dummy.
2811                 */
2812                if (!ata_resources_present(pdev, i)) {
2813                        ap->ops = &ata_dummy_port_ops;
2814                        continue;
2815                }
2816
2817                rc = pcim_iomap_regions(pdev, 0x3 << base,
2818                                        dev_driver_string(gdev));
2819                if (rc) {
2820                        dev_printk(KERN_WARNING, gdev,
2821                                   "failed to request/iomap BARs for port %d "
2822                                   "(errno=%d)\n", i, rc);
2823                        if (rc == -EBUSY)
2824                                pcim_pin_device(pdev);
2825                        ap->ops = &ata_dummy_port_ops;
2826                        continue;
2827                }
2828                host->iomap = iomap = pcim_iomap_table(pdev);
2829
2830                ap->ioaddr.cmd_addr = iomap[base];
2831                ap->ioaddr.altstatus_addr =
2832                ap->ioaddr.ctl_addr = (void __iomem *)
2833                        ((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);
2834                ata_sff_std_ports(&ap->ioaddr);
2835
2836                ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
2837                        (unsigned long long)pci_resource_start(pdev, base),
2838                        (unsigned long long)pci_resource_start(pdev, base + 1));
2839
2840                mask |= 1 << i;
2841        }
2842
2843        if (!mask) {
2844                dev_printk(KERN_ERR, gdev, "no available native port\n");
2845                return -ENODEV;
2846        }
2847
2848        return 0;
2849}
2850EXPORT_SYMBOL_GPL(ata_pci_sff_init_host);
2851
2852/**
2853 *      ata_pci_sff_prepare_host - helper to prepare native PCI ATA host
2854 *      @pdev: target PCI device
2855 *      @ppi: array of port_info, must be enough for two ports
2856 *      @r_host: out argument for the initialized ATA host
2857 *
2858 *      Helper to allocate ATA host for @pdev, acquire all native PCI
2859 *      resources and initialize it accordingly in one go.
2860 *
2861 *      LOCKING:
2862 *      Inherited from calling layer (may sleep).
2863 *
2864 *      RETURNS:
2865 *      0 on success, -errno otherwise.
2866 */
2867int ata_pci_sff_prepare_host(struct pci_dev *pdev,
2868                             const struct ata_port_info * const *ppi,
2869                             struct ata_host **r_host)
2870{
2871        struct ata_host *host;
2872        int rc;
2873
2874        if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
2875                return -ENOMEM;
2876
2877        host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
2878        if (!host) {
2879                dev_printk(KERN_ERR, &pdev->dev,
2880                           "failed to allocate ATA host\n");
2881                rc = -ENOMEM;
2882                goto err_out;
2883        }
2884
2885        rc = ata_pci_sff_init_host(host);
2886        if (rc)
2887                goto err_out;
2888
2889        /* init DMA related stuff */
2890        rc = ata_pci_bmdma_init(host);
2891        if (rc)
2892                goto err_bmdma;
2893
2894        devres_remove_group(&pdev->dev, NULL);
2895        *r_host = host;
2896        return 0;
2897
2898err_bmdma:
2899        /* This is necessary because PCI and iomap resources are
2900         * merged and releasing the top group won't release the
2901         * acquired resources if some of those have been acquired
2902         * before entering this function.
2903         */
2904        pcim_iounmap_regions(pdev, 0xf);
2905err_out:
2906        devres_release_group(&pdev->dev, NULL);
2907        return rc;
2908}
2909EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host);
2910
2911/**
2912 *      ata_pci_sff_activate_host - start SFF host, request IRQ and register it
2913 *      @host: target SFF ATA host
2914 *      @irq_handler: irq_handler used when requesting IRQ(s)
2915 *      @sht: scsi_host_template to use when registering the host
2916 *
2917 *      This is the counterpart of ata_host_activate() for SFF ATA
2918 *      hosts.  This separate helper is necessary because SFF hosts
2919 *      use two separate interrupts in legacy mode.
2920 *
2921 *      LOCKING:
2922 *      Inherited from calling layer (may sleep).
2923 *
2924 *      RETURNS:
2925 *      0 on success, -errno otherwise.
2926 */
2927int ata_pci_sff_activate_host(struct ata_host *host,
2928                              irq_handler_t irq_handler,
2929                              struct scsi_host_template *sht)
2930{
2931        struct device *dev = host->dev;
2932        struct pci_dev *pdev = to_pci_dev(dev);
2933        const char *drv_name = dev_driver_string(host->dev);
2934        int legacy_mode = 0, rc;
2935
2936        rc = ata_host_start(host);
2937        if (rc)
2938                return rc;
2939
2940        if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
2941                u8 tmp8, mask;
2942
2943                /* TODO: What if one channel is in native mode ... */
2944                pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
2945                mask = (1 << 2) | (1 << 0);
2946                if ((tmp8 & mask) != mask)
2947                        legacy_mode = 1;
2948#if defined(CONFIG_NO_ATA_LEGACY)
2949                /* Some platforms with PCI limits cannot address compat
2950                   port space. In that case we punt if their firmware has
2951                   left a device in compatibility mode */
2952                if (legacy_mode) {
2953                        printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n");
2954                        return -EOPNOTSUPP;
2955                }
2956#endif
2957        }
2958
2959        if (!devres_open_group(dev, NULL, GFP_KERNEL))
2960                return -ENOMEM;
2961
2962        if (!legacy_mode && pdev->irq) {
2963                rc = devm_request_irq(dev, pdev->irq, irq_handler,
2964                                      IRQF_SHARED, drv_name, host);
2965                if (rc)
2966                        goto out;
2967
2968                ata_port_desc(host->ports[0], "irq %d", pdev->irq);
2969                ata_port_desc(host->ports[1], "irq %d", pdev->irq);
2970        } else if (legacy_mode) {
2971                if (!ata_port_is_dummy(host->ports[0])) {
2972                        rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
2973                                              irq_handler, IRQF_SHARED,
2974                                              drv_name, host);
2975                        if (rc)
2976                                goto out;
2977
2978                        ata_port_desc(host->ports[0], "irq %d",
2979                                      ATA_PRIMARY_IRQ(pdev));
2980                }
2981
2982                if (!ata_port_is_dummy(host->ports[1])) {
2983                        rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
2984                                              irq_handler, IRQF_SHARED,
2985                                              drv_name, host);
2986                        if (rc)
2987                                goto out;
2988
2989                        ata_port_desc(host->ports[1], "irq %d",
2990                                      ATA_SECONDARY_IRQ(pdev));
2991                }
2992        }
2993
2994        rc = ata_host_register(host, sht);
2995out:
2996        if (rc == 0)
2997                devres_remove_group(dev, NULL);
2998        else
2999                devres_release_group(dev, NULL);
3000
3001        return rc;
3002}
3003EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
3004
3005/**
3006 *      ata_pci_sff_init_one - Initialize/register PCI IDE host controller
3007 *      @pdev: Controller to be initialized
3008 *      @ppi: array of port_info, must be enough for two ports
3009 *      @sht: scsi_host_template to use when registering the host
3010 *      @host_priv: host private_data
3011 *
3012 *      This is a helper function which can be called from a driver's
3013 *      xxx_init_one() probe function if the hardware uses traditional
3014 *      IDE taskfile registers.
3015 *
3016 *      This function calls pci_enable_device(), reserves its register
3017 *      regions, sets the DMA mask, enables bus master mode, and
3018 *      registers the host via ata_pci_sff_activate_host().
3019 *
3020 *      ASSUMPTION:
3021 *      Nobody makes a single channel controller that appears solely as
3022 *      the secondary legacy port on PCI.
3023 *
3024 *      LOCKING:
3025 *      Inherited from PCI layer (may sleep).
3026 *
3027 *      RETURNS:
3028 *      Zero on success, negative errno-based value on error.
3029 */
3030int ata_pci_sff_init_one(struct pci_dev *pdev,
3031                         const struct ata_port_info * const *ppi,
3032                         struct scsi_host_template *sht, void *host_priv)
3033{
3034        struct device *dev = &pdev->dev;
3035        const struct ata_port_info *pi = NULL;
3036        struct ata_host *host = NULL;
3037        int i, rc;
3038
3039        DPRINTK("ENTER\n");
3040
3041        /* look up the first valid port_info */
3042        for (i = 0; i < 2 && ppi[i]; i++) {
3043                if (ppi[i]->port_ops != &ata_dummy_port_ops) {
3044                        pi = ppi[i];
3045                        break;
3046                }
3047        }
3048
3049        if (!pi) {
3050                dev_printk(KERN_ERR, &pdev->dev,
3051                           "no valid port_info specified\n");
3052                return -EINVAL;
3053        }
3054
3055        if (!devres_open_group(dev, NULL, GFP_KERNEL))
3056                return -ENOMEM;
3057
3058        rc = pcim_enable_device(pdev);
3059        if (rc)
3060                goto out;
3061
3062        /* prepare and activate SFF host */
3063        rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
3064        if (rc)
3065                goto out;
3066        host->private_data = host_priv;
3067
3068        pci_set_master(pdev);
3069        rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
3070out:
3071        if (rc == 0)
3072                devres_remove_group(&pdev->dev, NULL);
3073        else
3074                devres_release_group(&pdev->dev, NULL);
3075
3076        return rc;
3077}
3078EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
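
/*
 * Illustrative sketch (not part of libata, kept out of the build): the
 * usual shape of a minimal BMDMA driver probe built on
 * ata_pci_sff_init_one().  The transfer-mode masks, "example_sht" and the
 * reuse of ata_bmdma_port_ops are assumptions, not tied to any real chipset.
 */
#if 0
static struct scsi_host_template example_sht = {
        ATA_BMDMA_SHT("example"),
};

static int example_init_one(struct pci_dev *pdev,
                            const struct pci_device_id *id)
{
        static const struct ata_port_info info = {
                .flags          = ATA_FLAG_SLAVE_POSS,
                .pio_mask       = ATA_PIO4,
                .mwdma_mask     = ATA_MWDMA2,
                .udma_mask      = ATA_UDMA5,
                .port_ops       = &ata_bmdma_port_ops,
        };
        const struct ata_port_info *ppi[] = { &info, NULL };

        return ata_pci_sff_init_one(pdev, ppi, &example_sht, NULL);
}
#endif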
3079
3080#endif /* CONFIG_PCI */
3081