linux/drivers/ata/libata-sff.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *  libata-sff.c - helper library for PCI IDE BMDMA
   4 *
   5 *  Copyright 2003-2006 Red Hat, Inc.  All rights reserved.
   6 *  Copyright 2003-2006 Jeff Garzik
   7 *
   8 *  libata documentation is available via 'make {ps|pdf}docs',
   9 *  as Documentation/driver-api/libata.rst
  10 *
  11 *  Hardware documentation available from http://www.t13.org/ and
  12 *  http://www.sata-io.org/
  13 */
  14
  15#include <linux/kernel.h>
  16#include <linux/gfp.h>
  17#include <linux/pci.h>
  18#include <linux/module.h>
  19#include <linux/libata.h>
  20#include <linux/highmem.h>
  21
  22#include "libata.h"
  23
  24static struct workqueue_struct *ata_sff_wq;
  25
  26const struct ata_port_operations ata_sff_port_ops = {
  27        .inherits               = &ata_base_port_ops,
  28
  29        .qc_prep                = ata_noop_qc_prep,
  30        .qc_issue               = ata_sff_qc_issue,
  31        .qc_fill_rtf            = ata_sff_qc_fill_rtf,
  32
  33        .freeze                 = ata_sff_freeze,
  34        .thaw                   = ata_sff_thaw,
  35        .prereset               = ata_sff_prereset,
  36        .softreset              = ata_sff_softreset,
  37        .hardreset              = sata_sff_hardreset,
  38        .postreset              = ata_sff_postreset,
  39        .error_handler          = ata_sff_error_handler,
  40
  41        .sff_dev_select         = ata_sff_dev_select,
  42        .sff_check_status       = ata_sff_check_status,
  43        .sff_tf_load            = ata_sff_tf_load,
  44        .sff_tf_read            = ata_sff_tf_read,
  45        .sff_exec_command       = ata_sff_exec_command,
  46        .sff_data_xfer          = ata_sff_data_xfer,
  47        .sff_drain_fifo         = ata_sff_drain_fifo,
  48
  49        .lost_interrupt         = ata_sff_lost_interrupt,
  50};
  51EXPORT_SYMBOL_GPL(ata_sff_port_ops);
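/*
 * Editorial sketch, not part of the original source: low-level drivers
 * normally reuse this operations table through .inherits instead of
 * filling in every callback themselves.  The identifiers
 * my_pata_port_ops and my_set_piomode below are hypothetical.
 *
 *	static struct ata_port_operations my_pata_port_ops = {
 *		.inherits	= &ata_sff_port_ops,
 *		.set_piomode	= my_set_piomode,
 *	};
 */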
  52
  53/**
  54 *      ata_sff_check_status - Read device status reg & clear interrupt
  55 *      @ap: port where the device is
  56 *
  57 *      Reads ATA taskfile status register for currently-selected device
   58 *      and returns its value. This also clears pending interrupts
   59 *      from this device.
  60 *
  61 *      LOCKING:
  62 *      Inherited from caller.
  63 */
  64u8 ata_sff_check_status(struct ata_port *ap)
  65{
  66        return ioread8(ap->ioaddr.status_addr);
  67}
  68EXPORT_SYMBOL_GPL(ata_sff_check_status);
  69
  70/**
  71 *      ata_sff_altstatus - Read device alternate status reg
  72 *      @ap: port where the device is
  73 *
  74 *      Reads ATA taskfile alternate status register for
   75 *      currently-selected device and returns its value.
  76 *
  77 *      Note: may NOT be used as the check_altstatus() entry in
  78 *      ata_port_operations.
  79 *
  80 *      LOCKING:
  81 *      Inherited from caller.
  82 */
  83static u8 ata_sff_altstatus(struct ata_port *ap)
  84{
  85        if (ap->ops->sff_check_altstatus)
  86                return ap->ops->sff_check_altstatus(ap);
  87
  88        return ioread8(ap->ioaddr.altstatus_addr);
  89}
  90
  91/**
  92 *      ata_sff_irq_status - Check if the device is busy
  93 *      @ap: port where the device is
  94 *
   95 *      Determine if the port is currently busy. Uses the altstatus
   96 *      register if available, in order to avoid clearing the shared IRQ
   97 *      status while looking for an IRQ source. Fortunately for us,
   98 *      devices that lack a ctl register don't share interrupt lines.
  99 *
 100 *      LOCKING:
 101 *      Inherited from caller.
 102 */
 103static u8 ata_sff_irq_status(struct ata_port *ap)
 104{
 105        u8 status;
 106
 107        if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
 108                status = ata_sff_altstatus(ap);
 109                /* Not us: We are busy */
 110                if (status & ATA_BUSY)
 111                        return status;
 112        }
 113        /* Clear INTRQ latch */
 114        status = ap->ops->sff_check_status(ap);
 115        return status;
 116}
 117
 118/**
 119 *      ata_sff_sync - Flush writes
 120 *      @ap: Port to wait for.
 121 *
 122 *      CAUTION:
 123 *      If we have an mmio device with no ctl and no altstatus
 124 *      method this will fail. No such devices are known to exist.
 125 *
 126 *      LOCKING:
 127 *      Inherited from caller.
 128 */
 129
 130static void ata_sff_sync(struct ata_port *ap)
 131{
 132        if (ap->ops->sff_check_altstatus)
 133                ap->ops->sff_check_altstatus(ap);
 134        else if (ap->ioaddr.altstatus_addr)
 135                ioread8(ap->ioaddr.altstatus_addr);
 136}
 137
 138/**
  139 *      ata_sff_pause           -       Flush writes and wait 400ns
 140 *      @ap: Port to pause for.
 141 *
 142 *      CAUTION:
 143 *      If we have an mmio device with no ctl and no altstatus
 144 *      method this will fail. No such devices are known to exist.
 145 *
 146 *      LOCKING:
 147 *      Inherited from caller.
 148 */
 149
 150void ata_sff_pause(struct ata_port *ap)
 151{
 152        ata_sff_sync(ap);
 153        ndelay(400);
 154}
 155EXPORT_SYMBOL_GPL(ata_sff_pause);
 156
 157/**
 158 *      ata_sff_dma_pause       -       Pause before commencing DMA
 159 *      @ap: Port to pause for.
 160 *
 161 *      Perform I/O fencing and ensure sufficient cycle delays occur
 162 *      for the HDMA1:0 transition
 163 */
 164
 165void ata_sff_dma_pause(struct ata_port *ap)
 166{
 167        if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
 168                /* An altstatus read will cause the needed delay without
 169                   messing up the IRQ status */
 170                ata_sff_altstatus(ap);
 171                return;
 172        }
 173        /* There are no DMA controllers without ctl. BUG here to ensure
 174           we never violate the HDMA1:0 transition timing and risk
 175           corruption. */
 176        BUG();
 177}
 178EXPORT_SYMBOL_GPL(ata_sff_dma_pause);
 179
 180/**
 181 *      ata_sff_busy_sleep - sleep until BSY clears, or timeout
 182 *      @ap: port containing status register to be polled
 183 *      @tmout_pat: impatience timeout in msecs
 184 *      @tmout: overall timeout in msecs
 185 *
 186 *      Sleep until ATA Status register bit BSY clears,
 187 *      or a timeout occurs.
 188 *
 189 *      LOCKING:
 190 *      Kernel thread context (may sleep).
 191 *
 192 *      RETURNS:
 193 *      0 on success, -errno otherwise.
 194 */
 195int ata_sff_busy_sleep(struct ata_port *ap,
 196                       unsigned long tmout_pat, unsigned long tmout)
 197{
 198        unsigned long timer_start, timeout;
 199        u8 status;
 200
 201        status = ata_sff_busy_wait(ap, ATA_BUSY, 300);
 202        timer_start = jiffies;
 203        timeout = ata_deadline(timer_start, tmout_pat);
 204        while (status != 0xff && (status & ATA_BUSY) &&
 205               time_before(jiffies, timeout)) {
 206                ata_msleep(ap, 50);
 207                status = ata_sff_busy_wait(ap, ATA_BUSY, 3);
 208        }
 209
 210        if (status != 0xff && (status & ATA_BUSY))
 211                ata_port_warn(ap,
 212                              "port is slow to respond, please be patient (Status 0x%x)\n",
 213                              status);
 214
 215        timeout = ata_deadline(timer_start, tmout);
 216        while (status != 0xff && (status & ATA_BUSY) &&
 217               time_before(jiffies, timeout)) {
 218                ata_msleep(ap, 50);
 219                status = ap->ops->sff_check_status(ap);
 220        }
 221
 222        if (status == 0xff)
 223                return -ENODEV;
 224
 225        if (status & ATA_BUSY) {
 226                ata_port_err(ap,
 227                             "port failed to respond (%lu secs, Status 0x%x)\n",
 228                             DIV_ROUND_UP(tmout, 1000), status);
 229                return -EBUSY;
 230        }
 231
 232        return 0;
 233}
 234EXPORT_SYMBOL_GPL(ata_sff_busy_sleep);
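/*
 * Editorial sketch, not part of the original source: callers typically pass
 * a short "impatience" timeout followed by a longer overall timeout, both
 * in msecs.  The ATA_TMOUT_BOOT* constants from <linux/libata.h> are used
 * here on the assumption that they are expressed in msecs.
 *
 *	rc = ata_sff_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
 *	if (rc)
 *		return rc;
 */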
 235
 236static int ata_sff_check_ready(struct ata_link *link)
 237{
 238        u8 status = link->ap->ops->sff_check_status(link->ap);
 239
 240        return ata_check_ready(status);
 241}
 242
 243/**
 244 *      ata_sff_wait_ready - sleep until BSY clears, or timeout
 245 *      @link: SFF link to wait ready status for
 246 *      @deadline: deadline jiffies for the operation
 247 *
 248 *      Sleep until ATA Status register bit BSY clears, or timeout
 249 *      occurs.
 250 *
 251 *      LOCKING:
 252 *      Kernel thread context (may sleep).
 253 *
 254 *      RETURNS:
 255 *      0 on success, -errno otherwise.
 256 */
 257int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline)
 258{
 259        return ata_wait_ready(link, deadline, ata_sff_check_ready);
 260}
 261EXPORT_SYMBOL_GPL(ata_sff_wait_ready);
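/*
 * Editorial sketch, not part of the original source: reset paths usually
 * wait against the EH deadline and treat -ENODEV (empty port) as
 * non-fatal, roughly:
 *
 *	rc = ata_sff_wait_ready(link, deadline);
 *	if (rc && rc != -ENODEV)
 *		return rc;
 */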
 262
 263/**
 264 *      ata_sff_set_devctl - Write device control reg
 265 *      @ap: port where the device is
 266 *      @ctl: value to write
 267 *
 268 *      Writes ATA taskfile device control register.
 269 *
 270 *      Note: may NOT be used as the sff_set_devctl() entry in
 271 *      ata_port_operations.
 272 *
 273 *      LOCKING:
 274 *      Inherited from caller.
 275 */
 276static void ata_sff_set_devctl(struct ata_port *ap, u8 ctl)
 277{
 278        if (ap->ops->sff_set_devctl)
 279                ap->ops->sff_set_devctl(ap, ctl);
 280        else
 281                iowrite8(ctl, ap->ioaddr.ctl_addr);
 282}
 283
 284/**
 285 *      ata_sff_dev_select - Select device 0/1 on ATA bus
 286 *      @ap: ATA channel to manipulate
 287 *      @device: ATA device (numbered from zero) to select
 288 *
 289 *      Use the method defined in the ATA specification to
 290 *      make either device 0, or device 1, active on the
 291 *      ATA channel.  Works with both PIO and MMIO.
 292 *
 293 *      May be used as the dev_select() entry in ata_port_operations.
 294 *
 295 *      LOCKING:
 296 *      caller.
 297 */
 298void ata_sff_dev_select(struct ata_port *ap, unsigned int device)
 299{
 300        u8 tmp;
 301
 302        if (device == 0)
 303                tmp = ATA_DEVICE_OBS;
 304        else
 305                tmp = ATA_DEVICE_OBS | ATA_DEV1;
 306
 307        iowrite8(tmp, ap->ioaddr.device_addr);
 308        ata_sff_pause(ap);      /* needed; also flushes, for mmio */
 309}
 310EXPORT_SYMBOL_GPL(ata_sff_dev_select);
 311
 312/**
 313 *      ata_dev_select - Select device 0/1 on ATA bus
 314 *      @ap: ATA channel to manipulate
 315 *      @device: ATA device (numbered from zero) to select
 316 *      @wait: non-zero to wait for Status register BSY bit to clear
 317 *      @can_sleep: non-zero if context allows sleeping
 318 *
 319 *      Use the method defined in the ATA specification to
 320 *      make either device 0, or device 1, active on the
 321 *      ATA channel.
 322 *
 323 *      This is a high-level version of ata_sff_dev_select(), which
 324 *      additionally provides the services of inserting the proper
 325 *      pauses and status polling, where needed.
 326 *
 327 *      LOCKING:
 328 *      caller.
 329 */
 330static void ata_dev_select(struct ata_port *ap, unsigned int device,
 331                           unsigned int wait, unsigned int can_sleep)
 332{
 333        if (ata_msg_probe(ap))
 334                ata_port_info(ap, "ata_dev_select: ENTER, device %u, wait %u\n",
 335                              device, wait);
 336
 337        if (wait)
 338                ata_wait_idle(ap);
 339
 340        ap->ops->sff_dev_select(ap, device);
 341
 342        if (wait) {
 343                if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
 344                        ata_msleep(ap, 150);
 345                ata_wait_idle(ap);
 346        }
 347}
 348
 349/**
 350 *      ata_sff_irq_on - Enable interrupts on a port.
 351 *      @ap: Port on which interrupts are enabled.
 352 *
 353 *      Enable interrupts on a legacy IDE device using MMIO or PIO,
 354 *      wait for idle, clear any pending interrupts.
 355 *
 356 *      Note: may NOT be used as the sff_irq_on() entry in
 357 *      ata_port_operations.
 358 *
 359 *      LOCKING:
 360 *      Inherited from caller.
 361 */
 362void ata_sff_irq_on(struct ata_port *ap)
 363{
 364        struct ata_ioports *ioaddr = &ap->ioaddr;
 365
 366        if (ap->ops->sff_irq_on) {
 367                ap->ops->sff_irq_on(ap);
 368                return;
 369        }
 370
 371        ap->ctl &= ~ATA_NIEN;
 372        ap->last_ctl = ap->ctl;
 373
 374        if (ap->ops->sff_set_devctl || ioaddr->ctl_addr)
 375                ata_sff_set_devctl(ap, ap->ctl);
 376        ata_wait_idle(ap);
 377
 378        if (ap->ops->sff_irq_clear)
 379                ap->ops->sff_irq_clear(ap);
 380}
 381EXPORT_SYMBOL_GPL(ata_sff_irq_on);
 382
 383/**
 384 *      ata_sff_tf_load - send taskfile registers to host controller
 385 *      @ap: Port to which output is sent
 386 *      @tf: ATA taskfile register set
 387 *
 388 *      Outputs ATA taskfile to standard ATA host controller.
 389 *
 390 *      LOCKING:
 391 *      Inherited from caller.
 392 */
 393void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
 394{
 395        struct ata_ioports *ioaddr = &ap->ioaddr;
 396        unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
 397
 398        if (tf->ctl != ap->last_ctl) {
 399                if (ioaddr->ctl_addr)
 400                        iowrite8(tf->ctl, ioaddr->ctl_addr);
 401                ap->last_ctl = tf->ctl;
 402                ata_wait_idle(ap);
 403        }
 404
 405        if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
 406                WARN_ON_ONCE(!ioaddr->ctl_addr);
 407                iowrite8(tf->hob_feature, ioaddr->feature_addr);
 408                iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
 409                iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
 410                iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
 411                iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
 412                VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
 413                        tf->hob_feature,
 414                        tf->hob_nsect,
 415                        tf->hob_lbal,
 416                        tf->hob_lbam,
 417                        tf->hob_lbah);
 418        }
 419
 420        if (is_addr) {
 421                iowrite8(tf->feature, ioaddr->feature_addr);
 422                iowrite8(tf->nsect, ioaddr->nsect_addr);
 423                iowrite8(tf->lbal, ioaddr->lbal_addr);
 424                iowrite8(tf->lbam, ioaddr->lbam_addr);
 425                iowrite8(tf->lbah, ioaddr->lbah_addr);
 426                VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
 427                        tf->feature,
 428                        tf->nsect,
 429                        tf->lbal,
 430                        tf->lbam,
 431                        tf->lbah);
 432        }
 433
 434        if (tf->flags & ATA_TFLAG_DEVICE) {
 435                iowrite8(tf->device, ioaddr->device_addr);
 436                VPRINTK("device 0x%X\n", tf->device);
 437        }
 438
 439        ata_wait_idle(ap);
 440}
 441EXPORT_SYMBOL_GPL(ata_sff_tf_load);
 442
 443/**
 444 *      ata_sff_tf_read - input device's ATA taskfile shadow registers
 445 *      @ap: Port from which input is read
 446 *      @tf: ATA taskfile register set for storing input
 447 *
 448 *      Reads ATA taskfile registers for currently-selected device
 449 *      into @tf. Assumes the device has a fully SFF compliant task file
  450 *      layout and behaviour. If your device does not (e.g. has a different
  451 *      status method) then you will need to provide a replacement tf_read.
 452 *
 453 *      LOCKING:
 454 *      Inherited from caller.
 455 */
 456void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
 457{
 458        struct ata_ioports *ioaddr = &ap->ioaddr;
 459
 460        tf->command = ata_sff_check_status(ap);
 461        tf->feature = ioread8(ioaddr->error_addr);
 462        tf->nsect = ioread8(ioaddr->nsect_addr);
 463        tf->lbal = ioread8(ioaddr->lbal_addr);
 464        tf->lbam = ioread8(ioaddr->lbam_addr);
 465        tf->lbah = ioread8(ioaddr->lbah_addr);
 466        tf->device = ioread8(ioaddr->device_addr);
 467
 468        if (tf->flags & ATA_TFLAG_LBA48) {
 469                if (likely(ioaddr->ctl_addr)) {
 470                        iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
 471                        tf->hob_feature = ioread8(ioaddr->error_addr);
 472                        tf->hob_nsect = ioread8(ioaddr->nsect_addr);
 473                        tf->hob_lbal = ioread8(ioaddr->lbal_addr);
 474                        tf->hob_lbam = ioread8(ioaddr->lbam_addr);
 475                        tf->hob_lbah = ioread8(ioaddr->lbah_addr);
 476                        iowrite8(tf->ctl, ioaddr->ctl_addr);
 477                        ap->last_ctl = tf->ctl;
 478                } else
 479                        WARN_ON_ONCE(1);
 480        }
 481}
 482EXPORT_SYMBOL_GPL(ata_sff_tf_read);
 483
 484/**
 485 *      ata_sff_exec_command - issue ATA command to host controller
 486 *      @ap: port to which command is being issued
 487 *      @tf: ATA taskfile register set
 488 *
 489 *      Issues ATA command, with proper synchronization with interrupt
 490 *      handler / other threads.
 491 *
 492 *      LOCKING:
 493 *      spin_lock_irqsave(host lock)
 494 */
 495void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
 496{
 497        DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
 498
 499        iowrite8(tf->command, ap->ioaddr.command_addr);
 500        ata_sff_pause(ap);
 501}
 502EXPORT_SYMBOL_GPL(ata_sff_exec_command);
 503
 504/**
 505 *      ata_tf_to_host - issue ATA taskfile to host controller
 506 *      @ap: port to which command is being issued
 507 *      @tf: ATA taskfile register set
 508 *
 509 *      Issues ATA taskfile register set to ATA host controller,
 510 *      with proper synchronization with interrupt handler and
 511 *      other threads.
 512 *
 513 *      LOCKING:
 514 *      spin_lock_irqsave(host lock)
 515 */
 516static inline void ata_tf_to_host(struct ata_port *ap,
 517                                  const struct ata_taskfile *tf)
 518{
 519        ap->ops->sff_tf_load(ap, tf);
 520        ap->ops->sff_exec_command(ap, tf);
 521}
 522
 523/**
 524 *      ata_sff_data_xfer - Transfer data by PIO
 525 *      @qc: queued command
 526 *      @buf: data buffer
 527 *      @buflen: buffer length
 528 *      @rw: read/write
 529 *
 530 *      Transfer data from/to the device data register by PIO.
 531 *
 532 *      LOCKING:
 533 *      Inherited from caller.
 534 *
 535 *      RETURNS:
 536 *      Bytes consumed.
 537 */
 538unsigned int ata_sff_data_xfer(struct ata_queued_cmd *qc, unsigned char *buf,
 539                               unsigned int buflen, int rw)
 540{
 541        struct ata_port *ap = qc->dev->link->ap;
 542        void __iomem *data_addr = ap->ioaddr.data_addr;
 543        unsigned int words = buflen >> 1;
 544
 545        /* Transfer multiple of 2 bytes */
 546        if (rw == READ)
 547                ioread16_rep(data_addr, buf, words);
 548        else
 549                iowrite16_rep(data_addr, buf, words);
 550
 551        /* Transfer trailing byte, if any. */
 552        if (unlikely(buflen & 0x01)) {
 553                unsigned char pad[2] = { };
 554
 555                /* Point buf to the tail of buffer */
 556                buf += buflen - 1;
 557
 558                /*
 559                 * Use io*16_rep() accessors here as well to avoid pointlessly
 560                 * swapping bytes to and from on the big endian machines...
 561                 */
 562                if (rw == READ) {
 563                        ioread16_rep(data_addr, pad, 1);
 564                        *buf = pad[0];
 565                } else {
 566                        pad[0] = *buf;
 567                        iowrite16_rep(data_addr, pad, 1);
 568                }
 569                words++;
 570        }
 571
 572        return words << 1;
 573}
 574EXPORT_SYMBOL_GPL(ata_sff_data_xfer);
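/*
 * Editorial sketch, not part of the original source: controllers with a
 * quirky data register can wrap this helper in their own .sff_data_xfer
 * method and only special-case what differs.  my_data_xfer and
 * my_odd_length_xfer below are hypothetical.
 *
 *	static unsigned int my_data_xfer(struct ata_queued_cmd *qc,
 *					 unsigned char *buf,
 *					 unsigned int buflen, int rw)
 *	{
 *		if (buflen & 1)
 *			return my_odd_length_xfer(qc, buf, buflen, rw);
 *		return ata_sff_data_xfer(qc, buf, buflen, rw);
 *	}
 */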
 575
 576/**
 577 *      ata_sff_data_xfer32 - Transfer data by PIO
 578 *      @qc: queued command
 579 *      @buf: data buffer
 580 *      @buflen: buffer length
 581 *      @rw: read/write
 582 *
 583 *      Transfer data from/to the device data register by PIO using 32bit
 584 *      I/O operations.
 585 *
 586 *      LOCKING:
 587 *      Inherited from caller.
 588 *
 589 *      RETURNS:
 590 *      Bytes consumed.
 591 */
 592
 593unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc, unsigned char *buf,
 594                               unsigned int buflen, int rw)
 595{
 596        struct ata_device *dev = qc->dev;
 597        struct ata_port *ap = dev->link->ap;
 598        void __iomem *data_addr = ap->ioaddr.data_addr;
 599        unsigned int words = buflen >> 2;
 600        int slop = buflen & 3;
 601
 602        if (!(ap->pflags & ATA_PFLAG_PIO32))
 603                return ata_sff_data_xfer(qc, buf, buflen, rw);
 604
 605        /* Transfer multiple of 4 bytes */
 606        if (rw == READ)
 607                ioread32_rep(data_addr, buf, words);
 608        else
 609                iowrite32_rep(data_addr, buf, words);
 610
 611        /* Transfer trailing bytes, if any */
 612        if (unlikely(slop)) {
 613                unsigned char pad[4] = { };
 614
 615                /* Point buf to the tail of buffer */
 616                buf += buflen - slop;
 617
 618                /*
 619                 * Use io*_rep() accessors here as well to avoid pointlessly
 620                 * swapping bytes to and from on the big endian machines...
 621                 */
 622                if (rw == READ) {
 623                        if (slop < 3)
 624                                ioread16_rep(data_addr, pad, 1);
 625                        else
 626                                ioread32_rep(data_addr, pad, 1);
 627                        memcpy(buf, pad, slop);
 628                } else {
 629                        memcpy(pad, buf, slop);
 630                        if (slop < 3)
 631                                iowrite16_rep(data_addr, pad, 1);
 632                        else
 633                                iowrite32_rep(data_addr, pad, 1);
 634                }
 635        }
 636        return (buflen + 1) & ~1;
 637}
 638EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);
 639
 640/**
 641 *      ata_pio_sector - Transfer a sector of data.
 642 *      @qc: Command on going
 643 *
 644 *      Transfer qc->sect_size bytes of data from/to the ATA device.
 645 *
 646 *      LOCKING:
 647 *      Inherited from caller.
 648 */
 649static void ata_pio_sector(struct ata_queued_cmd *qc)
 650{
 651        int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
 652        struct ata_port *ap = qc->ap;
 653        struct page *page;
 654        unsigned int offset;
 655        unsigned char *buf;
 656
 657        if (!qc->cursg) {
 658                qc->curbytes = qc->nbytes;
 659                return;
 660        }
 661        if (qc->curbytes == qc->nbytes - qc->sect_size)
 662                ap->hsm_task_state = HSM_ST_LAST;
 663
 664        page = sg_page(qc->cursg);
 665        offset = qc->cursg->offset + qc->cursg_ofs;
 666
 667        /* get the current page and offset */
 668        page = nth_page(page, (offset >> PAGE_SHIFT));
 669        offset %= PAGE_SIZE;
 670
 671        DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
 672
 673        /* do the actual data transfer */
 674        buf = kmap_atomic(page);
 675        ap->ops->sff_data_xfer(qc, buf + offset, qc->sect_size, do_write);
 676        kunmap_atomic(buf);
 677
 678        if (!do_write && !PageSlab(page))
 679                flush_dcache_page(page);
 680
 681        qc->curbytes += qc->sect_size;
 682        qc->cursg_ofs += qc->sect_size;
 683
 684        if (qc->cursg_ofs == qc->cursg->length) {
 685                qc->cursg = sg_next(qc->cursg);
 686                if (!qc->cursg)
 687                        ap->hsm_task_state = HSM_ST_LAST;
 688                qc->cursg_ofs = 0;
 689        }
 690}
 691
 692/**
 693 *      ata_pio_sectors - Transfer one or many sectors.
 694 *      @qc: Command on going
 695 *
 696 *      Transfer one or many sectors of data from/to the
 697 *      ATA device for the DRQ request.
 698 *
 699 *      LOCKING:
 700 *      Inherited from caller.
 701 */
 702static void ata_pio_sectors(struct ata_queued_cmd *qc)
 703{
 704        if (is_multi_taskfile(&qc->tf)) {
 705                /* READ/WRITE MULTIPLE */
 706                unsigned int nsect;
 707
 708                WARN_ON_ONCE(qc->dev->multi_count == 0);
 709
 710                nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
 711                            qc->dev->multi_count);
 712                while (nsect--)
 713                        ata_pio_sector(qc);
 714        } else
 715                ata_pio_sector(qc);
 716
 717        ata_sff_sync(qc->ap); /* flush */
 718}
 719
 720/**
 721 *      atapi_send_cdb - Write CDB bytes to hardware
 722 *      @ap: Port to which ATAPI device is attached.
 723 *      @qc: Taskfile currently active
 724 *
 725 *      When device has indicated its readiness to accept
 726 *      a CDB, this function is called.  Send the CDB.
 727 *
 728 *      LOCKING:
 729 *      caller.
 730 */
 731static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
 732{
 733        /* send SCSI cdb */
 734        DPRINTK("send cdb\n");
 735        WARN_ON_ONCE(qc->dev->cdb_len < 12);
 736
 737        ap->ops->sff_data_xfer(qc, qc->cdb, qc->dev->cdb_len, 1);
 738        ata_sff_sync(ap);
 739        /* FIXME: If the CDB is for DMA do we need to do the transition delay
 740           or is bmdma_start guaranteed to do it ? */
 741        switch (qc->tf.protocol) {
 742        case ATAPI_PROT_PIO:
 743                ap->hsm_task_state = HSM_ST;
 744                break;
 745        case ATAPI_PROT_NODATA:
 746                ap->hsm_task_state = HSM_ST_LAST;
 747                break;
 748#ifdef CONFIG_ATA_BMDMA
 749        case ATAPI_PROT_DMA:
 750                ap->hsm_task_state = HSM_ST_LAST;
 751                /* initiate bmdma */
 752                ap->ops->bmdma_start(qc);
 753                break;
 754#endif /* CONFIG_ATA_BMDMA */
 755        default:
 756                BUG();
 757        }
 758}
 759
 760/**
 761 *      __atapi_pio_bytes - Transfer data from/to the ATAPI device.
 762 *      @qc: Command on going
 763 *      @bytes: number of bytes
 764 *
  765 *      Transfer data from/to the ATAPI device.
 766 *
 767 *      LOCKING:
 768 *      Inherited from caller.
 769 *
 770 */
 771static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
 772{
 773        int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
 774        struct ata_port *ap = qc->ap;
 775        struct ata_device *dev = qc->dev;
 776        struct ata_eh_info *ehi = &dev->link->eh_info;
 777        struct scatterlist *sg;
 778        struct page *page;
 779        unsigned char *buf;
 780        unsigned int offset, count, consumed;
 781
 782next_sg:
 783        sg = qc->cursg;
 784        if (unlikely(!sg)) {
 785                ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
 786                                  "buf=%u cur=%u bytes=%u",
 787                                  qc->nbytes, qc->curbytes, bytes);
 788                return -1;
 789        }
 790
 791        page = sg_page(sg);
 792        offset = sg->offset + qc->cursg_ofs;
 793
 794        /* get the current page and offset */
 795        page = nth_page(page, (offset >> PAGE_SHIFT));
 796        offset %= PAGE_SIZE;
 797
 798        /* don't overrun current sg */
 799        count = min(sg->length - qc->cursg_ofs, bytes);
 800
 801        /* don't cross page boundaries */
 802        count = min(count, (unsigned int)PAGE_SIZE - offset);
 803
 804        DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
 805
 806        /* do the actual data transfer */
 807        buf = kmap_atomic(page);
 808        consumed = ap->ops->sff_data_xfer(qc, buf + offset, count, rw);
 809        kunmap_atomic(buf);
 810
 811        bytes -= min(bytes, consumed);
 812        qc->curbytes += count;
 813        qc->cursg_ofs += count;
 814
 815        if (qc->cursg_ofs == sg->length) {
 816                qc->cursg = sg_next(qc->cursg);
 817                qc->cursg_ofs = 0;
 818        }
 819
 820        /*
 821         * There used to be a  WARN_ON_ONCE(qc->cursg && count != consumed);
 822         * Unfortunately __atapi_pio_bytes doesn't know enough to do the WARN
 823         * check correctly as it doesn't know if it is the last request being
 824         * made. Somebody should implement a proper sanity check.
 825         */
 826        if (bytes)
 827                goto next_sg;
 828        return 0;
 829}
 830
 831/**
 832 *      atapi_pio_bytes - Transfer data from/to the ATAPI device.
 833 *      @qc: Command on going
 834 *
  835 *      Transfer data from/to the ATAPI device.
 836 *
 837 *      LOCKING:
 838 *      Inherited from caller.
 839 */
 840static void atapi_pio_bytes(struct ata_queued_cmd *qc)
 841{
 842        struct ata_port *ap = qc->ap;
 843        struct ata_device *dev = qc->dev;
 844        struct ata_eh_info *ehi = &dev->link->eh_info;
 845        unsigned int ireason, bc_lo, bc_hi, bytes;
 846        int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
 847
 848        /* Abuse qc->result_tf for temp storage of intermediate TF
 849         * here to save some kernel stack usage.
 850         * For normal completion, qc->result_tf is not relevant. For
 851         * error, qc->result_tf is later overwritten by ata_qc_complete().
 852         * So, the correctness of qc->result_tf is not affected.
 853         */
 854        ap->ops->sff_tf_read(ap, &qc->result_tf);
 855        ireason = qc->result_tf.nsect;
 856        bc_lo = qc->result_tf.lbam;
 857        bc_hi = qc->result_tf.lbah;
 858        bytes = (bc_hi << 8) | bc_lo;
 859
 860        /* shall be cleared to zero, indicating xfer of data */
 861        if (unlikely(ireason & ATAPI_COD))
 862                goto atapi_check;
 863
 864        /* make sure transfer direction matches expected */
 865        i_write = ((ireason & ATAPI_IO) == 0) ? 1 : 0;
 866        if (unlikely(do_write != i_write))
 867                goto atapi_check;
 868
 869        if (unlikely(!bytes))
 870                goto atapi_check;
 871
 872        VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
 873
 874        if (unlikely(__atapi_pio_bytes(qc, bytes)))
 875                goto err_out;
 876        ata_sff_sync(ap); /* flush */
 877
 878        return;
 879
 880 atapi_check:
 881        ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
 882                          ireason, bytes);
 883 err_out:
 884        qc->err_mask |= AC_ERR_HSM;
 885        ap->hsm_task_state = HSM_ST_ERR;
 886}
 887
 888/**
 889 *      ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
 890 *      @ap: the target ata_port
 891 *      @qc: qc on going
 892 *
 893 *      RETURNS:
 894 *      1 if ok in workqueue, 0 otherwise.
 895 */
 896static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
 897                                                struct ata_queued_cmd *qc)
 898{
 899        if (qc->tf.flags & ATA_TFLAG_POLLING)
 900                return 1;
 901
 902        if (ap->hsm_task_state == HSM_ST_FIRST) {
 903                if (qc->tf.protocol == ATA_PROT_PIO &&
 904                   (qc->tf.flags & ATA_TFLAG_WRITE))
 905                    return 1;
 906
 907                if (ata_is_atapi(qc->tf.protocol) &&
 908                   !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
 909                        return 1;
 910        }
 911
 912        return 0;
 913}
 914
 915/**
 916 *      ata_hsm_qc_complete - finish a qc running on standard HSM
 917 *      @qc: Command to complete
 918 *      @in_wq: 1 if called from workqueue, 0 otherwise
 919 *
 920 *      Finish @qc which is running on standard HSM.
 921 *
 922 *      LOCKING:
 923 *      If @in_wq is zero, spin_lock_irqsave(host lock).
 924 *      Otherwise, none on entry and grabs host lock.
 925 */
 926static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
 927{
 928        struct ata_port *ap = qc->ap;
 929
 930        if (ap->ops->error_handler) {
 931                if (in_wq) {
 932                        /* EH might have kicked in while host lock is
 933                         * released.
 934                         */
 935                        qc = ata_qc_from_tag(ap, qc->tag);
 936                        if (qc) {
 937                                if (likely(!(qc->err_mask & AC_ERR_HSM))) {
 938                                        ata_sff_irq_on(ap);
 939                                        ata_qc_complete(qc);
 940                                } else
 941                                        ata_port_freeze(ap);
 942                        }
 943                } else {
 944                        if (likely(!(qc->err_mask & AC_ERR_HSM)))
 945                                ata_qc_complete(qc);
 946                        else
 947                                ata_port_freeze(ap);
 948                }
 949        } else {
 950                if (in_wq) {
 951                        ata_sff_irq_on(ap);
 952                        ata_qc_complete(qc);
 953                } else
 954                        ata_qc_complete(qc);
 955        }
 956}
 957
 958/**
 959 *      ata_sff_hsm_move - move the HSM to the next state.
 960 *      @ap: the target ata_port
 961 *      @qc: qc on going
 962 *      @status: current device status
 963 *      @in_wq: 1 if called from workqueue, 0 otherwise
 964 *
 965 *      RETURNS:
 966 *      1 when poll next status needed, 0 otherwise.
 967 */
 968int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
 969                     u8 status, int in_wq)
 970{
 971        struct ata_link *link = qc->dev->link;
 972        struct ata_eh_info *ehi = &link->eh_info;
 973        int poll_next;
 974
 975        lockdep_assert_held(ap->lock);
 976
 977        WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
 978
 979        /* Make sure ata_sff_qc_issue() does not throw things
 980         * like DMA polling into the workqueue. Notice that
 981         * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
 982         */
 983        WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc));
 984
 985fsm_start:
 986        DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
 987                ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
 988
 989        switch (ap->hsm_task_state) {
 990        case HSM_ST_FIRST:
 991                /* Send first data block or PACKET CDB */
 992
 993                /* If polling, we will stay in the work queue after
 994                 * sending the data. Otherwise, interrupt handler
 995                 * takes over after sending the data.
 996                 */
 997                poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
 998
 999                /* check device status */
1000                if (unlikely((status & ATA_DRQ) == 0)) {
1001                        /* handle BSY=0, DRQ=0 as error */
1002                        if (likely(status & (ATA_ERR | ATA_DF)))
1003                                /* device stops HSM for abort/error */
1004                                qc->err_mask |= AC_ERR_DEV;
1005                        else {
1006                                /* HSM violation. Let EH handle this */
1007                                ata_ehi_push_desc(ehi,
1008                                        "ST_FIRST: !(DRQ|ERR|DF)");
1009                                qc->err_mask |= AC_ERR_HSM;
1010                        }
1011
1012                        ap->hsm_task_state = HSM_ST_ERR;
1013                        goto fsm_start;
1014                }
1015
1016                /* Device should not ask for data transfer (DRQ=1)
1017                 * when it finds something wrong.
1018                 * We ignore DRQ here and stop the HSM by
1019                 * changing hsm_task_state to HSM_ST_ERR and
1020                 * let the EH abort the command or reset the device.
1021                 */
1022                if (unlikely(status & (ATA_ERR | ATA_DF))) {
1023                        /* Some ATAPI tape drives forget to clear the ERR bit
1024                         * when doing the next command (mostly request sense).
1025                         * We ignore ERR here to workaround and proceed sending
1026                         * the CDB.
1027                         */
1028                        if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
1029                                ata_ehi_push_desc(ehi, "ST_FIRST: "
1030                                        "DRQ=1 with device error, "
1031                                        "dev_stat 0x%X", status);
1032                                qc->err_mask |= AC_ERR_HSM;
1033                                ap->hsm_task_state = HSM_ST_ERR;
1034                                goto fsm_start;
1035                        }
1036                }
1037
1038                if (qc->tf.protocol == ATA_PROT_PIO) {
1039                        /* PIO data out protocol.
1040                         * send first data block.
1041                         */
1042
1043                        /* ata_pio_sectors() might change the state
1044                         * to HSM_ST_LAST. so, the state is changed here
1045                         * before ata_pio_sectors().
1046                         */
1047                        ap->hsm_task_state = HSM_ST;
1048                        ata_pio_sectors(qc);
1049                } else
1050                        /* send CDB */
1051                        atapi_send_cdb(ap, qc);
1052
1053                /* if polling, ata_sff_pio_task() handles the rest.
1054                 * otherwise, interrupt handler takes over from here.
1055                 */
1056                break;
1057
1058        case HSM_ST:
1059                /* complete command or read/write the data register */
1060                if (qc->tf.protocol == ATAPI_PROT_PIO) {
1061                        /* ATAPI PIO protocol */
1062                        if ((status & ATA_DRQ) == 0) {
1063                                /* No more data to transfer or device error.
1064                                 * Device error will be tagged in HSM_ST_LAST.
1065                                 */
1066                                ap->hsm_task_state = HSM_ST_LAST;
1067                                goto fsm_start;
1068                        }
1069
1070                        /* Device should not ask for data transfer (DRQ=1)
1071                         * when it finds something wrong.
1072                         * We ignore DRQ here and stop the HSM by
1073                         * changing hsm_task_state to HSM_ST_ERR and
1074                         * let the EH abort the command or reset the device.
1075                         */
1076                        if (unlikely(status & (ATA_ERR | ATA_DF))) {
1077                                ata_ehi_push_desc(ehi, "ST-ATAPI: "
1078                                        "DRQ=1 with device error, "
1079                                        "dev_stat 0x%X", status);
1080                                qc->err_mask |= AC_ERR_HSM;
1081                                ap->hsm_task_state = HSM_ST_ERR;
1082                                goto fsm_start;
1083                        }
1084
1085                        atapi_pio_bytes(qc);
1086
1087                        if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
1088                                /* bad ireason reported by device */
1089                                goto fsm_start;
1090
1091                } else {
1092                        /* ATA PIO protocol */
1093                        if (unlikely((status & ATA_DRQ) == 0)) {
1094                                /* handle BSY=0, DRQ=0 as error */
1095                                if (likely(status & (ATA_ERR | ATA_DF))) {
1096                                        /* device stops HSM for abort/error */
1097                                        qc->err_mask |= AC_ERR_DEV;
1098
1099                                        /* If diagnostic failed and this is
1100                                         * IDENTIFY, it's likely a phantom
1101                                         * device.  Mark hint.
1102                                         */
1103                                        if (qc->dev->horkage &
1104                                            ATA_HORKAGE_DIAGNOSTIC)
1105                                                qc->err_mask |=
1106                                                        AC_ERR_NODEV_HINT;
1107                                } else {
1108                                        /* HSM violation. Let EH handle this.
1109                                         * Phantom devices also trigger this
1110                                         * condition.  Mark hint.
1111                                         */
1112                                        ata_ehi_push_desc(ehi, "ST-ATA: "
1113                                                "DRQ=0 without device error, "
1114                                                "dev_stat 0x%X", status);
1115                                        qc->err_mask |= AC_ERR_HSM |
1116                                                        AC_ERR_NODEV_HINT;
1117                                }
1118
1119                                ap->hsm_task_state = HSM_ST_ERR;
1120                                goto fsm_start;
1121                        }
1122
1123                        /* For PIO reads, some devices may ask for
 1124                         * data transfer (DRQ=1) along with ERR=1.
1125                         * We respect DRQ here and transfer one
1126                         * block of junk data before changing the
1127                         * hsm_task_state to HSM_ST_ERR.
1128                         *
1129                         * For PIO writes, ERR=1 DRQ=1 doesn't make
1130                         * sense since the data block has been
1131                         * transferred to the device.
1132                         */
1133                        if (unlikely(status & (ATA_ERR | ATA_DF))) {
 1134                                /* data might be corrupted */
1135                                qc->err_mask |= AC_ERR_DEV;
1136
1137                                if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
1138                                        ata_pio_sectors(qc);
1139                                        status = ata_wait_idle(ap);
1140                                }
1141
1142                                if (status & (ATA_BUSY | ATA_DRQ)) {
1143                                        ata_ehi_push_desc(ehi, "ST-ATA: "
1144                                                "BUSY|DRQ persists on ERR|DF, "
1145                                                "dev_stat 0x%X", status);
1146                                        qc->err_mask |= AC_ERR_HSM;
1147                                }
1148
1149                                /* There are oddball controllers with
1150                                 * status register stuck at 0x7f and
1151                                 * lbal/m/h at zero which makes it
1152                                 * pass all other presence detection
1153                                 * mechanisms we have.  Set NODEV_HINT
1154                                 * for it.  Kernel bz#7241.
1155                                 */
1156                                if (status == 0x7f)
1157                                        qc->err_mask |= AC_ERR_NODEV_HINT;
1158
1159                                /* ata_pio_sectors() might change the
1160                                 * state to HSM_ST_LAST. so, the state
1161                                 * is changed after ata_pio_sectors().
1162                                 */
1163                                ap->hsm_task_state = HSM_ST_ERR;
1164                                goto fsm_start;
1165                        }
1166
1167                        ata_pio_sectors(qc);
1168
1169                        if (ap->hsm_task_state == HSM_ST_LAST &&
1170                            (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
1171                                /* all data read */
1172                                status = ata_wait_idle(ap);
1173                                goto fsm_start;
1174                        }
1175                }
1176
1177                poll_next = 1;
1178                break;
1179
1180        case HSM_ST_LAST:
1181                if (unlikely(!ata_ok(status))) {
1182                        qc->err_mask |= __ac_err_mask(status);
1183                        ap->hsm_task_state = HSM_ST_ERR;
1184                        goto fsm_start;
1185                }
1186
1187                /* no more data to transfer */
1188                DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
1189                        ap->print_id, qc->dev->devno, status);
1190
1191                WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));
1192
1193                ap->hsm_task_state = HSM_ST_IDLE;
1194
1195                /* complete taskfile transaction */
1196                ata_hsm_qc_complete(qc, in_wq);
1197
1198                poll_next = 0;
1199                break;
1200
1201        case HSM_ST_ERR:
1202                ap->hsm_task_state = HSM_ST_IDLE;
1203
1204                /* complete taskfile transaction */
1205                ata_hsm_qc_complete(qc, in_wq);
1206
1207                poll_next = 0;
1208                break;
1209        default:
1210                poll_next = 0;
1211                WARN(true, "ata%d: SFF host state machine in invalid state %d",
1212                     ap->print_id, ap->hsm_task_state);
1213        }
1214
1215        return poll_next;
1216}
1217EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
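/*
 * Editorial summary, not part of the original source: as implemented above,
 * the taskfile state machine moves through
 *
 *	HSM_ST_FIRST -> HSM_ST -> ... -> HSM_ST_LAST -> HSM_ST_IDLE
 *
 * with HSM_ST_ERR entered from any state on a device error or HSM
 * violation; both HSM_ST_LAST and HSM_ST_ERR complete the qc and return
 * the port to HSM_ST_IDLE.
 */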
1218
1219void ata_sff_queue_work(struct work_struct *work)
1220{
1221        queue_work(ata_sff_wq, work);
1222}
1223EXPORT_SYMBOL_GPL(ata_sff_queue_work);
1224
1225void ata_sff_queue_delayed_work(struct delayed_work *dwork, unsigned long delay)
1226{
1227        queue_delayed_work(ata_sff_wq, dwork, delay);
1228}
1229EXPORT_SYMBOL_GPL(ata_sff_queue_delayed_work);
1230
1231void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
1232{
1233        struct ata_port *ap = link->ap;
1234
1235        WARN_ON((ap->sff_pio_task_link != NULL) &&
1236                (ap->sff_pio_task_link != link));
1237        ap->sff_pio_task_link = link;
1238
1239        /* may fail if ata_sff_flush_pio_task() in progress */
1240        ata_sff_queue_delayed_work(&ap->sff_pio_task, msecs_to_jiffies(delay));
1241}
1242EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task);
1243
1244void ata_sff_flush_pio_task(struct ata_port *ap)
1245{
1246        DPRINTK("ENTER\n");
1247
1248        cancel_delayed_work_sync(&ap->sff_pio_task);
1249
1250        /*
 1251         * We want to reset the HSM state to IDLE.  If we do so without
1252         * grabbing the port lock, critical sections protected by it which
1253         * expect the HSM state to stay stable may get surprised.  For
1254         * example, we may set IDLE in between the time
1255         * __ata_sff_port_intr() checks for HSM_ST_IDLE and before it calls
1256         * ata_sff_hsm_move() causing ata_sff_hsm_move() to BUG().
1257         */
1258        spin_lock_irq(ap->lock);
1259        ap->hsm_task_state = HSM_ST_IDLE;
1260        spin_unlock_irq(ap->lock);
1261
1262        ap->sff_pio_task_link = NULL;
1263
1264        if (ata_msg_ctl(ap))
1265                ata_port_dbg(ap, "%s: EXIT\n", __func__);
1266}
1267
1268static void ata_sff_pio_task(struct work_struct *work)
1269{
1270        struct ata_port *ap =
1271                container_of(work, struct ata_port, sff_pio_task.work);
1272        struct ata_link *link = ap->sff_pio_task_link;
1273        struct ata_queued_cmd *qc;
1274        u8 status;
1275        int poll_next;
1276
1277        spin_lock_irq(ap->lock);
1278
1279        BUG_ON(ap->sff_pio_task_link == NULL);
1280        /* qc can be NULL if timeout occurred */
1281        qc = ata_qc_from_tag(ap, link->active_tag);
1282        if (!qc) {
1283                ap->sff_pio_task_link = NULL;
1284                goto out_unlock;
1285        }
1286
1287fsm_start:
1288        WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
1289
1290        /*
1291         * This is purely heuristic.  This is a fast path.
1292         * Sometimes when we enter, BSY will be cleared in
1293         * a chk-status or two.  If not, the drive is probably seeking
1294         * or something.  Snooze for a couple msecs, then
1295         * chk-status again.  If still busy, queue delayed work.
1296         */
1297        status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
1298        if (status & ATA_BUSY) {
1299                spin_unlock_irq(ap->lock);
1300                ata_msleep(ap, 2);
1301                spin_lock_irq(ap->lock);
1302
1303                status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
1304                if (status & ATA_BUSY) {
1305                        ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
1306                        goto out_unlock;
1307                }
1308        }
1309
1310        /*
1311         * hsm_move() may trigger another command to be processed.
1312         * clean the link beforehand.
1313         */
1314        ap->sff_pio_task_link = NULL;
1315        /* move the HSM */
1316        poll_next = ata_sff_hsm_move(ap, qc, status, 1);
1317
1318        /* another command or interrupt handler
1319         * may be running at this point.
1320         */
1321        if (poll_next)
1322                goto fsm_start;
1323out_unlock:
1324        spin_unlock_irq(ap->lock);
1325}
1326
1327/**
1328 *      ata_sff_qc_issue - issue taskfile to a SFF controller
1329 *      @qc: command to issue to device
1330 *
1331 *      This function issues a PIO or NODATA command to a SFF
1332 *      controller.
1333 *
1334 *      LOCKING:
1335 *      spin_lock_irqsave(host lock)
1336 *
1337 *      RETURNS:
1338 *      Zero on success, AC_ERR_* mask on failure
1339 */
1340unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1341{
1342        struct ata_port *ap = qc->ap;
1343        struct ata_link *link = qc->dev->link;
1344
1345        /* Use polling pio if the LLD doesn't handle
1346         * interrupt driven pio and atapi CDB interrupt.
1347         */
1348        if (ap->flags & ATA_FLAG_PIO_POLLING)
1349                qc->tf.flags |= ATA_TFLAG_POLLING;
1350
1351        /* select the device */
1352        ata_dev_select(ap, qc->dev->devno, 1, 0);
1353
1354        /* start the command */
1355        switch (qc->tf.protocol) {
1356        case ATA_PROT_NODATA:
1357                if (qc->tf.flags & ATA_TFLAG_POLLING)
1358                        ata_qc_set_polling(qc);
1359
1360                ata_tf_to_host(ap, &qc->tf);
1361                ap->hsm_task_state = HSM_ST_LAST;
1362
1363                if (qc->tf.flags & ATA_TFLAG_POLLING)
1364                        ata_sff_queue_pio_task(link, 0);
1365
1366                break;
1367
1368        case ATA_PROT_PIO:
1369                if (qc->tf.flags & ATA_TFLAG_POLLING)
1370                        ata_qc_set_polling(qc);
1371
1372                ata_tf_to_host(ap, &qc->tf);
1373
1374                if (qc->tf.flags & ATA_TFLAG_WRITE) {
1375                        /* PIO data out protocol */
1376                        ap->hsm_task_state = HSM_ST_FIRST;
1377                        ata_sff_queue_pio_task(link, 0);
1378
1379                        /* always send first data block using the
1380                         * ata_sff_pio_task() codepath.
1381                         */
1382                } else {
1383                        /* PIO data in protocol */
1384                        ap->hsm_task_state = HSM_ST;
1385
1386                        if (qc->tf.flags & ATA_TFLAG_POLLING)
1387                                ata_sff_queue_pio_task(link, 0);
1388
1389                        /* if polling, ata_sff_pio_task() handles the
1390                         * rest.  otherwise, interrupt handler takes
1391                         * over from here.
1392                         */
1393                }
1394
1395                break;
1396
1397        case ATAPI_PROT_PIO:
1398        case ATAPI_PROT_NODATA:
1399                if (qc->tf.flags & ATA_TFLAG_POLLING)
1400                        ata_qc_set_polling(qc);
1401
1402                ata_tf_to_host(ap, &qc->tf);
1403
1404                ap->hsm_task_state = HSM_ST_FIRST;
1405
1406                /* send cdb by polling if no cdb interrupt */
1407                if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
1408                    (qc->tf.flags & ATA_TFLAG_POLLING))
1409                        ata_sff_queue_pio_task(link, 0);
1410                break;
1411
1412        default:
1413                return AC_ERR_SYSTEM;
1414        }
1415
1416        return 0;
1417}
1418EXPORT_SYMBOL_GPL(ata_sff_qc_issue);
1419
1420/**
1421 *      ata_sff_qc_fill_rtf - fill result TF using ->sff_tf_read
1422 *      @qc: qc to fill result TF for
1423 *
1424 *      @qc is finished and result TF needs to be filled.  Fill it
1425 *      using ->sff_tf_read.
1426 *
1427 *      LOCKING:
1428 *      spin_lock_irqsave(host lock)
1429 *
1430 *      RETURNS:
1431 *      true indicating that result TF is successfully filled.
1432 */
1433bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
1434{
1435        qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf);
1436        return true;
1437}
1438EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);
1439
1440static unsigned int ata_sff_idle_irq(struct ata_port *ap)
1441{
1442        ap->stats.idle_irq++;
1443
1444#ifdef ATA_IRQ_TRAP
1445        if ((ap->stats.idle_irq % 1000) == 0) {
1446                ap->ops->sff_check_status(ap);
1447                if (ap->ops->sff_irq_clear)
1448                        ap->ops->sff_irq_clear(ap);
1449                ata_port_warn(ap, "irq trap\n");
1450                return 1;
1451        }
1452#endif
1453        return 0;       /* irq not handled */
1454}
1455
1456static unsigned int __ata_sff_port_intr(struct ata_port *ap,
1457                                        struct ata_queued_cmd *qc,
1458                                        bool hsmv_on_idle)
1459{
1460        u8 status;
1461
1462        VPRINTK("ata%u: protocol %d task_state %d\n",
1463                ap->print_id, qc->tf.protocol, ap->hsm_task_state);
1464
1465        /* Check whether we are expecting interrupt in this state */
1466        switch (ap->hsm_task_state) {
1467        case HSM_ST_FIRST:
1468                /* Some pre-ATAPI-4 devices assert INTRQ
1469                 * at this state when ready to receive CDB.
1470                 */
1471
 1472                 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
1473                 * The flag was turned on only for atapi devices.  No
1474                 * need to check ata_is_atapi(qc->tf.protocol) again.
1475                 */
1476                if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1477                        return ata_sff_idle_irq(ap);
1478                break;
1479        case HSM_ST_IDLE:
1480                return ata_sff_idle_irq(ap);
1481        default:
1482                break;
1483        }
1484
1485        /* check main status, clearing INTRQ if needed */
1486        status = ata_sff_irq_status(ap);
1487        if (status & ATA_BUSY) {
1488                if (hsmv_on_idle) {
1489                        /* BMDMA engine is already stopped, we're screwed */
1490                        qc->err_mask |= AC_ERR_HSM;
1491                        ap->hsm_task_state = HSM_ST_ERR;
1492                } else
1493                        return ata_sff_idle_irq(ap);
1494        }
1495
1496        /* clear irq events */
1497        if (ap->ops->sff_irq_clear)
1498                ap->ops->sff_irq_clear(ap);
1499
1500        ata_sff_hsm_move(ap, qc, status, 0);
1501
1502        return 1;       /* irq handled */
1503}
1504
1505/**
1506 *      ata_sff_port_intr - Handle SFF port interrupt
1507 *      @ap: Port on which interrupt arrived (possibly...)
1508 *      @qc: Taskfile currently active in engine
1509 *
1510 *      Handle port interrupt for given queued command.
1511 *
1512 *      LOCKING:
1513 *      spin_lock_irqsave(host lock)
1514 *
1515 *      RETURNS:
1516 *      One if interrupt was handled, zero if not (shared irq).
1517 */
1518unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1519{
1520        return __ata_sff_port_intr(ap, qc, false);
1521}
1522EXPORT_SYMBOL_GPL(ata_sff_port_intr);
1523
1524static inline irqreturn_t __ata_sff_interrupt(int irq, void *dev_instance,
1525        unsigned int (*port_intr)(struct ata_port *, struct ata_queued_cmd *))
1526{
1527        struct ata_host *host = dev_instance;
1528        bool retried = false;
1529        unsigned int i;
1530        unsigned int handled, idle, polling;
1531        unsigned long flags;
1532
1533        /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
1534        spin_lock_irqsave(&host->lock, flags);
1535
1536retry:
1537        handled = idle = polling = 0;
1538        for (i = 0; i < host->n_ports; i++) {
1539                struct ata_port *ap = host->ports[i];
1540                struct ata_queued_cmd *qc;
1541
1542                qc = ata_qc_from_tag(ap, ap->link.active_tag);
1543                if (qc) {
1544                        if (!(qc->tf.flags & ATA_TFLAG_POLLING))
1545                                handled |= port_intr(ap, qc);
1546                        else
1547                                polling |= 1 << i;
1548                } else
1549                        idle |= 1 << i;
1550        }
1551
1552        /*
1553         * If no port was expecting IRQ but the controller is actually
1554         * asserting the IRQ line, a "nobody cared" warning will ensue.
1555         * Check IRQ pending status if available and clear spurious IRQ.
1556         */
1557        if (!handled && !retried) {
1558                bool retry = false;
1559
1560                for (i = 0; i < host->n_ports; i++) {
1561                        struct ata_port *ap = host->ports[i];
1562
1563                        if (polling & (1 << i))
1564                                continue;
1565
1566                        if (!ap->ops->sff_irq_check ||
1567                            !ap->ops->sff_irq_check(ap))
1568                                continue;
1569
1570                        if (idle & (1 << i)) {
1571                                ap->ops->sff_check_status(ap);
1572                                if (ap->ops->sff_irq_clear)
1573                                        ap->ops->sff_irq_clear(ap);
1574                        } else {
1575                                /* clear INTRQ and check if BUSY cleared */
1576                                if (!(ap->ops->sff_check_status(ap) & ATA_BUSY))
1577                                        retry |= true;
1578                                /*
1579                                 * With command in flight, we can't do
1580                                 * sff_irq_clear() w/o racing with completion.
1581                                 */
1582                        }
1583                }
1584
1585                if (retry) {
1586                        retried = true;
1587                        goto retry;
1588                }
1589        }
1590
1591        spin_unlock_irqrestore(&host->lock, flags);
1592
1593        return IRQ_RETVAL(handled);
1594}
1595
1596/**
1597 *      ata_sff_interrupt - Default SFF ATA host interrupt handler
1598 *      @irq: irq line (unused)
1599 *      @dev_instance: pointer to our ata_host information structure
1600 *
1601 *      Default interrupt handler for PCI IDE devices.  Calls
1602 *      ata_sff_port_intr() for each port that is not disabled.
1603 *
1604 *      LOCKING:
1605 *      Obtains host lock during operation.
1606 *
1607 *      RETURNS:
1608 *      IRQ_NONE or IRQ_HANDLED.
1609 */
1610irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
1611{
1612        return __ata_sff_interrupt(irq, dev_instance, ata_sff_port_intr);
1613}
1614EXPORT_SYMBOL_GPL(ata_sff_interrupt);
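
/*
 * A minimal sketch of how a native-mode driver that does not use
 * ata_pci_sff_activate_host() might wire up this handler when
 * activating its host.  The foo_sht template is hypothetical; only
 * the handler and IRQF_SHARED matter here:
 *
 *	rc = ata_host_activate(host, pdev->irq, ata_sff_interrupt,
 *			       IRQF_SHARED, &foo_sht);
 */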
1615
1616/**
1617 *      ata_sff_lost_interrupt  -       Check for an apparent lost interrupt
1618 *      @ap: port that appears to have timed out
1619 *
1620 *      Called from the libata error handlers when the core code suspects
1621 *      an interrupt has been lost. If it has, complete anything we can and
1622 *      then return. The interface must support altstatus for this faster
1623 *      recovery to occur.
1624 *
1625 *      Locking:
1626 *      Caller holds host lock
1627 */
1628
1629void ata_sff_lost_interrupt(struct ata_port *ap)
1630{
1631        u8 status;
1632        struct ata_queued_cmd *qc;
1633
1634        /* Only one outstanding command per SFF channel */
1635        qc = ata_qc_from_tag(ap, ap->link.active_tag);
1636        /* We cannot lose an interrupt on a non-existent or polled command */
1637        if (!qc || qc->tf.flags & ATA_TFLAG_POLLING)
1638                return;
1639        /* See if the controller thinks it is still busy - if so the command
1640           isn't a lost IRQ but is still in progress */
1641        status = ata_sff_altstatus(ap);
1642        if (status & ATA_BUSY)
1643                return;
1644
1645        /* There was a command running, we are no longer busy and we have
1646           no interrupt. */
1647        ata_port_warn(ap, "lost interrupt (Status 0x%x)\n",
1648                                                                status);
1649        /* Run the host interrupt logic as if the interrupt had not been
1650           lost */
1651        ata_sff_port_intr(ap, qc);
1652}
1653EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt);
1654
1655/**
1656 *      ata_sff_freeze - Freeze SFF controller port
1657 *      @ap: port to freeze
1658 *
1659 *      Freeze SFF controller port.
1660 *
1661 *      LOCKING:
1662 *      Inherited from caller.
1663 */
1664void ata_sff_freeze(struct ata_port *ap)
1665{
1666        ap->ctl |= ATA_NIEN;
1667        ap->last_ctl = ap->ctl;
1668
1669        if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr)
1670                ata_sff_set_devctl(ap, ap->ctl);
1671
1672        /* Under certain circumstances, some controllers raise IRQ on
1673         * ATA_NIEN manipulation.  Also, many controllers fail to mask
1674         * previously pending IRQ on ATA_NIEN assertion.  Clear it.
1675         */
1676        ap->ops->sff_check_status(ap);
1677
1678        if (ap->ops->sff_irq_clear)
1679                ap->ops->sff_irq_clear(ap);
1680}
1681EXPORT_SYMBOL_GPL(ata_sff_freeze);
1682
1683/**
1684 *      ata_sff_thaw - Thaw SFF controller port
1685 *      @ap: port to thaw
1686 *
1687 *      Thaw SFF controller port.
1688 *
1689 *      LOCKING:
1690 *      Inherited from caller.
1691 */
1692void ata_sff_thaw(struct ata_port *ap)
1693{
1694        /* clear & re-enable interrupts */
1695        ap->ops->sff_check_status(ap);
1696        if (ap->ops->sff_irq_clear)
1697                ap->ops->sff_irq_clear(ap);
1698        ata_sff_irq_on(ap);
1699}
1700EXPORT_SYMBOL_GPL(ata_sff_thaw);
1701
1702/**
1703 *      ata_sff_prereset - prepare SFF link for reset
1704 *      @link: SFF link to be reset
1705 *      @deadline: deadline jiffies for the operation
1706 *
1707 *      SFF link @link is about to be reset.  Initialize it.  It first
1708 *      calls ata_std_prereset() and waits for !BSY if the port is
1709 *      being softreset.
1710 *
1711 *      LOCKING:
1712 *      Kernel thread context (may sleep)
1713 *
1714 *      RETURNS:
1715 *      0 on success, -errno otherwise.
1716 */
1717int ata_sff_prereset(struct ata_link *link, unsigned long deadline)
1718{
1719        struct ata_eh_context *ehc = &link->eh_context;
1720        int rc;
1721
1722        rc = ata_std_prereset(link, deadline);
1723        if (rc)
1724                return rc;
1725
1726        /* if we're about to do hardreset, nothing more to do */
1727        if (ehc->i.action & ATA_EH_HARDRESET)
1728                return 0;
1729
1730        /* wait for !BSY if we don't know that no device is attached */
1731        if (!ata_link_offline(link)) {
1732                rc = ata_sff_wait_ready(link, deadline);
1733                if (rc && rc != -ENODEV) {
1734                        ata_link_warn(link,
1735                                      "device not ready (errno=%d), forcing hardreset\n",
1736                                      rc);
1737                        ehc->i.action |= ATA_EH_HARDRESET;
1738                }
1739        }
1740
1741        return 0;
1742}
1743EXPORT_SYMBOL_GPL(ata_sff_prereset);
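
/*
 * A sketch of how a driver with extra pre-reset chores might wrap this
 * helper; foo_enable_ports() is hypothetical chip setup, the rest simply
 * falls through to ata_sff_prereset():
 *
 *	static int foo_prereset(struct ata_link *link, unsigned long deadline)
 *	{
 *		foo_enable_ports(link->ap);
 *		return ata_sff_prereset(link, deadline);
 *	}
 */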
1744
1745/**
1746 *      ata_devchk - PATA device presence detection
1747 *      @ap: ATA channel to examine
1748 *      @device: Device to examine (starting at zero)
1749 *
1750 *      This technique was originally described in
1751 *      Hale Landis's ATADRVR (www.ata-atapi.com), and
1752 *      later found its way into the ATA/ATAPI spec.
1753 *
1754 *      Write a pattern to the ATA shadow registers,
1755 *      and if a device is present, it will respond by
1756 *      correctly storing and echoing back the
1757 *      ATA shadow register contents.
1758 *
1759 *      LOCKING:
1760 *      caller.
1761 */
1762static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
1763{
1764        struct ata_ioports *ioaddr = &ap->ioaddr;
1765        u8 nsect, lbal;
1766
1767        ap->ops->sff_dev_select(ap, device);
1768
1769        iowrite8(0x55, ioaddr->nsect_addr);
1770        iowrite8(0xaa, ioaddr->lbal_addr);
1771
1772        iowrite8(0xaa, ioaddr->nsect_addr);
1773        iowrite8(0x55, ioaddr->lbal_addr);
1774
1775        iowrite8(0x55, ioaddr->nsect_addr);
1776        iowrite8(0xaa, ioaddr->lbal_addr);
1777
1778        nsect = ioread8(ioaddr->nsect_addr);
1779        lbal = ioread8(ioaddr->lbal_addr);
1780
1781        if ((nsect == 0x55) && (lbal == 0xaa))
1782                return 1;       /* we found a device */
1783
1784        return 0;               /* nothing found */
1785}
1786
1787/**
1788 *      ata_sff_dev_classify - Parse returned ATA device signature
1789 *      @dev: ATA device to classify (starting at zero)
1790 *      @present: device seems present
1791 *      @r_err: Value of error register on completion
1792 *
1793 *      After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
1794 *      an ATA/ATAPI-defined set of values is placed in the ATA
1795 *      shadow registers, indicating the results of device detection
1796 *      and diagnostics.
1797 *
1798 *      Select the ATA device, and read the values from the ATA shadow
1799 *      registers.  Then parse according to the Error register value,
1800 *      and the spec-defined values examined by ata_dev_classify().
1801 *
1802 *      LOCKING:
1803 *      caller.
1804 *
1805 *      RETURNS:
1806 *      Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
1807 */
1808unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
1809                                  u8 *r_err)
1810{
1811        struct ata_port *ap = dev->link->ap;
1812        struct ata_taskfile tf;
1813        unsigned int class;
1814        u8 err;
1815
1816        ap->ops->sff_dev_select(ap, dev->devno);
1817
1818        memset(&tf, 0, sizeof(tf));
1819
1820        ap->ops->sff_tf_read(ap, &tf);
1821        err = tf.feature;
1822        if (r_err)
1823                *r_err = err;
1824
1825        /* see if device passed diags: continue and warn later */
1826        if (err == 0)
1827                /* diagnostic fail : do nothing _YET_ */
1828                dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
1829        else if (err == 1)
1830                /* do nothing */ ;
1831        else if ((dev->devno == 0) && (err == 0x81))
1832                /* do nothing */ ;
1833        else
1834                return ATA_DEV_NONE;
1835
1836        /* determine if device is ATA or ATAPI */
1837        class = ata_dev_classify(&tf);
1838
1839        if (class == ATA_DEV_UNKNOWN) {
1840                /* If the device failed diagnostic, it's likely to
1841                 * have reported incorrect device signature too.
1842                 * Assume ATA device if the device seems present but
1843                 * device signature is invalid with diagnostic
1844                 * failure.
1845                 */
1846                if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
1847                        class = ATA_DEV_ATA;
1848                else
1849                        class = ATA_DEV_NONE;
1850        } else if ((class == ATA_DEV_ATA) &&
1851                   (ap->ops->sff_check_status(ap) == 0))
1852                class = ATA_DEV_NONE;
1853
1854        return class;
1855}
1856EXPORT_SYMBOL_GPL(ata_sff_dev_classify);
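
/*
 * A sketch of how a driver-specific softreset might use this helper to
 * classify device 0 once its own reset sequence has finished;
 * foo_reset_bus() is hypothetical:
 *
 *	foo_reset_bus(ap);
 *	classes[0] = ata_sff_dev_classify(&link->device[0],
 *					  devmask & (1 << 0), &err);
 */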
1857
1858/**
1859 *      ata_sff_wait_after_reset - wait for devices to become ready after reset
1860 *      @link: SFF link which is just reset
1861 *      @devmask: mask of present devices
1862 *      @deadline: deadline jiffies for the operation
1863 *
1864 *      Wait for devices attached to SFF @link to become ready after
1865 *      reset.  It includes a preceding 150ms wait to avoid accessing the
1866 *      TF status register too early.
1867 *
1868 *      LOCKING:
1869 *      Kernel thread context (may sleep).
1870 *
1871 *      RETURNS:
1872 *      0 on success, -ENODEV if some or all of devices in @devmask
1873 *      don't seem to exist.  -errno on other errors.
1874 */
1875int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask,
1876                             unsigned long deadline)
1877{
1878        struct ata_port *ap = link->ap;
1879        struct ata_ioports *ioaddr = &ap->ioaddr;
1880        unsigned int dev0 = devmask & (1 << 0);
1881        unsigned int dev1 = devmask & (1 << 1);
1882        int rc, ret = 0;
1883
1884        ata_msleep(ap, ATA_WAIT_AFTER_RESET);
1885
1886        /* always check readiness of the master device */
1887        rc = ata_sff_wait_ready(link, deadline);
1888        /* -ENODEV means the odd clown forgot the D7 pulldown resistor
1889         * and TF status is 0xff, bail out on it too.
1890         */
1891        if (rc)
1892                return rc;
1893
1894        /* if device 1 was found in ata_devchk, wait for register
1895         * access briefly, then wait for BSY to clear.
1896         */
1897        if (dev1) {
1898                int i;
1899
1900                ap->ops->sff_dev_select(ap, 1);
1901
1902                /* Wait for register access.  Some ATAPI devices fail
1903                 * to set nsect/lbal after reset, so don't waste too
1904                 * much time on it.  We're gonna wait for !BSY anyway.
1905                 */
1906                for (i = 0; i < 2; i++) {
1907                        u8 nsect, lbal;
1908
1909                        nsect = ioread8(ioaddr->nsect_addr);
1910                        lbal = ioread8(ioaddr->lbal_addr);
1911                        if ((nsect == 1) && (lbal == 1))
1912                                break;
1913                        ata_msleep(ap, 50);     /* give drive a breather */
1914                }
1915
1916                rc = ata_sff_wait_ready(link, deadline);
1917                if (rc) {
1918                        if (rc != -ENODEV)
1919                                return rc;
1920                        ret = rc;
1921                }
1922        }
1923
1924        /* is all this really necessary? */
1925        ap->ops->sff_dev_select(ap, 0);
1926        if (dev1)
1927                ap->ops->sff_dev_select(ap, 1);
1928        if (dev0)
1929                ap->ops->sff_dev_select(ap, 0);
1930
1931        return ret;
1932}
1933EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset);
1934
1935static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
1936                             unsigned long deadline)
1937{
1938        struct ata_ioports *ioaddr = &ap->ioaddr;
1939
1940        DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
1941
1942        if (ap->ioaddr.ctl_addr) {
1943                /* software reset.  causes dev0 to be selected */
1944                iowrite8(ap->ctl, ioaddr->ctl_addr);
1945                udelay(20);     /* FIXME: flush */
1946                iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
1947                udelay(20);     /* FIXME: flush */
1948                iowrite8(ap->ctl, ioaddr->ctl_addr);
1949                ap->last_ctl = ap->ctl;
1950        }
1951
1952        /* wait for the port to become ready */
1953        return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
1954}
1955
1956/**
1957 *      ata_sff_softreset - reset host port via ATA SRST
1958 *      @link: ATA link to reset
1959 *      @classes: resulting classes of attached devices
1960 *      @deadline: deadline jiffies for the operation
1961 *
1962 *      Reset host port using ATA SRST.
1963 *
1964 *      LOCKING:
1965 *      Kernel thread context (may sleep)
1966 *
1967 *      RETURNS:
1968 *      0 on success, -errno otherwise.
1969 */
1970int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
1971                      unsigned long deadline)
1972{
1973        struct ata_port *ap = link->ap;
1974        unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
1975        unsigned int devmask = 0;
1976        int rc;
1977        u8 err;
1978
1979        DPRINTK("ENTER\n");
1980
1981        /* determine if device 0/1 are present */
1982        if (ata_devchk(ap, 0))
1983                devmask |= (1 << 0);
1984        if (slave_possible && ata_devchk(ap, 1))
1985                devmask |= (1 << 1);
1986
1987        /* select device 0 again */
1988        ap->ops->sff_dev_select(ap, 0);
1989
1990        /* issue bus reset */
1991        DPRINTK("about to softreset, devmask=%x\n", devmask);
1992        rc = ata_bus_softreset(ap, devmask, deadline);
1993        /* if link is occupied, -ENODEV too is an error */
1994        if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
1995                ata_link_err(link, "SRST failed (errno=%d)\n", rc);
1996                return rc;
1997        }
1998
1999        /* determine by signature whether we have ATA or ATAPI devices */
2000        classes[0] = ata_sff_dev_classify(&link->device[0],
2001                                          devmask & (1 << 0), &err);
2002        if (slave_possible && err != 0x81)
2003                classes[1] = ata_sff_dev_classify(&link->device[1],
2004                                                  devmask & (1 << 1), &err);
2005
2006        DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2007        return 0;
2008}
2009EXPORT_SYMBOL_GPL(ata_sff_softreset);
2010
2011/**
2012 *      sata_sff_hardreset - reset host port via SATA phy reset
2013 *      @link: link to reset
2014 *      @class: resulting class of attached device
2015 *      @deadline: deadline jiffies for the operation
2016 *
2017 *      SATA phy-reset host port using DET bits of SControl register,
2018 *      wait for !BSY and classify the attached device.
2019 *
2020 *      LOCKING:
2021 *      Kernel thread context (may sleep)
2022 *
2023 *      RETURNS:
2024 *      0 on success, -errno otherwise.
2025 */
2026int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
2027                       unsigned long deadline)
2028{
2029        struct ata_eh_context *ehc = &link->eh_context;
2030        const unsigned long *timing = sata_ehc_deb_timing(ehc);
2031        bool online;
2032        int rc;
2033
2034        rc = sata_link_hardreset(link, timing, deadline, &online,
2035                                 ata_sff_check_ready);
2036        if (online)
2037                *class = ata_sff_dev_classify(link->device, 1, NULL);
2038
2039        DPRINTK("EXIT, class=%u\n", *class);
2040        return rc;
2041}
2042EXPORT_SYMBOL_GPL(sata_sff_hardreset);
2043
2044/**
2045 *      ata_sff_postreset - SFF postreset callback
2046 *      @link: the target SFF ata_link
2047 *      @classes: classes of attached devices
2048 *
2049 *      This function is invoked after a successful reset.  It first
2050 *      calls ata_std_postreset() and performs SFF specific postreset
2051 *      processing.
2052 *
2053 *      LOCKING:
2054 *      Kernel thread context (may sleep)
2055 */
2056void ata_sff_postreset(struct ata_link *link, unsigned int *classes)
2057{
2058        struct ata_port *ap = link->ap;
2059
2060        ata_std_postreset(link, classes);
2061
2062        /* is double-select really necessary? */
2063        if (classes[0] != ATA_DEV_NONE)
2064                ap->ops->sff_dev_select(ap, 1);
2065        if (classes[1] != ATA_DEV_NONE)
2066                ap->ops->sff_dev_select(ap, 0);
2067
2068        /* bail out if no device is present */
2069        if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2070                DPRINTK("EXIT, no device\n");
2071                return;
2072        }
2073
2074        /* set up device control */
2075        if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) {
2076                ata_sff_set_devctl(ap, ap->ctl);
2077                ap->last_ctl = ap->ctl;
2078        }
2079}
2080EXPORT_SYMBOL_GPL(ata_sff_postreset);
2081
2082/**
2083 *      ata_sff_drain_fifo - Stock FIFO drain logic for SFF controllers
2084 *      @qc: command
2085 *
2086 *      Drain the FIFO and device of any stuck data following a command
2087 *      failing to complete. In some cases this is necessary before a
2088 *      reset will recover the device.
2089 *
2090 */
2091
2092void ata_sff_drain_fifo(struct ata_queued_cmd *qc)
2093{
2094        int count;
2095        struct ata_port *ap;
2096
2097        /* We only need to flush incoming data when a command was running */
2098        if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
2099                return;
2100
2101        ap = qc->ap;
2102        /* Drain up to 64K of data before we give up this recovery method */
2103        for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ)
2104                                                && count < 65536; count += 2)
2105                ioread16(ap->ioaddr.data_addr);
2106
2107        /* Can become DEBUG later */
2108        if (count)
2109                ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count);
2110
2111}
2112EXPORT_SYMBOL_GPL(ata_sff_drain_fifo);
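
/*
 * Drivers that need a chip-specific tweak around the drain can wrap this
 * helper in their own ->sff_drain_fifo().  A sketch; foo_unlock_fifo()
 * is hypothetical:
 *
 *	static void foo_drain_fifo(struct ata_queued_cmd *qc)
 *	{
 *		if (qc)
 *			foo_unlock_fifo(qc->ap);
 *		ata_sff_drain_fifo(qc);
 *	}
 */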
2113
2114/**
2115 *      ata_sff_error_handler - Stock error handler for SFF controller
2116 *      @ap: port to handle error for
2117 *
2118 *      Stock error handler for SFF controller.  It can handle both
2119 *      PATA and SATA controllers.  Many controllers should be able to
2120 *      use this EH as-is or with some added handling before and
2121 *      after.
2122 *
2123 *      LOCKING:
2124 *      Kernel thread context (may sleep)
2125 */
2126void ata_sff_error_handler(struct ata_port *ap)
2127{
2128        ata_reset_fn_t softreset = ap->ops->softreset;
2129        ata_reset_fn_t hardreset = ap->ops->hardreset;
2130        struct ata_queued_cmd *qc;
2131        unsigned long flags;
2132
2133        qc = __ata_qc_from_tag(ap, ap->link.active_tag);
2134        if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
2135                qc = NULL;
2136
2137        spin_lock_irqsave(ap->lock, flags);
2138
2139        /*
2140         * We *MUST* do FIFO draining before we issue a reset as
2141         * several devices helpfully clear their internal state and
2142         * will lock solid if we touch the data port post reset. Pass
2143         * qc in case anyone wants to do different PIO/DMA recovery or
2144         * has per command fixups
2145         */
2146        if (ap->ops->sff_drain_fifo)
2147                ap->ops->sff_drain_fifo(qc);
2148
2149        spin_unlock_irqrestore(ap->lock, flags);
2150
2151        /* ignore built-in hardresets if SCR access is not available */
2152        if ((hardreset == sata_std_hardreset ||
2153             hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link))
2154                hardreset = NULL;
2155
2156        ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,
2157                  ap->ops->postreset);
2158}
2159EXPORT_SYMBOL_GPL(ata_sff_error_handler);
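
/*
 * Drivers with chip-specific recovery usually wrap this handler rather
 * than replace it.  A sketch; foo_reset_engine() is hypothetical:
 *
 *	static void foo_error_handler(struct ata_port *ap)
 *	{
 *		foo_reset_engine(ap);
 *		ata_sff_error_handler(ap);
 *	}
 */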
2160
2161/**
2162 *      ata_sff_std_ports - initialize ioaddr with standard port offsets.
2163 *      @ioaddr: IO address structure to be initialized
2164 *
2165 *      Utility function which initializes data_addr, error_addr,
2166 *      feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
2167 *      device_addr, status_addr, and command_addr to standard offsets
2168 *      relative to cmd_addr.
2169 *
2170 *      Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
2171 */
2172void ata_sff_std_ports(struct ata_ioports *ioaddr)
2173{
2174        ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
2175        ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
2176        ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
2177        ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
2178        ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
2179        ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
2180        ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
2181        ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
2182        ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
2183        ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
2184}
2185EXPORT_SYMBOL_GPL(ata_sff_std_ports);
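
/*
 * A sketch of the usual call sequence in a platform or legacy driver:
 * fill in the command and control block bases, then let this helper
 * derive the per-register addresses.  cmd_base and ctl_base are
 * placeholder __iomem cookies obtained from the driver's own mapping:
 *
 *	ap->ioaddr.cmd_addr = cmd_base;
 *	ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr = ctl_base;
 *	ata_sff_std_ports(&ap->ioaddr);
 */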
2186
2187#ifdef CONFIG_PCI
2188
2189static int ata_resources_present(struct pci_dev *pdev, int port)
2190{
2191        int i;
2192
2193        /* Check the PCI resources for this channel are enabled */
2194        port = port * 2;
2195        for (i = 0; i < 2; i++) {
2196                if (pci_resource_start(pdev, port + i) == 0 ||
2197                    pci_resource_len(pdev, port + i) == 0)
2198                        return 0;
2199        }
2200        return 1;
2201}
2202
2203/**
2204 *      ata_pci_sff_init_host - acquire native PCI ATA resources and init host
2205 *      @host: target ATA host
2206 *
2207 *      Acquire native PCI ATA resources for @host and initialize the
2208 *      first two ports of @host accordingly.  Ports marked dummy are
2209 *      skipped and allocation failure makes the port dummy.
2210 *
2211 *      Note that native PCI resources are valid even for legacy hosts
2212 *      as we fix up pdev resources array early in boot, so this
2213 *      function can be used for both native and legacy SFF hosts.
2214 *
2215 *      LOCKING:
2216 *      Inherited from calling layer (may sleep).
2217 *
2218 *      RETURNS:
2219 *      0 if at least one port is initialized, -ENODEV if no port is
2220 *      available.
2221 */
2222int ata_pci_sff_init_host(struct ata_host *host)
2223{
2224        struct device *gdev = host->dev;
2225        struct pci_dev *pdev = to_pci_dev(gdev);
2226        unsigned int mask = 0;
2227        int i, rc;
2228
2229        /* request, iomap BARs and init port addresses accordingly */
2230        for (i = 0; i < 2; i++) {
2231                struct ata_port *ap = host->ports[i];
2232                int base = i * 2;
2233                void __iomem * const *iomap;
2234
2235                if (ata_port_is_dummy(ap))
2236                        continue;
2237
2238                /* Discard disabled ports.  Some controllers show
2239                 * their unused channels this way.  Disabled ports are
2240                 * made dummy.
2241                 */
2242                if (!ata_resources_present(pdev, i)) {
2243                        ap->ops = &ata_dummy_port_ops;
2244                        continue;
2245                }
2246
2247                rc = pcim_iomap_regions(pdev, 0x3 << base,
2248                                        dev_driver_string(gdev));
2249                if (rc) {
2250                        dev_warn(gdev,
2251                                 "failed to request/iomap BARs for port %d (errno=%d)\n",
2252                                 i, rc);
2253                        if (rc == -EBUSY)
2254                                pcim_pin_device(pdev);
2255                        ap->ops = &ata_dummy_port_ops;
2256                        continue;
2257                }
2258                host->iomap = iomap = pcim_iomap_table(pdev);
2259
2260                ap->ioaddr.cmd_addr = iomap[base];
2261                ap->ioaddr.altstatus_addr =
2262                ap->ioaddr.ctl_addr = (void __iomem *)
2263                        ((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);
2264                ata_sff_std_ports(&ap->ioaddr);
2265
2266                ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
2267                        (unsigned long long)pci_resource_start(pdev, base),
2268                        (unsigned long long)pci_resource_start(pdev, base + 1));
2269
2270                mask |= 1 << i;
2271        }
2272
2273        if (!mask) {
2274                dev_err(gdev, "no available native port\n");
2275                return -ENODEV;
2276        }
2277
2278        return 0;
2279}
2280EXPORT_SYMBOL_GPL(ata_pci_sff_init_host);
2281
2282/**
2283 *      ata_pci_sff_prepare_host - helper to prepare PCI PIO-only SFF ATA host
2284 *      @pdev: target PCI device
2285 *      @ppi: array of port_info, must be enough for two ports
2286 *      @r_host: out argument for the initialized ATA host
2287 *
2288 *      Helper to allocate PIO-only SFF ATA host for @pdev, acquire
2289 *      all PCI resources and initialize it accordingly in one go.
2290 *
2291 *      LOCKING:
2292 *      Inherited from calling layer (may sleep).
2293 *
2294 *      RETURNS:
2295 *      0 on success, -errno otherwise.
2296 */
2297int ata_pci_sff_prepare_host(struct pci_dev *pdev,
2298                             const struct ata_port_info * const *ppi,
2299                             struct ata_host **r_host)
2300{
2301        struct ata_host *host;
2302        int rc;
2303
2304        if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
2305                return -ENOMEM;
2306
2307        host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
2308        if (!host) {
2309                dev_err(&pdev->dev, "failed to allocate ATA host\n");
2310                rc = -ENOMEM;
2311                goto err_out;
2312        }
2313
2314        rc = ata_pci_sff_init_host(host);
2315        if (rc)
2316                goto err_out;
2317
2318        devres_remove_group(&pdev->dev, NULL);
2319        *r_host = host;
2320        return 0;
2321
2322err_out:
2323        devres_release_group(&pdev->dev, NULL);
2324        return rc;
2325}
2326EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host);
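
/*
 * Drivers that need to touch the host between allocation and
 * registration typically pair this helper with
 * ata_pci_sff_activate_host(), as ata_pci_init_one() below does.
 * A sketch; foo_sht and hpriv are hypothetical:
 *
 *	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
 *	if (rc)
 *		return rc;
 *	host->private_data = hpriv;
 *	return ata_pci_sff_activate_host(host, ata_sff_interrupt, &foo_sht);
 */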
2327
2328/**
2329 *      ata_pci_sff_activate_host - start SFF host, request IRQ and register it
2330 *      @host: target SFF ATA host
2331 *      @irq_handler: irq_handler used when requesting IRQ(s)
2332 *      @sht: scsi_host_template to use when registering the host
2333 *
2334 *      This is the counterpart of ata_host_activate() for SFF ATA
2335 *      hosts.  This separate helper is necessary because SFF hosts
2336 *      use two separate interrupts in legacy mode.
2337 *
2338 *      LOCKING:
2339 *      Inherited from calling layer (may sleep).
2340 *
2341 *      RETURNS:
2342 *      0 on success, -errno otherwise.
2343 */
2344int ata_pci_sff_activate_host(struct ata_host *host,
2345                              irq_handler_t irq_handler,
2346                              struct scsi_host_template *sht)
2347{
2348        struct device *dev = host->dev;
2349        struct pci_dev *pdev = to_pci_dev(dev);
2350        const char *drv_name = dev_driver_string(host->dev);
2351        int legacy_mode = 0, rc;
2352
2353        rc = ata_host_start(host);
2354        if (rc)
2355                return rc;
2356
2357        if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
2358                u8 tmp8, mask = 0;
2359
2360                /*
2361                 * ATA spec says we should use legacy mode when one
2362                 * port is in legacy mode, but disabled ports on some
2363                 * PCI hosts appear as fixed legacy ports, e.g. SB600/700
2364                 * on which the secondary port is not wired, so
2365                 * ignore ports that are marked as 'dummy' during
2366                 * this check.
2367                 */
2368                pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
2369                if (!ata_port_is_dummy(host->ports[0]))
2370                        mask |= (1 << 0);
2371                if (!ata_port_is_dummy(host->ports[1]))
2372                        mask |= (1 << 2);
2373                if ((tmp8 & mask) != mask)
2374                        legacy_mode = 1;
2375        }
2376
2377        if (!devres_open_group(dev, NULL, GFP_KERNEL))
2378                return -ENOMEM;
2379
2380        if (!legacy_mode && pdev->irq) {
2381                int i;
2382
2383                rc = devm_request_irq(dev, pdev->irq, irq_handler,
2384                                      IRQF_SHARED, drv_name, host);
2385                if (rc)
2386                        goto out;
2387
2388                for (i = 0; i < 2; i++) {
2389                        if (ata_port_is_dummy(host->ports[i]))
2390                                continue;
2391                        ata_port_desc(host->ports[i], "irq %d", pdev->irq);
2392                }
2393        } else if (legacy_mode) {
2394                if (!ata_port_is_dummy(host->ports[0])) {
2395                        rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
2396                                              irq_handler, IRQF_SHARED,
2397                                              drv_name, host);
2398                        if (rc)
2399                                goto out;
2400
2401                        ata_port_desc(host->ports[0], "irq %d",
2402                                      ATA_PRIMARY_IRQ(pdev));
2403                }
2404
2405                if (!ata_port_is_dummy(host->ports[1])) {
2406                        rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
2407                                              irq_handler, IRQF_SHARED,
2408                                              drv_name, host);
2409                        if (rc)
2410                                goto out;
2411
2412                        ata_port_desc(host->ports[1], "irq %d",
2413                                      ATA_SECONDARY_IRQ(pdev));
2414                }
2415        }
2416
2417        rc = ata_host_register(host, sht);
2418out:
2419        if (rc == 0)
2420                devres_remove_group(dev, NULL);
2421        else
2422                devres_release_group(dev, NULL);
2423
2424        return rc;
2425}
2426EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
2427
2428static const struct ata_port_info *ata_sff_find_valid_pi(
2429                                        const struct ata_port_info * const *ppi)
2430{
2431        int i;
2432
2433        /* look up the first valid port_info */
2434        for (i = 0; i < 2 && ppi[i]; i++)
2435                if (ppi[i]->port_ops != &ata_dummy_port_ops)
2436                        return ppi[i];
2437
2438        return NULL;
2439}
2440
2441static int ata_pci_init_one(struct pci_dev *pdev,
2442                const struct ata_port_info * const *ppi,
2443                struct scsi_host_template *sht, void *host_priv,
2444                int hflags, bool bmdma)
2445{
2446        struct device *dev = &pdev->dev;
2447        const struct ata_port_info *pi;
2448        struct ata_host *host = NULL;
2449        int rc;
2450
2451        DPRINTK("ENTER\n");
2452
2453        pi = ata_sff_find_valid_pi(ppi);
2454        if (!pi) {
2455                dev_err(&pdev->dev, "no valid port_info specified\n");
2456                return -EINVAL;
2457        }
2458
2459        if (!devres_open_group(dev, NULL, GFP_KERNEL))
2460                return -ENOMEM;
2461
2462        rc = pcim_enable_device(pdev);
2463        if (rc)
2464                goto out;
2465
2466#ifdef CONFIG_ATA_BMDMA
2467        if (bmdma)
2468                /* prepare and activate BMDMA host */
2469                rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
2470        else
2471#endif
2472                /* prepare and activate SFF host */
2473                rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
2474        if (rc)
2475                goto out;
2476        host->private_data = host_priv;
2477        host->flags |= hflags;
2478
2479#ifdef CONFIG_ATA_BMDMA
2480        if (bmdma) {
2481                pci_set_master(pdev);
2482                rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
2483        } else
2484#endif
2485                rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
2486out:
2487        if (rc == 0)
2488                devres_remove_group(&pdev->dev, NULL);
2489        else
2490                devres_release_group(&pdev->dev, NULL);
2491
2492        return rc;
2493}
2494
2495/**
2496 *      ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller
2497 *      @pdev: Controller to be initialized
2498 *      @ppi: array of port_info, must be enough for two ports
2499 *      @sht: scsi_host_template to use when registering the host
2500 *      @host_priv: host private_data
2501 *      @hflag: host flags
2502 *
2503 *      This is a helper function which can be called from a driver's
2504 *      xxx_init_one() probe function if the hardware uses traditional
2505 *      IDE taskfile registers and is PIO only.
2506 *
2507 *      ASSUMPTION:
2508 *      Nobody makes a single channel controller that appears solely as
2509 *      the secondary legacy port on PCI.
2510 *
2511 *      LOCKING:
2512 *      Inherited from PCI layer (may sleep).
2513 *
2514 *      RETURNS:
2515 *      Zero on success, negative errno-based value on error.
2516 */
2517int ata_pci_sff_init_one(struct pci_dev *pdev,
2518                 const struct ata_port_info * const *ppi,
2519                 struct scsi_host_template *sht, void *host_priv, int hflag)
2520{
2521        return ata_pci_init_one(pdev, ppi, sht, host_priv, hflag, 0);
2522}
2523EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
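
/*
 * For a simple PIO-only controller the whole probe can collapse to this
 * helper.  A sketch of a hypothetical foo_init_one(); foo_sht is a
 * driver-defined scsi_host_template:
 *
 *	static const struct ata_port_info foo_port_info = {
 *		.flags		= ATA_FLAG_SLAVE_POSS,
 *		.pio_mask	= ATA_PIO4,
 *		.port_ops	= &ata_sff_port_ops,
 *	};
 *
 *	static int foo_init_one(struct pci_dev *pdev,
 *				const struct pci_device_id *id)
 *	{
 *		const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
 *
 *		return ata_pci_sff_init_one(pdev, ppi, &foo_sht, NULL, 0);
 *	}
 */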
2524
2525#endif /* CONFIG_PCI */
2526
2527/*
2528 *      BMDMA support
2529 */
2530
2531#ifdef CONFIG_ATA_BMDMA
2532
2533const struct ata_port_operations ata_bmdma_port_ops = {
2534        .inherits               = &ata_sff_port_ops,
2535
2536        .error_handler          = ata_bmdma_error_handler,
2537        .post_internal_cmd      = ata_bmdma_post_internal_cmd,
2538
2539        .qc_prep                = ata_bmdma_qc_prep,
2540        .qc_issue               = ata_bmdma_qc_issue,
2541
2542        .sff_irq_clear          = ata_bmdma_irq_clear,
2543        .bmdma_setup            = ata_bmdma_setup,
2544        .bmdma_start            = ata_bmdma_start,
2545        .bmdma_stop             = ata_bmdma_stop,
2546        .bmdma_status           = ata_bmdma_status,
2547
2548        .port_start             = ata_bmdma_port_start,
2549};
2550EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
2551
2552const struct ata_port_operations ata_bmdma32_port_ops = {
2553        .inherits               = &ata_bmdma_port_ops,
2554
2555        .sff_data_xfer          = ata_sff_data_xfer32,
2556        .port_start             = ata_bmdma_port_start32,
2557};
2558EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
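
/*
 * A BMDMA driver normally builds its port operations on top of one of
 * the structures above and overrides only what its chip needs.  A
 * sketch with hypothetical foo_* timing callbacks:
 *
 *	static struct ata_port_operations foo_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.set_piomode	= foo_set_piomode,
 *		.set_dmamode	= foo_set_dmamode,
 *	};
 */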
2559
2560/**
2561 *      ata_bmdma_fill_sg - Fill PCI IDE PRD table
2562 *      @qc: Metadata associated with taskfile to be transferred
2563 *
2564 *      Fill PCI IDE PRD (scatter-gather) table with segments
2565 *      associated with the current disk command.
2566 *
2567 *      LOCKING:
2568 *      spin_lock_irqsave(host lock)
2569 *
2570 */
2571static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc)
2572{
2573        struct ata_port *ap = qc->ap;
2574        struct ata_bmdma_prd *prd = ap->bmdma_prd;
2575        struct scatterlist *sg;
2576        unsigned int si, pi;
2577
2578        pi = 0;
2579        for_each_sg(qc->sg, sg, qc->n_elem, si) {
2580                u32 addr, offset;
2581                u32 sg_len, len;
2582
2583                /* determine if physical DMA addr spans 64K boundary.
2584                 * Note h/w doesn't support 64-bit, so we unconditionally
2585                 * truncate dma_addr_t to u32.
2586                 */
2587                addr = (u32) sg_dma_address(sg);
2588                sg_len = sg_dma_len(sg);
2589
2590                while (sg_len) {
2591                        offset = addr & 0xffff;
2592                        len = sg_len;
2593                        if ((offset + sg_len) > 0x10000)
2594                                len = 0x10000 - offset;
2595
2596                        prd[pi].addr = cpu_to_le32(addr);
2597                        prd[pi].flags_len = cpu_to_le32(len & 0xffff);
2598                        VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
2599
2600                        pi++;
2601                        sg_len -= len;
2602                        addr += len;
2603                }
2604        }
2605
2606        prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2607}
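
/*
 * Worked example of the 64K-boundary split above: a 0x9000-byte segment
 * at bus address 0x000ff000 starts at offset 0xf000 within its 64K page,
 * and 0xf000 + 0x9000 exceeds 0x10000, so it is emitted as two PRD
 * entries: (0x000ff000, 0x1000) followed by (0x00100000, 0x8000).
 */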
2608
2609/**
2610 *      ata_bmdma_fill_sg_dumb - Fill PCI IDE PRD table
2611 *      @qc: Metadata associated with taskfile to be transferred
2612 *
2613 *      Fill PCI IDE PRD (scatter-gather) table with segments
2614 *      associated with the current disk command. Perform the fill
2615 *      so that we avoid writing any 64K-length records for
2616 *      controllers that don't follow the spec.
2617 *
2618 *      LOCKING:
2619 *      spin_lock_irqsave(host lock)
2620 *
2621 */
2622static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
2623{
2624        struct ata_port *ap = qc->ap;
2625        struct ata_bmdma_prd *prd = ap->bmdma_prd;
2626        struct scatterlist *sg;
2627        unsigned int si, pi;
2628
2629        pi = 0;
2630        for_each_sg(qc->sg, sg, qc->n_elem, si) {
2631                u32 addr, offset;
2632                u32 sg_len, len, blen;
2633
2634                /* determine if physical DMA addr spans 64K boundary.
2635                 * Note h/w doesn't support 64-bit, so we unconditionally
2636                 * truncate dma_addr_t to u32.
2637                 */
2638                addr = (u32) sg_dma_address(sg);
2639                sg_len = sg_dma_len(sg);
2640
2641                while (sg_len) {
2642                        offset = addr & 0xffff;
2643                        len = sg_len;
2644                        if ((offset + sg_len) > 0x10000)
2645                                len = 0x10000 - offset;
2646
2647                        blen = len & 0xffff;
2648                        prd[pi].addr = cpu_to_le32(addr);
2649                        if (blen == 0) {
2650                                /* Some PATA chipsets like the CS5530 can't
2651                                   cope with 0x0000 meaning 64K as the spec
2652                                   says */
2653                                prd[pi].flags_len = cpu_to_le32(0x8000);
2654                                blen = 0x8000;
2655                                prd[++pi].addr = cpu_to_le32(addr + 0x8000);
2656                        }
2657                        prd[pi].flags_len = cpu_to_le32(blen);
2658                        VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
2659
2660                        pi++;
2661                        sg_len -= len;
2662                        addr += len;
2663                }
2664        }
2665
2666        prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2667}
2668
2669/**
2670 *      ata_bmdma_qc_prep - Prepare taskfile for submission
2671 *      @qc: Metadata associated with taskfile to be prepared
2672 *
2673 *      Prepare ATA taskfile for submission.
2674 *
2675 *      LOCKING:
2676 *      spin_lock_irqsave(host lock)
2677 */
2678enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
2679{
2680        if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2681                return AC_ERR_OK;
2682
2683        ata_bmdma_fill_sg(qc);
2684
2685        return AC_ERR_OK;
2686}
2687EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
2688
2689/**
2690 *      ata_bmdma_dumb_qc_prep - Prepare taskfile for submission
2691 *      @qc: Metadata associated with taskfile to be prepared
2692 *
2693 *      Prepare ATA taskfile for submission.
2694 *
2695 *      LOCKING:
2696 *      spin_lock_irqsave(host lock)
2697 */
2698enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
2699{
2700        if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2701                return AC_ERR_OK;
2702
2703        ata_bmdma_fill_sg_dumb(qc);
2704
2705        return AC_ERR_OK;
2706}
2707EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
2708
2709/**
2710 *      ata_bmdma_qc_issue - issue taskfile to a BMDMA controller
2711 *      @qc: command to issue to device
2712 *
2713 *      This function issues a PIO, NODATA or DMA command to a
2714 *      SFF/BMDMA controller.  PIO and NODATA are handled by
2715 *      ata_sff_qc_issue().
2716 *
2717 *      LOCKING:
2718 *      spin_lock_irqsave(host lock)
2719 *
2720 *      RETURNS:
2721 *      Zero on success, AC_ERR_* mask on failure
2722 */
2723unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
2724{
2725        struct ata_port *ap = qc->ap;
2726        struct ata_link *link = qc->dev->link;
2727
2728        /* defer PIO handling to sff_qc_issue */
2729        if (!ata_is_dma(qc->tf.protocol))
2730                return ata_sff_qc_issue(qc);
2731
2732        /* select the device */
2733        ata_dev_select(ap, qc->dev->devno, 1, 0);
2734
2735        /* start the command */
2736        switch (qc->tf.protocol) {
2737        case ATA_PROT_DMA:
2738                WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
2739
2740                ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
2741                ap->ops->bmdma_setup(qc);           /* set up bmdma */
2742                ap->ops->bmdma_start(qc);           /* initiate bmdma */
2743                ap->hsm_task_state = HSM_ST_LAST;
2744                break;
2745
2746        case ATAPI_PROT_DMA:
2747                WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
2748
2749                ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
2750                ap->ops->bmdma_setup(qc);           /* set up bmdma */
2751                ap->hsm_task_state = HSM_ST_FIRST;
2752
2753                /* send cdb by polling if no cdb interrupt */
2754                if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
2755                        ata_sff_queue_pio_task(link, 0);
2756                break;
2757
2758        default:
2759                WARN_ON(1);
2760                return AC_ERR_SYSTEM;
2761        }
2762
2763        return 0;
2764}
2765EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue);
2766
2767/**
2768 *      ata_bmdma_port_intr - Handle BMDMA port interrupt
2769 *      @ap: Port on which interrupt arrived (possibly...)
2770 *      @qc: Taskfile currently active in engine
2771 *
2772 *      Handle port interrupt for given queued command.
2773 *
2774 *      LOCKING:
2775 *      spin_lock_irqsave(host lock)
2776 *
2777 *      RETURNS:
2778 *      One if interrupt was handled, zero if not (shared irq).
2779 */
2780unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
2781{
2782        struct ata_eh_info *ehi = &ap->link.eh_info;
2783        u8 host_stat = 0;
2784        bool bmdma_stopped = false;
2785        unsigned int handled;
2786
2787        if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) {
2788                /* check status of DMA engine */
2789                host_stat = ap->ops->bmdma_status(ap);
2790                VPRINTK("ata%u: host_stat 0x%X\n", ap->print_id, host_stat);
2791
2792                /* if it's not our irq... */
2793                if (!(host_stat & ATA_DMA_INTR))
2794                        return ata_sff_idle_irq(ap);
2795
2796                /* before we do anything else, clear DMA-Start bit */
2797                ap->ops->bmdma_stop(qc);
2798                bmdma_stopped = true;
2799
2800                if (unlikely(host_stat & ATA_DMA_ERR)) {
2801                        /* error when transferring data to/from memory */
2802                        qc->err_mask |= AC_ERR_HOST_BUS;
2803                        ap->hsm_task_state = HSM_ST_ERR;
2804                }
2805        }
2806
2807        handled = __ata_sff_port_intr(ap, qc, bmdma_stopped);
2808
2809        if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
2810                ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2811
2812        return handled;
2813}
2814EXPORT_SYMBOL_GPL(ata_bmdma_port_intr);
2815
2816/**
2817 *      ata_bmdma_interrupt - Default BMDMA ATA host interrupt handler
2818 *      @irq: irq line (unused)
2819 *      @dev_instance: pointer to our ata_host information structure
2820 *
2821 *      Default interrupt handler for PCI IDE devices.  Calls
2822 *      ata_bmdma_port_intr() for each port that is not disabled.
2823 *
2824 *      LOCKING:
2825 *      Obtains host lock during operation.
2826 *
2827 *      RETURNS:
2828 *      IRQ_NONE or IRQ_HANDLED.
2829 */
2830irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance)
2831{
2832        return __ata_sff_interrupt(irq, dev_instance, ata_bmdma_port_intr);
2833}
2834EXPORT_SYMBOL_GPL(ata_bmdma_interrupt);
2835
2836/**
2837 *      ata_bmdma_error_handler - Stock error handler for BMDMA controller
2838 *      @ap: port to handle error for
2839 *
2840 *      Stock error handler for BMDMA controller.  It can handle both
2841 *      PATA and SATA controllers.  Most BMDMA controllers should be
2842 *      able to use this EH as-is or with some added handling before
2843 *      and after.
2844 *
2845 *      LOCKING:
2846 *      Kernel thread context (may sleep)
2847 */
2848void ata_bmdma_error_handler(struct ata_port *ap)
2849{
2850        struct ata_queued_cmd *qc;
2851        unsigned long flags;
2852        bool thaw = false;
2853
2854        qc = __ata_qc_from_tag(ap, ap->link.active_tag);
2855        if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
2856                qc = NULL;
2857
2858        /* reset PIO HSM and stop DMA engine */
2859        spin_lock_irqsave(ap->lock, flags);
2860
2861        if (qc && ata_is_dma(qc->tf.protocol)) {
2862                u8 host_stat;
2863
2864                host_stat = ap->ops->bmdma_status(ap);
2865
2866                /* BMDMA controllers indicate host bus error by
2867                 * setting DMA_ERR bit and timing out.  As it wasn't
2868                 * really a timeout event, adjust error mask and
2869                 * cancel frozen state.
2870                 */
2871                if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
2872                        qc->err_mask = AC_ERR_HOST_BUS;
2873                        thaw = true;
2874                }
2875
2876                ap->ops->bmdma_stop(qc);
2877
2878                /* if we're gonna thaw, make sure IRQ is clear */
2879                if (thaw) {
2880                        ap->ops->sff_check_status(ap);
2881                        if (ap->ops->sff_irq_clear)
2882                                ap->ops->sff_irq_clear(ap);
2883                }
2884        }
2885
2886        spin_unlock_irqrestore(ap->lock, flags);
2887
2888        if (thaw)
2889                ata_eh_thaw_port(ap);
2890
2891        ata_sff_error_handler(ap);
2892}
2893EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
2894
2895/**
2896 *      ata_bmdma_post_internal_cmd - Stock post_internal_cmd for BMDMA
2897 *      @qc: internal command to clean up
2898 *
2899 *      LOCKING:
2900 *      Kernel thread context (may sleep)
2901 */
2902void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
2903{
2904        struct ata_port *ap = qc->ap;
2905        unsigned long flags;
2906
2907        if (ata_is_dma(qc->tf.protocol)) {
2908                spin_lock_irqsave(ap->lock, flags);
2909                ap->ops->bmdma_stop(qc);
2910                spin_unlock_irqrestore(ap->lock, flags);
2911        }
2912}
2913EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
2914
2915/**
2916 *      ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
2917 *      @ap: Port associated with this ATA transaction.
2918 *
2919 *      Clear interrupt and error flags in DMA status register.
2920 *
2921 *      May be used as the irq_clear() entry in ata_port_operations.
2922 *
2923 *      LOCKING:
2924 *      spin_lock_irqsave(host lock)
2925 */
2926void ata_bmdma_irq_clear(struct ata_port *ap)
2927{
2928        void __iomem *mmio = ap->ioaddr.bmdma_addr;
2929
2930        if (!mmio)
2931                return;
2932
2933        iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
2934}
2935EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
2936
2937/**
2938 *      ata_bmdma_setup - Set up PCI IDE BMDMA transaction
2939 *      @qc: Info associated with this ATA transaction.
2940 *
2941 *      LOCKING:
2942 *      spin_lock_irqsave(host lock)
2943 */
2944void ata_bmdma_setup(struct ata_queued_cmd *qc)
2945{
2946        struct ata_port *ap = qc->ap;
2947        unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
2948        u8 dmactl;
2949
2950        /* load PRD table addr. */
2951        mb();   /* make sure PRD table writes are visible to controller */
2952        iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2953
2954        /* specify data direction, triple-check start bit is clear */
2955        dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2956        dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
2957        if (!rw)
2958                dmactl |= ATA_DMA_WR;
2959        iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2960
2961        /* issue r/w command */
2962        ap->ops->sff_exec_command(ap, &qc->tf);
2963}
2964EXPORT_SYMBOL_GPL(ata_bmdma_setup);
2965
2966/**
2967 *      ata_bmdma_start - Start a PCI IDE BMDMA transaction
2968 *      @qc: Info associated with this ATA transaction.
2969 *
2970 *      LOCKING:
2971 *      spin_lock_irqsave(host lock)
2972 */
2973void ata_bmdma_start(struct ata_queued_cmd *qc)
2974{
2975        struct ata_port *ap = qc->ap;
2976        u8 dmactl;
2977
2978        /* start host DMA transaction */
2979        dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2980        iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2981
2982        /* Strictly, one may wish to issue an ioread8() here, to
2983         * flush the mmio write.  However, control also passes
2984         * to the hardware at this point, and it will interrupt
2985         * us when we are to resume control.  So, in effect,
2986         * we don't care when the mmio write flushes.
2987         * Further, a read of the DMA status register _immediately_
2988         * following the write may not be what certain flaky hardware
2989         * is expecting, so it is best not to add a readb() without
2990         * first testing all the MMIO ATA cards/mobos.
2991         * Or maybe I'm just being paranoid.
2992         *
2993         * FIXME: The posting of this write means I/O starts are
2994         * unnecessarily delayed for MMIO
2995         */
2996}
2997EXPORT_SYMBOL_GPL(ata_bmdma_start);
2998
2999/**
3000 *      ata_bmdma_stop - Stop PCI IDE BMDMA transfer
3001 *      @qc: Command we are ending DMA for
3002 *
3003 *      Clears the ATA_DMA_START flag in the dma control register
3004 *
3005 *      May be used as the bmdma_stop() entry in ata_port_operations.
3006 *
3007 *      LOCKING:
3008 *      spin_lock_irqsave(host lock)
3009 */
3010void ata_bmdma_stop(struct ata_queued_cmd *qc)
3011{
3012        struct ata_port *ap = qc->ap;
3013        void __iomem *mmio = ap->ioaddr.bmdma_addr;
3014
3015        /* clear start/stop bit */
3016        iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
3017                 mmio + ATA_DMA_CMD);
3018
3019        /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
3020        ata_sff_dma_pause(ap);
3021}
3022EXPORT_SYMBOL_GPL(ata_bmdma_stop);
3023
3024/**
3025 *      ata_bmdma_status - Read PCI IDE BMDMA status
3026 *      @ap: Port associated with this ATA transaction.
3027 *
3028 *      Read and return BMDMA status register.
3029 *
3030 *      May be used as the bmdma_status() entry in ata_port_operations.
3031 *
3032 *      LOCKING:
3033 *      spin_lock_irqsave(host lock)
3034 */
3035u8 ata_bmdma_status(struct ata_port *ap)
3036{
3037        return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
3038}
3039EXPORT_SYMBOL_GPL(ata_bmdma_status);
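
/*
 * Illustrative sketch only: the interrupt path in this file
 * (ata_bmdma_port_intr()) consumes ata_bmdma_status() and ata_bmdma_stop()
 * roughly like this.  The "example_" name is hypothetical.
 */
static inline bool example_handle_dma_irq(struct ata_port *ap,
                                          struct ata_queued_cmd *qc)
{
        u8 host_stat = ap->ops->bmdma_status(ap);

        if (!(host_stat & ATA_DMA_INTR))
                return false;                   /* not our interrupt */

        ap->ops->bmdma_stop(qc);                /* clear ATA_DMA_START first */

        if (host_stat & ATA_DMA_ERR)            /* host bus error during DMA */
                qc->err_mask |= AC_ERR_HOST_BUS;

        return true;
}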
3040
3041
3042/**
3043 *      ata_bmdma_port_start - Set port up for bmdma.
3044 *      @ap: Port to initialize
3045 *
3046 *      Called just after data structures for each port are
3047 *      initialized.  Allocates space for PRD table.
3048 *
3049 *      May be used as the port_start() entry in ata_port_operations.
3050 *
3051 *      LOCKING:
3052 *      Inherited from caller.
3053 */
3054int ata_bmdma_port_start(struct ata_port *ap)
3055{
3056        if (ap->mwdma_mask || ap->udma_mask) {
3057                ap->bmdma_prd =
3058                        dmam_alloc_coherent(ap->host->dev, ATA_PRD_TBL_SZ,
3059                                            &ap->bmdma_prd_dma, GFP_KERNEL);
3060                if (!ap->bmdma_prd)
3061                        return -ENOMEM;
3062        }
3063
3064        return 0;
3065}
3066EXPORT_SYMBOL_GPL(ata_bmdma_port_start);
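
/*
 * Illustrative sketch (the "example_" name is hypothetical): a driver that
 * needs extra per-port setup can do its own work and then chain to the
 * generic PRD allocation above from its ->port_start() hook.
 */
static inline int example_port_start(struct ata_port *ap)
{
        /* controller-specific per-port initialization would go here */

        return ata_bmdma_port_start(ap);
}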
3067
3068/**
3069 *      ata_bmdma_port_start32 - Set port up for dma.
3070 *      @ap: Port to initialize
3071 *
3072 *      Called just after data structures for each port are
3073 *      initialized.  Enables 32bit PIO and allocates space for PRD
3074 *      table.
3075 *
3076 *      May be used as the port_start() entry in ata_port_operations for
3077 *      devices that are capable of 32bit PIO.
3078 *
3079 *      LOCKING:
3080 *      Inherited from caller.
3081 */
3082int ata_bmdma_port_start32(struct ata_port *ap)
3083{
3084        ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
3085        return ata_bmdma_port_start(ap);
3086}
3087EXPORT_SYMBOL_GPL(ata_bmdma_port_start32);
3088
3089#ifdef CONFIG_PCI
3090
3091/**
3092 *      ata_pci_bmdma_clear_simplex -   attempt to kick device out of simplex
3093 *      @pdev: PCI device
3094 *
3095 *      Some PCI ATA devices report simplex mode but in fact can be told to
3096 *      enter non-simplex mode. This implements the necessary logic to
3097 *      perform the task on such devices. Calling it on other devices will
3098 *      have -undefined- behaviour.
3099 */
3100int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
3101{
3102        unsigned long bmdma = pci_resource_start(pdev, 4);
3103        u8 simplex;
3104
3105        if (bmdma == 0)
3106                return -ENOENT;
3107
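        /*
         * Offset 0x02 is the bus master IDE status register: bit 7 is the
         * simplex flag and bits 5-6 are the "drive DMA capable" bits, which
         * are preserved here.  Write the register back with the simplex bit
         * cleared, then re-read it to see whether the bit actually cleared.
         */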
3108        simplex = inb(bmdma + 0x02);
3109        outb(simplex & 0x60, bmdma + 0x02);
3110        simplex = inb(bmdma + 0x02);
3111        if (simplex & 0x80)
3112                return -EOPNOTSUPP;
3113        return 0;
3114}
3115EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
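
/*
 * Illustrative sketch (hypothetical helper, not used by libata itself):
 * a driver whose controller falsely reports simplex operation could try
 * to clear the flag at probe time and keep ATA_HOST_SIMPLEX only if the
 * bit refuses to clear.
 */
static inline void example_fixup_simplex(struct pci_dev *pdev,
                                         struct ata_host *host)
{
        if ((host->flags & ATA_HOST_SIMPLEX) &&
            ata_pci_bmdma_clear_simplex(pdev) == 0)
                host->flags &= ~ATA_HOST_SIMPLEX;
}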
3116
3117static void ata_bmdma_nodma(struct ata_host *host, const char *reason)
3118{
3119        int i;
3120
3121        dev_err(host->dev, "BMDMA: %s, falling back to PIO\n", reason);
3122
3123        for (i = 0; i < 2; i++) {
3124                host->ports[i]->mwdma_mask = 0;
3125                host->ports[i]->udma_mask = 0;
3126        }
3127}
3128
3129/**
3130 *      ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
3131 *      @host: target ATA host
3132 *
3133 *      Acquire PCI BMDMA resources and initialize @host accordingly.
3134 *
3135 *      LOCKING:
3136 *      Inherited from calling layer (may sleep).
3137 */
3138void ata_pci_bmdma_init(struct ata_host *host)
3139{
3140        struct device *gdev = host->dev;
3141        struct pci_dev *pdev = to_pci_dev(gdev);
3142        int i, rc;
3143
3144        /* No BAR4 allocation: No DMA */
3145        if (pci_resource_start(pdev, 4) == 0) {
3146                ata_bmdma_nodma(host, "BAR4 is zero");
3147                return;
3148        }
3149
3150        /*
3151         * Some controllers require BMDMA region to be initialized
3152         * even if DMA is not in use to clear IRQ status via
3153         * ->sff_irq_clear method.  Try to initialize bmdma_addr
3154         * regardless of dma masks.
3155         */
3156        rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
3157        if (rc)
3158                ata_bmdma_nodma(host, "failed to set dma mask");
3159
3160        /* request and iomap DMA region */
3161        rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
3162        if (rc) {
3163                ata_bmdma_nodma(host, "failed to request/iomap BAR4");
3164                return;
3165        }
3166        host->iomap = pcim_iomap_table(pdev);
3167
3168        for (i = 0; i < 2; i++) {
3169                struct ata_port *ap = host->ports[i];
3170                void __iomem *bmdma = host->iomap[4] + 8 * i;
3171
3172                if (ata_port_is_dummy(ap))
3173                        continue;
3174
3175                ap->ioaddr.bmdma_addr = bmdma;
3176                if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
3177                    (ioread8(bmdma + 2) & 0x80))
3178                        host->flags |= ATA_HOST_SIMPLEX;
3179
3180                ata_port_desc(ap, "bmdma 0x%llx",
3181                    (unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
3182        }
3183}
3184EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
3185
3186/**
3187 *      ata_pci_bmdma_prepare_host - helper to prepare PCI BMDMA ATA host
3188 *      @pdev: target PCI device
3189 *      @ppi: array of port_info, must be enough for two ports
3190 *      @r_host: out argument for the initialized ATA host
3191 *
3192 *      Helper to allocate BMDMA ATA host for @pdev, acquire all PCI
3193 *      resources and initialize it accordingly in one go.
3194 *
3195 *      LOCKING:
3196 *      Inherited from calling layer (may sleep).
3197 *
3198 *      RETURNS:
3199 *      0 on success, -errno otherwise.
3200 */
3201int ata_pci_bmdma_prepare_host(struct pci_dev *pdev,
3202                               const struct ata_port_info * const * ppi,
3203                               struct ata_host **r_host)
3204{
3205        int rc;
3206
3207        rc = ata_pci_sff_prepare_host(pdev, ppi, r_host);
3208        if (rc)
3209                return rc;
3210
3211        ata_pci_bmdma_init(*r_host);
3212        return 0;
3213}
3214EXPORT_SYMBOL_GPL(ata_pci_bmdma_prepare_host);
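
/*
 * Illustrative sketch only ("example_" is hypothetical): a driver that has
 * to touch the host between resource setup and registration can use the
 * prepare/activate split instead of the one-shot ata_pci_bmdma_init_one()
 * below.
 */
static int example_prepare_and_activate(struct pci_dev *pdev,
                                        const struct ata_port_info * const *ppi,
                                        struct scsi_host_template *sht)
{
        struct ata_host *host;
        int rc;

        rc = pcim_enable_device(pdev);
        if (rc)
                return rc;

        rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
        if (rc)
                return rc;

        /* driver-specific tweaks to host/ports would go here */

        pci_set_master(pdev);
        return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
}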
3215
3216/**
3217 *      ata_pci_bmdma_init_one - Initialize/register BMDMA PCI IDE controller
3218 *      @pdev: Controller to be initialized
3219 *      @ppi: array of port_info, must be enough for two ports
3220 *      @sht: scsi_host_template to use when registering the host
3221 *      @host_priv: host private_data
3222 *      @hflags: host flags
3223 *
3224 *      This function is similar to ata_pci_sff_init_one() but also
3225 *      takes care of BMDMA initialization.
3226 *
3227 *      LOCKING:
3228 *      Inherited from PCI layer (may sleep).
3229 *
3230 *      RETURNS:
3231 *      Zero on success, negative errno-based value on error.
3232 */
3233int ata_pci_bmdma_init_one(struct pci_dev *pdev,
3234                           const struct ata_port_info * const * ppi,
3235                           struct scsi_host_template *sht, void *host_priv,
3236                           int hflags)
3237{
3238        return ata_pci_init_one(pdev, ppi, sht, host_priv, hflags, 1);
3239}
3240EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one);
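
/*
 * Illustrative sketch of a complete probe built on the one-shot helper
 * above.  All "example_" names, and the transfer masks and flags chosen,
 * are hypothetical; a real driver picks values matching its hardware.
 */
static struct scsi_host_template example_sht = {
        ATA_BMDMA_SHT("example_ide"),
};

static int example_init_one(struct pci_dev *pdev,
                            const struct pci_device_id *id)
{
        static const struct ata_port_info info = {
                .flags          = ATA_FLAG_SLAVE_POSS,
                .pio_mask       = ATA_PIO4,
                .mwdma_mask     = ATA_MWDMA2,
                .udma_mask      = ATA_UDMA5,
                .port_ops       = &ata_bmdma_port_ops,
        };
        const struct ata_port_info *ppi[] = { &info, &info };

        return ata_pci_bmdma_init_one(pdev, ppi, &example_sht, NULL, 0);
}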
3241
3242#endif /* CONFIG_PCI */
3243#endif /* CONFIG_ATA_BMDMA */
3244
3245/**
3246 *      ata_sff_port_init - Initialize SFF/BMDMA ATA port
3247 *      @ap: Port to initialize
3248 *
3249 *      Called on port allocation to initialize SFF/BMDMA specific
3250 *      fields.
3251 *
3252 *      LOCKING:
3253 *      None.
3254 */
3255void ata_sff_port_init(struct ata_port *ap)
3256{
3257        INIT_DELAYED_WORK(&ap->sff_pio_task, ata_sff_pio_task);
3258        ap->ctl = ATA_DEVCTL_OBS;
3259        ap->last_ctl = 0xFF;
3260}
3261
3262int __init ata_sff_init(void)
3263{
3264        ata_sff_wq = alloc_workqueue("ata_sff", WQ_MEM_RECLAIM, WQ_MAX_ACTIVE);
3265        if (!ata_sff_wq)
3266                return -ENOMEM;
3267
3268        return 0;
3269}
3270
3271void ata_sff_exit(void)
3272{
3273        destroy_workqueue(ata_sff_wq);
3274}
3275