linux/drivers/ata/libata-sff.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *  libata-sff.c - helper library for PCI IDE BMDMA
   4 *
   5 *  Copyright 2003-2006 Red Hat, Inc.  All rights reserved.
   6 *  Copyright 2003-2006 Jeff Garzik
   7 *
   8 *  libata documentation is available via 'make {ps|pdf}docs',
   9 *  as Documentation/driver-api/libata.rst
  10 *
  11 *  Hardware documentation available from http://www.t13.org/ and
  12 *  http://www.sata-io.org/
  13 */
  14
  15#include <linux/kernel.h>
  16#include <linux/gfp.h>
  17#include <linux/pci.h>
  18#include <linux/module.h>
  19#include <linux/libata.h>
  20#include <linux/highmem.h>
  21
  22#include "libata.h"
  23
  24static struct workqueue_struct *ata_sff_wq;
  25
  26const struct ata_port_operations ata_sff_port_ops = {
  27        .inherits               = &ata_base_port_ops,
  28
  29        .qc_prep                = ata_noop_qc_prep,
  30        .qc_issue               = ata_sff_qc_issue,
  31        .qc_fill_rtf            = ata_sff_qc_fill_rtf,
  32
  33        .freeze                 = ata_sff_freeze,
  34        .thaw                   = ata_sff_thaw,
  35        .prereset               = ata_sff_prereset,
  36        .softreset              = ata_sff_softreset,
  37        .hardreset              = sata_sff_hardreset,
  38        .postreset              = ata_sff_postreset,
  39        .error_handler          = ata_sff_error_handler,
  40
  41        .sff_dev_select         = ata_sff_dev_select,
  42        .sff_check_status       = ata_sff_check_status,
  43        .sff_tf_load            = ata_sff_tf_load,
  44        .sff_tf_read            = ata_sff_tf_read,
  45        .sff_exec_command       = ata_sff_exec_command,
  46        .sff_data_xfer          = ata_sff_data_xfer,
  47        .sff_drain_fifo         = ata_sff_drain_fifo,
  48
  49        .lost_interrupt         = ata_sff_lost_interrupt,
  50};
  51EXPORT_SYMBOL_GPL(ata_sff_port_ops);
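
/*
 * Illustrative sketch (not part of the original file): an SFF PATA driver
 * typically builds its port operations by inheriting ata_sff_port_ops and
 * overriding only the hooks its hardware needs.  The my_pata_* names below
 * are hypothetical.
 */
static void my_pata_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
        /* program the controller's timing registers for adev->pio_mode */
}

static struct ata_port_operations my_pata_port_ops = {
        .inherits       = &ata_sff_port_ops,
        .cable_detect   = ata_cable_40wire,
        .set_piomode    = my_pata_set_piomode,
};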
  52
  53/**
  54 *      ata_sff_check_status - Read device status reg & clear interrupt
  55 *      @ap: port where the device is
  56 *
  57 *      Reads ATA taskfile status register for currently-selected device
  58 *      and returns its value. This also clears pending interrupts
  59 *      from this device.
  60 *
  61 *      LOCKING:
  62 *      Inherited from caller.
  63 */
  64u8 ata_sff_check_status(struct ata_port *ap)
  65{
  66        return ioread8(ap->ioaddr.status_addr);
  67}
  68EXPORT_SYMBOL_GPL(ata_sff_check_status);
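
/*
 * Illustrative sketch (hypothetical helper, not from this file): a quick
 * idleness test built on ata_sff_check_status().  Note that reading the
 * status register also clears a pending interrupt.
 */
static bool my_device_idle(struct ata_port *ap)
{
        /* neither busy nor requesting data transfer */
        return !(ata_sff_check_status(ap) & (ATA_BUSY | ATA_DRQ));
}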
  69
  70/**
  71 *      ata_sff_altstatus - Read device alternate status reg
  72 *      @ap: port where the device is
  73 *
  74 *      Reads ATA taskfile alternate status register for
  75 *      currently-selected device and returns its value.
  76 *
  77 *      Note: may NOT be used as the check_altstatus() entry in
  78 *      ata_port_operations.
  79 *
  80 *      LOCKING:
  81 *      Inherited from caller.
  82 */
  83static u8 ata_sff_altstatus(struct ata_port *ap)
  84{
  85        if (ap->ops->sff_check_altstatus)
  86                return ap->ops->sff_check_altstatus(ap);
  87
  88        return ioread8(ap->ioaddr.altstatus_addr);
  89}
  90
  91/**
  92 *      ata_sff_irq_status - Check if the device is busy
  93 *      @ap: port where the device is
  94 *
  95 *      Determine if the port is currently busy. Uses altstatus
  96 *      if available in order to avoid clearing shared IRQ status
  97 *      when finding an IRQ source. Fortunately for us, devices that
  98 *      lack a ctl register don't share interrupt lines.
  99 *
 100 *      LOCKING:
 101 *      Inherited from caller.
 102 */
 103static u8 ata_sff_irq_status(struct ata_port *ap)
 104{
 105        u8 status;
 106
 107        if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
 108                status = ata_sff_altstatus(ap);
 109                /* Not us: We are busy */
 110                if (status & ATA_BUSY)
 111                        return status;
 112        }
 113        /* Clear INTRQ latch */
 114        status = ap->ops->sff_check_status(ap);
 115        return status;
 116}
 117
 118/**
 119 *      ata_sff_sync - Flush writes
 120 *      @ap: Port to wait for.
 121 *
 122 *      CAUTION:
 123 *      If we have an mmio device with no ctl and no altstatus
 124 *      method this will fail. No such devices are known to exist.
 125 *
 126 *      LOCKING:
 127 *      Inherited from caller.
 128 */
 129
 130static void ata_sff_sync(struct ata_port *ap)
 131{
 132        if (ap->ops->sff_check_altstatus)
 133                ap->ops->sff_check_altstatus(ap);
 134        else if (ap->ioaddr.altstatus_addr)
 135                ioread8(ap->ioaddr.altstatus_addr);
 136}
 137
 138/**
 139 *      ata_sff_pause           -       Flush writes and wait 400ns
 140 *      @ap: Port to pause for.
 141 *
 142 *      CAUTION:
 143 *      If we have an mmio device with no ctl and no altstatus
 144 *      method this will fail. No such devices are known to exist.
 145 *
 146 *      LOCKING:
 147 *      Inherited from caller.
 148 */
 149
 150void ata_sff_pause(struct ata_port *ap)
 151{
 152        ata_sff_sync(ap);
 153        ndelay(400);
 154}
 155EXPORT_SYMBOL_GPL(ata_sff_pause);
 156
 157/**
 158 *      ata_sff_dma_pause       -       Pause before commencing DMA
 159 *      @ap: Port to pause for.
 160 *
 161 *      Perform I/O fencing and ensure sufficient cycle delays occur
 162 *      for the HDMA1:0 transition.
 163 */
 164
 165void ata_sff_dma_pause(struct ata_port *ap)
 166{
 167        if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
 168                /* An altstatus read will cause the needed delay without
 169                   messing up the IRQ status */
 170                ata_sff_altstatus(ap);
 171                return;
 172        }
 173        /* There are no DMA controllers without ctl. BUG here to ensure
 174           we never violate the HDMA1:0 transition timing and risk
 175           corruption. */
 176        BUG();
 177}
 178EXPORT_SYMBOL_GPL(ata_sff_dma_pause);
 179
 180/**
 181 *      ata_sff_busy_sleep - sleep until BSY clears, or timeout
 182 *      @ap: port containing status register to be polled
 183 *      @tmout_pat: impatience timeout in msecs
 184 *      @tmout: overall timeout in msecs
 185 *
 186 *      Sleep until ATA Status register bit BSY clears,
 187 *      or a timeout occurs.
 188 *
 189 *      LOCKING:
 190 *      Kernel thread context (may sleep).
 191 *
 192 *      RETURNS:
 193 *      0 on success, -errno otherwise.
 194 */
 195int ata_sff_busy_sleep(struct ata_port *ap,
 196                       unsigned long tmout_pat, unsigned long tmout)
 197{
 198        unsigned long timer_start, timeout;
 199        u8 status;
 200
 201        status = ata_sff_busy_wait(ap, ATA_BUSY, 300);
 202        timer_start = jiffies;
 203        timeout = ata_deadline(timer_start, tmout_pat);
 204        while (status != 0xff && (status & ATA_BUSY) &&
 205               time_before(jiffies, timeout)) {
 206                ata_msleep(ap, 50);
 207                status = ata_sff_busy_wait(ap, ATA_BUSY, 3);
 208        }
 209
 210        if (status != 0xff && (status & ATA_BUSY))
 211                ata_port_warn(ap,
 212                              "port is slow to respond, please be patient (Status 0x%x)\n",
 213                              status);
 214
 215        timeout = ata_deadline(timer_start, tmout);
 216        while (status != 0xff && (status & ATA_BUSY) &&
 217               time_before(jiffies, timeout)) {
 218                ata_msleep(ap, 50);
 219                status = ap->ops->sff_check_status(ap);
 220        }
 221
 222        if (status == 0xff)
 223                return -ENODEV;
 224
 225        if (status & ATA_BUSY) {
 226                ata_port_err(ap,
 227                             "port failed to respond (%lu secs, Status 0x%x)\n",
 228                             DIV_ROUND_UP(tmout, 1000), status);
 229                return -EBUSY;
 230        }
 231
 232        return 0;
 233}
 234EXPORT_SYMBOL_GPL(ata_sff_busy_sleep);
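
/*
 * Illustrative sketch (assumed usage, not from this file): a reset path
 * might wait for BSY with a short "impatience" threshold and a longer hard
 * limit, e.g. the boot timeouts from <linux/libata.h>.
 */
static int my_wait_after_reset(struct ata_port *ap)
{
        /* warn after ATA_TMOUT_BOOT_QUICK msecs, fail after ATA_TMOUT_BOOT */
        return ata_sff_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
}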
 235
 236static int ata_sff_check_ready(struct ata_link *link)
 237{
 238        u8 status = link->ap->ops->sff_check_status(link->ap);
 239
 240        return ata_check_ready(status);
 241}
 242
 243/**
 244 *      ata_sff_wait_ready - sleep until BSY clears, or timeout
 245 *      @link: SFF link to wait ready status for
 246 *      @deadline: deadline jiffies for the operation
 247 *
 248 *      Sleep until ATA Status register bit BSY clears, or timeout
 249 *      occurs.
 250 *
 251 *      LOCKING:
 252 *      Kernel thread context (may sleep).
 253 *
 254 *      RETURNS:
 255 *      0 on success, -errno otherwise.
 256 */
 257int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline)
 258{
 259        return ata_wait_ready(link, deadline, ata_sff_check_ready);
 260}
 261EXPORT_SYMBOL_GPL(ata_sff_wait_ready);
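
/*
 * Illustrative sketch (hypothetical): a driver-specific softreset usually
 * finishes by waiting for the link to become ready before the deadline
 * handed down by EH.
 */
static int my_softreset(struct ata_link *link, unsigned int *classes,
                        unsigned long deadline)
{
        /* ...controller-specific reset sequence would go here... */
        return ata_sff_wait_ready(link, deadline);
}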
 262
 263/**
 264 *      ata_sff_set_devctl - Write device control reg
 265 *      @ap: port where the device is
 266 *      @ctl: value to write
 267 *
 268 *      Writes ATA taskfile device control register.
 269 *
 270 *      Note: may NOT be used as the sff_set_devctl() entry in
 271 *      ata_port_operations.
 272 *
 273 *      LOCKING:
 274 *      Inherited from caller.
 275 */
 276static void ata_sff_set_devctl(struct ata_port *ap, u8 ctl)
 277{
 278        if (ap->ops->sff_set_devctl)
 279                ap->ops->sff_set_devctl(ap, ctl);
 280        else
 281                iowrite8(ctl, ap->ioaddr.ctl_addr);
 282}
 283
 284/**
 285 *      ata_sff_dev_select - Select device 0/1 on ATA bus
 286 *      @ap: ATA channel to manipulate
 287 *      @device: ATA device (numbered from zero) to select
 288 *
 289 *      Use the method defined in the ATA specification to
 290 *      make either device 0, or device 1, active on the
 291 *      ATA channel.  Works with both PIO and MMIO.
 292 *
 293 *      May be used as the dev_select() entry in ata_port_operations.
 294 *
 295 *      LOCKING:
 296 *      caller.
 297 */
 298void ata_sff_dev_select(struct ata_port *ap, unsigned int device)
 299{
 300        u8 tmp;
 301
 302        if (device == 0)
 303                tmp = ATA_DEVICE_OBS;
 304        else
 305                tmp = ATA_DEVICE_OBS | ATA_DEV1;
 306
 307        iowrite8(tmp, ap->ioaddr.device_addr);
 308        ata_sff_pause(ap);      /* needed; also flushes, for mmio */
 309}
 310EXPORT_SYMBOL_GPL(ata_sff_dev_select);
 311
 312/**
 313 *      ata_dev_select - Select device 0/1 on ATA bus
 314 *      @ap: ATA channel to manipulate
 315 *      @device: ATA device (numbered from zero) to select
 316 *      @wait: non-zero to wait for Status register BSY bit to clear
 317 *      @can_sleep: non-zero if context allows sleeping
 318 *
 319 *      Use the method defined in the ATA specification to
 320 *      make either device 0, or device 1, active on the
 321 *      ATA channel.
 322 *
 323 *      This is a high-level version of ata_sff_dev_select(), which
 324 *      additionally provides the services of inserting the proper
 325 *      pauses and status polling, where needed.
 326 *
 327 *      LOCKING:
 328 *      caller.
 329 */
 330static void ata_dev_select(struct ata_port *ap, unsigned int device,
 331                           unsigned int wait, unsigned int can_sleep)
 332{
 333        if (ata_msg_probe(ap))
 334                ata_port_info(ap, "ata_dev_select: ENTER, device %u, wait %u\n",
 335                              device, wait);
 336
 337        if (wait)
 338                ata_wait_idle(ap);
 339
 340        ap->ops->sff_dev_select(ap, device);
 341
 342        if (wait) {
 343                if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
 344                        ata_msleep(ap, 150);
 345                ata_wait_idle(ap);
 346        }
 347}
 348
 349/**
 350 *      ata_sff_irq_on - Enable interrupts on a port.
 351 *      @ap: Port on which interrupts are enabled.
 352 *
 353 *      Enable interrupts on a legacy IDE device using MMIO or PIO,
 354 *      wait for idle, clear any pending interrupts.
 355 *
 356 *      Note: may NOT be used as the sff_irq_on() entry in
 357 *      ata_port_operations.
 358 *
 359 *      LOCKING:
 360 *      Inherited from caller.
 361 */
 362void ata_sff_irq_on(struct ata_port *ap)
 363{
 364        struct ata_ioports *ioaddr = &ap->ioaddr;
 365
 366        if (ap->ops->sff_irq_on) {
 367                ap->ops->sff_irq_on(ap);
 368                return;
 369        }
 370
 371        ap->ctl &= ~ATA_NIEN;
 372        ap->last_ctl = ap->ctl;
 373
 374        if (ap->ops->sff_set_devctl || ioaddr->ctl_addr)
 375                ata_sff_set_devctl(ap, ap->ctl);
 376        ata_wait_idle(ap);
 377
 378        if (ap->ops->sff_irq_clear)
 379                ap->ops->sff_irq_clear(ap);
 380}
 381EXPORT_SYMBOL_GPL(ata_sff_irq_on);
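
/*
 * Illustrative sketch (hypothetical): a custom ->thaw() hook commonly
 * clears stale status and then re-enables the port interrupt, much like
 * ata_sff_thaw() does.
 */
static void my_thaw(struct ata_port *ap)
{
        /* clear any latched interrupt, then unmask */
        ap->ops->sff_check_status(ap);
        ata_sff_irq_on(ap);
}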
 382
 383/**
 384 *      ata_sff_tf_load - send taskfile registers to host controller
 385 *      @ap: Port to which output is sent
 386 *      @tf: ATA taskfile register set
 387 *
 388 *      Outputs ATA taskfile to standard ATA host controller.
 389 *
 390 *      LOCKING:
 391 *      Inherited from caller.
 392 */
 393void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
 394{
 395        struct ata_ioports *ioaddr = &ap->ioaddr;
 396        unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
 397
 398        if (tf->ctl != ap->last_ctl) {
 399                if (ioaddr->ctl_addr)
 400                        iowrite8(tf->ctl, ioaddr->ctl_addr);
 401                ap->last_ctl = tf->ctl;
 402                ata_wait_idle(ap);
 403        }
 404
 405        if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
 406                WARN_ON_ONCE(!ioaddr->ctl_addr);
 407                iowrite8(tf->hob_feature, ioaddr->feature_addr);
 408                iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
 409                iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
 410                iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
 411                iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
 412                VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
 413                        tf->hob_feature,
 414                        tf->hob_nsect,
 415                        tf->hob_lbal,
 416                        tf->hob_lbam,
 417                        tf->hob_lbah);
 418        }
 419
 420        if (is_addr) {
 421                iowrite8(tf->feature, ioaddr->feature_addr);
 422                iowrite8(tf->nsect, ioaddr->nsect_addr);
 423                iowrite8(tf->lbal, ioaddr->lbal_addr);
 424                iowrite8(tf->lbam, ioaddr->lbam_addr);
 425                iowrite8(tf->lbah, ioaddr->lbah_addr);
 426                VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
 427                        tf->feature,
 428                        tf->nsect,
 429                        tf->lbal,
 430                        tf->lbam,
 431                        tf->lbah);
 432        }
 433
 434        if (tf->flags & ATA_TFLAG_DEVICE) {
 435                iowrite8(tf->device, ioaddr->device_addr);
 436                VPRINTK("device 0x%X\n", tf->device);
 437        }
 438
 439        ata_wait_idle(ap);
 440}
 441EXPORT_SYMBOL_GPL(ata_sff_tf_load);
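
/*
 * Illustrative sketch (hypothetical): controllers with quirky shadow
 * registers can wrap the standard helper instead of reimplementing it,
 * applying their fixups before delegating.
 */
static void my_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
        /* controller-specific workaround would go here */
        ata_sff_tf_load(ap, tf);
}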
 442
 443/**
 444 *      ata_sff_tf_read - input device's ATA taskfile shadow registers
 445 *      @ap: Port from which input is read
 446 *      @tf: ATA taskfile register set for storing input
 447 *
 448 *      Reads ATA taskfile registers for currently-selected device
 449 *      into @tf. Assumes the device has a fully SFF compliant task file
 450 *      layout and behaviour. If your device does not (e.g. has a different
 451 *      status method), then you will need to provide a replacement tf_read.
 452 *
 453 *      LOCKING:
 454 *      Inherited from caller.
 455 */
 456void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
 457{
 458        struct ata_ioports *ioaddr = &ap->ioaddr;
 459
 460        tf->command = ata_sff_check_status(ap);
 461        tf->feature = ioread8(ioaddr->error_addr);
 462        tf->nsect = ioread8(ioaddr->nsect_addr);
 463        tf->lbal = ioread8(ioaddr->lbal_addr);
 464        tf->lbam = ioread8(ioaddr->lbam_addr);
 465        tf->lbah = ioread8(ioaddr->lbah_addr);
 466        tf->device = ioread8(ioaddr->device_addr);
 467
 468        if (tf->flags & ATA_TFLAG_LBA48) {
 469                if (likely(ioaddr->ctl_addr)) {
 470                        iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
 471                        tf->hob_feature = ioread8(ioaddr->error_addr);
 472                        tf->hob_nsect = ioread8(ioaddr->nsect_addr);
 473                        tf->hob_lbal = ioread8(ioaddr->lbal_addr);
 474                        tf->hob_lbam = ioread8(ioaddr->lbam_addr);
 475                        tf->hob_lbah = ioread8(ioaddr->lbah_addr);
 476                        iowrite8(tf->ctl, ioaddr->ctl_addr);
 477                        ap->last_ctl = tf->ctl;
 478                } else
 479                        WARN_ON_ONCE(1);
 480        }
 481}
 482EXPORT_SYMBOL_GPL(ata_sff_tf_read);
 483
 484/**
 485 *      ata_sff_exec_command - issue ATA command to host controller
 486 *      @ap: port to which command is being issued
 487 *      @tf: ATA taskfile register set
 488 *
 489 *      Issues ATA command, with proper synchronization with interrupt
 490 *      handler / other threads.
 491 *
 492 *      LOCKING:
 493 *      spin_lock_irqsave(host lock)
 494 */
 495void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
 496{
 497        DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
 498
 499        iowrite8(tf->command, ap->ioaddr.command_addr);
 500        ata_sff_pause(ap);
 501}
 502EXPORT_SYMBOL_GPL(ata_sff_exec_command);
 503
 504/**
 505 *      ata_tf_to_host - issue ATA taskfile to host controller
 506 *      @ap: port to which command is being issued
 507 *      @tf: ATA taskfile register set
 508 *
 509 *      Issues ATA taskfile register set to ATA host controller,
 510 *      with proper synchronization with interrupt handler and
 511 *      other threads.
 512 *
 513 *      LOCKING:
 514 *      spin_lock_irqsave(host lock)
 515 */
 516static inline void ata_tf_to_host(struct ata_port *ap,
 517                                  const struct ata_taskfile *tf)
 518{
 519        ap->ops->sff_tf_load(ap, tf);
 520        ap->ops->sff_exec_command(ap, tf);
 521}
 522
 523/**
 524 *      ata_sff_data_xfer - Transfer data by PIO
 525 *      @qc: queued command
 526 *      @buf: data buffer
 527 *      @buflen: buffer length
 528 *      @rw: read/write
 529 *
 530 *      Transfer data from/to the device data register by PIO.
 531 *
 532 *      LOCKING:
 533 *      Inherited from caller.
 534 *
 535 *      RETURNS:
 536 *      Bytes consumed.
 537 */
 538unsigned int ata_sff_data_xfer(struct ata_queued_cmd *qc, unsigned char *buf,
 539                               unsigned int buflen, int rw)
 540{
 541        struct ata_port *ap = qc->dev->link->ap;
 542        void __iomem *data_addr = ap->ioaddr.data_addr;
 543        unsigned int words = buflen >> 1;
 544
 545        /* Transfer multiple of 2 bytes */
 546        if (rw == READ)
 547                ioread16_rep(data_addr, buf, words);
 548        else
 549                iowrite16_rep(data_addr, buf, words);
 550
 551        /* Transfer trailing byte, if any. */
 552        if (unlikely(buflen & 0x01)) {
 553                unsigned char pad[2] = { };
 554
 555                /* Point buf to the tail of buffer */
 556                buf += buflen - 1;
 557
 558                /*
 559                 * Use io*16_rep() accessors here as well to avoid pointlessly
 560                 * swapping bytes to and from on the big endian machines...
 561                 */
 562                if (rw == READ) {
 563                        ioread16_rep(data_addr, pad, 1);
 564                        *buf = pad[0];
 565                } else {
 566                        pad[0] = *buf;
 567                        iowrite16_rep(data_addr, pad, 1);
 568                }
 569                words++;
 570        }
 571
 572        return words << 1;
 573}
 574EXPORT_SYMBOL_GPL(ata_sff_data_xfer);
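
/*
 * Illustrative sketch (hypothetical): a port that must bound each PIO burst
 * could provide its own ->sff_data_xfer() that splits the buffer and lets
 * ata_sff_data_xfer() do the actual register accesses.  The 512-byte limit
 * below is an assumption.
 */
static unsigned int my_data_xfer(struct ata_queued_cmd *qc, unsigned char *buf,
                                 unsigned int buflen, int rw)
{
        unsigned int done = 0;

        while (done < buflen) {
                unsigned int chunk = min(buflen - done, 512U);

                done += ata_sff_data_xfer(qc, buf + done, chunk, rw);
        }
        return done;
}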
 575
 576/**
 577 *      ata_sff_data_xfer32 - Transfer data by PIO
 578 *      @qc: queued command
 579 *      @buf: data buffer
 580 *      @buflen: buffer length
 581 *      @rw: read/write
 582 *
 583 *      Transfer data from/to the device data register by PIO using 32bit
 584 *      I/O operations.
 585 *
 586 *      LOCKING:
 587 *      Inherited from caller.
 588 *
 589 *      RETURNS:
 590 *      Bytes consumed.
 591 */
 592
 593unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc, unsigned char *buf,
 594                               unsigned int buflen, int rw)
 595{
 596        struct ata_device *dev = qc->dev;
 597        struct ata_port *ap = dev->link->ap;
 598        void __iomem *data_addr = ap->ioaddr.data_addr;
 599        unsigned int words = buflen >> 2;
 600        int slop = buflen & 3;
 601
 602        if (!(ap->pflags & ATA_PFLAG_PIO32))
 603                return ata_sff_data_xfer(qc, buf, buflen, rw);
 604
 605        /* Transfer multiple of 4 bytes */
 606        if (rw == READ)
 607                ioread32_rep(data_addr, buf, words);
 608        else
 609                iowrite32_rep(data_addr, buf, words);
 610
 611        /* Transfer trailing bytes, if any */
 612        if (unlikely(slop)) {
 613                unsigned char pad[4] = { };
 614
 615                /* Point buf to the tail of buffer */
 616                buf += buflen - slop;
 617
 618                /*
 619                 * Use io*_rep() accessors here as well to avoid pointlessly
 620                 * swapping bytes to and from on the big endian machines...
 621                 */
 622                if (rw == READ) {
 623                        if (slop < 3)
 624                                ioread16_rep(data_addr, pad, 1);
 625                        else
 626                                ioread32_rep(data_addr, pad, 1);
 627                        memcpy(buf, pad, slop);
 628                } else {
 629                        memcpy(pad, buf, slop);
 630                        if (slop < 3)
 631                                iowrite16_rep(data_addr, pad, 1);
 632                        else
 633                                iowrite32_rep(data_addr, pad, 1);
 634                }
 635        }
 636        return (buflen + 1) & ~1;
 637}
 638EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);
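
/*
 * Illustrative note: drivers whose data port tolerates 32-bit accesses can
 * simply plug this helper into their port ops; it falls back to
 * ata_sff_data_xfer() unless ATA_PFLAG_PIO32 is set.  Hypothetical sketch:
 */
static struct ata_port_operations my_pio32_port_ops = {
        .inherits       = &ata_sff_port_ops,
        .sff_data_xfer  = ata_sff_data_xfer32,
};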
 639
 640static void ata_pio_xfer(struct ata_queued_cmd *qc, struct page *page,
 641                unsigned int offset, size_t xfer_size)
 642{
 643        bool do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
 644        unsigned char *buf;
 645
 646        buf = kmap_atomic(page);
 647        qc->ap->ops->sff_data_xfer(qc, buf + offset, xfer_size, do_write);
 648        kunmap_atomic(buf);
 649
 650        if (!do_write && !PageSlab(page))
 651                flush_dcache_page(page);
 652}
 653
 654/**
 655 *      ata_pio_sector - Transfer a sector of data.
 656 *      @qc: Command on going
 657 *
 658 *      Transfer qc->sect_size bytes of data from/to the ATA device.
 659 *
 660 *      LOCKING:
 661 *      Inherited from caller.
 662 */
 663static void ata_pio_sector(struct ata_queued_cmd *qc)
 664{
 665        struct ata_port *ap = qc->ap;
 666        struct page *page;
 667        unsigned int offset;
 668
 669        if (!qc->cursg) {
 670                qc->curbytes = qc->nbytes;
 671                return;
 672        }
 673        if (qc->curbytes == qc->nbytes - qc->sect_size)
 674                ap->hsm_task_state = HSM_ST_LAST;
 675
 676        page = sg_page(qc->cursg);
 677        offset = qc->cursg->offset + qc->cursg_ofs;
 678
 679        /* get the current page and offset */
 680        page = nth_page(page, (offset >> PAGE_SHIFT));
 681        offset %= PAGE_SIZE;
 682
 683        DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
 684
 685        /*
 686         * Split the transfer when it splits a page boundary.  Note that the
 687         * split still has to be dword aligned like all ATA data transfers.
 688         */
 689        WARN_ON_ONCE(offset % 4);
 690        if (offset + qc->sect_size > PAGE_SIZE) {
 691                unsigned int split_len = PAGE_SIZE - offset;
 692
 693                ata_pio_xfer(qc, page, offset, split_len);
 694                ata_pio_xfer(qc, nth_page(page, 1), 0,
 695                             qc->sect_size - split_len);
 696        } else {
 697                ata_pio_xfer(qc, page, offset, qc->sect_size);
 698        }
 699
 700        qc->curbytes += qc->sect_size;
 701        qc->cursg_ofs += qc->sect_size;
 702
 703        if (qc->cursg_ofs == qc->cursg->length) {
 704                qc->cursg = sg_next(qc->cursg);
 705                if (!qc->cursg)
 706                        ap->hsm_task_state = HSM_ST_LAST;
 707                qc->cursg_ofs = 0;
 708        }
 709}
 710
 711/**
 712 *      ata_pio_sectors - Transfer one or many sectors.
 713 *      @qc: Command on going
 714 *
 715 *      Transfer one or many sectors of data from/to the
 716 *      ATA device for the DRQ request.
 717 *
 718 *      LOCKING:
 719 *      Inherited from caller.
 720 */
 721static void ata_pio_sectors(struct ata_queued_cmd *qc)
 722{
 723        if (is_multi_taskfile(&qc->tf)) {
 724                /* READ/WRITE MULTIPLE */
 725                unsigned int nsect;
 726
 727                WARN_ON_ONCE(qc->dev->multi_count == 0);
 728
 729                nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
 730                            qc->dev->multi_count);
 731                while (nsect--)
 732                        ata_pio_sector(qc);
 733        } else
 734                ata_pio_sector(qc);
 735
 736        ata_sff_sync(qc->ap); /* flush */
 737}
 738
 739/**
 740 *      atapi_send_cdb - Write CDB bytes to hardware
 741 *      @ap: Port to which ATAPI device is attached.
 742 *      @qc: Taskfile currently active
 743 *
 744 *      When device has indicated its readiness to accept
 745 *      a CDB, this function is called.  Send the CDB.
 746 *
 747 *      LOCKING:
 748 *      caller.
 749 */
 750static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
 751{
 752        /* send SCSI cdb */
 753        DPRINTK("send cdb\n");
 754        WARN_ON_ONCE(qc->dev->cdb_len < 12);
 755
 756        ap->ops->sff_data_xfer(qc, qc->cdb, qc->dev->cdb_len, 1);
 757        ata_sff_sync(ap);
 758        /* FIXME: If the CDB is for DMA do we need to do the transition delay
 759           or is bmdma_start guaranteed to do it ? */
 760        switch (qc->tf.protocol) {
 761        case ATAPI_PROT_PIO:
 762                ap->hsm_task_state = HSM_ST;
 763                break;
 764        case ATAPI_PROT_NODATA:
 765                ap->hsm_task_state = HSM_ST_LAST;
 766                break;
 767#ifdef CONFIG_ATA_BMDMA
 768        case ATAPI_PROT_DMA:
 769                ap->hsm_task_state = HSM_ST_LAST;
 770                /* initiate bmdma */
 771                ap->ops->bmdma_start(qc);
 772                break;
 773#endif /* CONFIG_ATA_BMDMA */
 774        default:
 775                BUG();
 776        }
 777}
 778
 779/**
 780 *      __atapi_pio_bytes - Transfer data from/to the ATAPI device.
 781 *      @qc: Command on going
 782 *      @bytes: number of bytes
 783 *
 784 *      Transfer data from/to the ATAPI device.
 785 *
 786 *      LOCKING:
 787 *      Inherited from caller.
 788 *
 789 */
 790static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
 791{
 792        int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
 793        struct ata_port *ap = qc->ap;
 794        struct ata_device *dev = qc->dev;
 795        struct ata_eh_info *ehi = &dev->link->eh_info;
 796        struct scatterlist *sg;
 797        struct page *page;
 798        unsigned char *buf;
 799        unsigned int offset, count, consumed;
 800
 801next_sg:
 802        sg = qc->cursg;
 803        if (unlikely(!sg)) {
 804                ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
 805                                  "buf=%u cur=%u bytes=%u",
 806                                  qc->nbytes, qc->curbytes, bytes);
 807                return -1;
 808        }
 809
 810        page = sg_page(sg);
 811        offset = sg->offset + qc->cursg_ofs;
 812
 813        /* get the current page and offset */
 814        page = nth_page(page, (offset >> PAGE_SHIFT));
 815        offset %= PAGE_SIZE;
 816
 817        /* don't overrun current sg */
 818        count = min(sg->length - qc->cursg_ofs, bytes);
 819
 820        /* don't cross page boundaries */
 821        count = min(count, (unsigned int)PAGE_SIZE - offset);
 822
 823        DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
 824
 825        /* do the actual data transfer */
 826        buf = kmap_atomic(page);
 827        consumed = ap->ops->sff_data_xfer(qc, buf + offset, count, rw);
 828        kunmap_atomic(buf);
 829
 830        bytes -= min(bytes, consumed);
 831        qc->curbytes += count;
 832        qc->cursg_ofs += count;
 833
 834        if (qc->cursg_ofs == sg->length) {
 835                qc->cursg = sg_next(qc->cursg);
 836                qc->cursg_ofs = 0;
 837        }
 838
 839        /*
 840         * There used to be a WARN_ON_ONCE(qc->cursg && count != consumed);
 841         * Unfortunately __atapi_pio_bytes doesn't know enough to do the WARN
 842         * check correctly as it doesn't know if it is the last request being
 843         * made. Somebody should implement a proper sanity check.
 844         */
 845        if (bytes)
 846                goto next_sg;
 847        return 0;
 848}
 849
 850/**
 851 *      atapi_pio_bytes - Transfer data from/to the ATAPI device.
 852 *      @qc: Command on going
 853 *
 854 *      Transfer data from/to the ATAPI device.
 855 *
 856 *      LOCKING:
 857 *      Inherited from caller.
 858 */
 859static void atapi_pio_bytes(struct ata_queued_cmd *qc)
 860{
 861        struct ata_port *ap = qc->ap;
 862        struct ata_device *dev = qc->dev;
 863        struct ata_eh_info *ehi = &dev->link->eh_info;
 864        unsigned int ireason, bc_lo, bc_hi, bytes;
 865        int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
 866
 867        /* Abuse qc->result_tf for temp storage of intermediate TF
 868         * here to save some kernel stack usage.
 869         * For normal completion, qc->result_tf is not relevant. For
 870         * error, qc->result_tf is later overwritten by ata_qc_complete().
 871         * So, the correctness of qc->result_tf is not affected.
 872         */
 873        ap->ops->sff_tf_read(ap, &qc->result_tf);
 874        ireason = qc->result_tf.nsect;
 875        bc_lo = qc->result_tf.lbam;
 876        bc_hi = qc->result_tf.lbah;
 877        bytes = (bc_hi << 8) | bc_lo;
 878
 879        /* shall be cleared to zero, indicating xfer of data */
 880        if (unlikely(ireason & ATAPI_COD))
 881                goto atapi_check;
 882
 883        /* make sure transfer direction matches expected */
 884        i_write = ((ireason & ATAPI_IO) == 0) ? 1 : 0;
 885        if (unlikely(do_write != i_write))
 886                goto atapi_check;
 887
 888        if (unlikely(!bytes))
 889                goto atapi_check;
 890
 891        VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
 892
 893        if (unlikely(__atapi_pio_bytes(qc, bytes)))
 894                goto err_out;
 895        ata_sff_sync(ap); /* flush */
 896
 897        return;
 898
 899 atapi_check:
 900        ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
 901                          ireason, bytes);
 902 err_out:
 903        qc->err_mask |= AC_ERR_HSM;
 904        ap->hsm_task_state = HSM_ST_ERR;
 905}
 906
 907/**
 908 *      ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
 909 *      @ap: the target ata_port
 910 *      @qc: qc on going
 911 *
 912 *      RETURNS:
 913 *      1 if ok in workqueue, 0 otherwise.
 914 */
 915static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
 916                                                struct ata_queued_cmd *qc)
 917{
 918        if (qc->tf.flags & ATA_TFLAG_POLLING)
 919                return 1;
 920
 921        if (ap->hsm_task_state == HSM_ST_FIRST) {
 922                if (qc->tf.protocol == ATA_PROT_PIO &&
 923                   (qc->tf.flags & ATA_TFLAG_WRITE))
 924                    return 1;
 925
 926                if (ata_is_atapi(qc->tf.protocol) &&
 927                   !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
 928                        return 1;
 929        }
 930
 931        return 0;
 932}
 933
 934/**
 935 *      ata_hsm_qc_complete - finish a qc running on standard HSM
 936 *      @qc: Command to complete
 937 *      @in_wq: 1 if called from workqueue, 0 otherwise
 938 *
 939 *      Finish @qc which is running on standard HSM.
 940 *
 941 *      LOCKING:
 942 *      If @in_wq is zero, spin_lock_irqsave(host lock).
 943 *      Otherwise, none on entry and grabs host lock.
 944 */
 945static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
 946{
 947        struct ata_port *ap = qc->ap;
 948
 949        if (ap->ops->error_handler) {
 950                if (in_wq) {
 951                        /* EH might have kicked in while host lock is
 952                         * released.
 953                         */
 954                        qc = ata_qc_from_tag(ap, qc->tag);
 955                        if (qc) {
 956                                if (likely(!(qc->err_mask & AC_ERR_HSM))) {
 957                                        ata_sff_irq_on(ap);
 958                                        ata_qc_complete(qc);
 959                                } else
 960                                        ata_port_freeze(ap);
 961                        }
 962                } else {
 963                        if (likely(!(qc->err_mask & AC_ERR_HSM)))
 964                                ata_qc_complete(qc);
 965                        else
 966                                ata_port_freeze(ap);
 967                }
 968        } else {
 969                if (in_wq) {
 970                        ata_sff_irq_on(ap);
 971                        ata_qc_complete(qc);
 972                } else
 973                        ata_qc_complete(qc);
 974        }
 975}
 976
 977/**
 978 *      ata_sff_hsm_move - move the HSM to the next state.
 979 *      @ap: the target ata_port
 980 *      @qc: qc on going
 981 *      @status: current device status
 982 *      @in_wq: 1 if called from workqueue, 0 otherwise
 983 *
 984 *      RETURNS:
 985 *      1 when poll next status needed, 0 otherwise.
 986 */
 987int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
 988                     u8 status, int in_wq)
 989{
 990        struct ata_link *link = qc->dev->link;
 991        struct ata_eh_info *ehi = &link->eh_info;
 992        int poll_next;
 993
 994        lockdep_assert_held(ap->lock);
 995
 996        WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
 997
 998        /* Make sure ata_sff_qc_issue() does not throw things
 999         * like DMA polling into the workqueue. Notice that
1000         * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
1001         */
1002        WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc));
1003
1004fsm_start:
1005        DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
1006                ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
1007
1008        switch (ap->hsm_task_state) {
1009        case HSM_ST_FIRST:
1010                /* Send first data block or PACKET CDB */
1011
1012                /* If polling, we will stay in the work queue after
1013                 * sending the data. Otherwise, interrupt handler
1014                 * takes over after sending the data.
1015                 */
1016                poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
1017
1018                /* check device status */
1019                if (unlikely((status & ATA_DRQ) == 0)) {
1020                        /* handle BSY=0, DRQ=0 as error */
1021                        if (likely(status & (ATA_ERR | ATA_DF)))
1022                                /* device stops HSM for abort/error */
1023                                qc->err_mask |= AC_ERR_DEV;
1024                        else {
1025                                /* HSM violation. Let EH handle this */
1026                                ata_ehi_push_desc(ehi,
1027                                        "ST_FIRST: !(DRQ|ERR|DF)");
1028                                qc->err_mask |= AC_ERR_HSM;
1029                        }
1030
1031                        ap->hsm_task_state = HSM_ST_ERR;
1032                        goto fsm_start;
1033                }
1034
1035                /* Device should not ask for data transfer (DRQ=1)
1036                 * when it finds something wrong.
1037                 * We ignore DRQ here and stop the HSM by
1038                 * changing hsm_task_state to HSM_ST_ERR and
1039                 * let the EH abort the command or reset the device.
1040                 */
1041                if (unlikely(status & (ATA_ERR | ATA_DF))) {
1042                        /* Some ATAPI tape drives forget to clear the ERR bit
1043                         * when doing the next command (mostly request sense).
1044                         * We ignore ERR here to workaround and proceed sending
1045                         * the CDB.
1046                         */
1047                        if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
1048                                ata_ehi_push_desc(ehi, "ST_FIRST: "
1049                                        "DRQ=1 with device error, "
1050                                        "dev_stat 0x%X", status);
1051                                qc->err_mask |= AC_ERR_HSM;
1052                                ap->hsm_task_state = HSM_ST_ERR;
1053                                goto fsm_start;
1054                        }
1055                }
1056
1057                if (qc->tf.protocol == ATA_PROT_PIO) {
1058                        /* PIO data out protocol.
1059                         * send first data block.
1060                         */
1061
1062                        /* ata_pio_sectors() might change the state
1063                         * to HSM_ST_LAST. so, the state is changed here
1064                         * before ata_pio_sectors().
1065                         */
1066                        ap->hsm_task_state = HSM_ST;
1067                        ata_pio_sectors(qc);
1068                } else
1069                        /* send CDB */
1070                        atapi_send_cdb(ap, qc);
1071
1072                /* if polling, ata_sff_pio_task() handles the rest.
1073                 * otherwise, interrupt handler takes over from here.
1074                 */
1075                break;
1076
1077        case HSM_ST:
1078                /* complete command or read/write the data register */
1079                if (qc->tf.protocol == ATAPI_PROT_PIO) {
1080                        /* ATAPI PIO protocol */
1081                        if ((status & ATA_DRQ) == 0) {
1082                                /* No more data to transfer or device error.
1083                                 * Device error will be tagged in HSM_ST_LAST.
1084                                 */
1085                                ap->hsm_task_state = HSM_ST_LAST;
1086                                goto fsm_start;
1087                        }
1088
1089                        /* Device should not ask for data transfer (DRQ=1)
1090                         * when it finds something wrong.
1091                         * We ignore DRQ here and stop the HSM by
1092                         * changing hsm_task_state to HSM_ST_ERR and
1093                         * let the EH abort the command or reset the device.
1094                         */
1095                        if (unlikely(status & (ATA_ERR | ATA_DF))) {
1096                                ata_ehi_push_desc(ehi, "ST-ATAPI: "
1097                                        "DRQ=1 with device error, "
1098                                        "dev_stat 0x%X", status);
1099                                qc->err_mask |= AC_ERR_HSM;
1100                                ap->hsm_task_state = HSM_ST_ERR;
1101                                goto fsm_start;
1102                        }
1103
1104                        atapi_pio_bytes(qc);
1105
1106                        if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
1107                                /* bad ireason reported by device */
1108                                goto fsm_start;
1109
1110                } else {
1111                        /* ATA PIO protocol */
1112                        if (unlikely((status & ATA_DRQ) == 0)) {
1113                                /* handle BSY=0, DRQ=0 as error */
1114                                if (likely(status & (ATA_ERR | ATA_DF))) {
1115                                        /* device stops HSM for abort/error */
1116                                        qc->err_mask |= AC_ERR_DEV;
1117
1118                                        /* If diagnostic failed and this is
1119                                         * IDENTIFY, it's likely a phantom
1120                                         * device.  Mark hint.
1121                                         */
1122                                        if (qc->dev->horkage &
1123                                            ATA_HORKAGE_DIAGNOSTIC)
1124                                                qc->err_mask |=
1125                                                        AC_ERR_NODEV_HINT;
1126                                } else {
1127                                        /* HSM violation. Let EH handle this.
1128                                         * Phantom devices also trigger this
1129                                         * condition.  Mark hint.
1130                                         */
1131                                        ata_ehi_push_desc(ehi, "ST-ATA: "
1132                                                "DRQ=0 without device error, "
1133                                                "dev_stat 0x%X", status);
1134                                        qc->err_mask |= AC_ERR_HSM |
1135                                                        AC_ERR_NODEV_HINT;
1136                                }
1137
1138                                ap->hsm_task_state = HSM_ST_ERR;
1139                                goto fsm_start;
1140                        }
1141
1142                        /* For PIO reads, some devices may ask for
1143                         * data transfer (DRQ=1) along with ERR=1.
1144                         * We respect DRQ here and transfer one
1145                         * block of junk data before changing the
1146                         * hsm_task_state to HSM_ST_ERR.
1147                         *
1148                         * For PIO writes, ERR=1 DRQ=1 doesn't make
1149                         * sense since the data block has been
1150                         * transferred to the device.
1151                         */
1152                        if (unlikely(status & (ATA_ERR | ATA_DF))) {
1153                                /* data might be corrupted */
1154                                qc->err_mask |= AC_ERR_DEV;
1155
1156                                if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
1157                                        ata_pio_sectors(qc);
1158                                        status = ata_wait_idle(ap);
1159                                }
1160
1161                                if (status & (ATA_BUSY | ATA_DRQ)) {
1162                                        ata_ehi_push_desc(ehi, "ST-ATA: "
1163                                                "BUSY|DRQ persists on ERR|DF, "
1164                                                "dev_stat 0x%X", status);
1165                                        qc->err_mask |= AC_ERR_HSM;
1166                                }
1167
1168                                /* There are oddball controllers with
1169                                 * status register stuck at 0x7f and
1170                                 * lbal/m/h at zero which makes it
1171                                 * pass all other presence detection
1172                                 * mechanisms we have.  Set NODEV_HINT
1173                                 * for it.  Kernel bz#7241.
1174                                 */
1175                                if (status == 0x7f)
1176                                        qc->err_mask |= AC_ERR_NODEV_HINT;
1177
1178                                /* ata_pio_sectors() might change the
1179                                 * state to HSM_ST_LAST. so, the state
1180                                 * is changed after ata_pio_sectors().
1181                                 */
1182                                ap->hsm_task_state = HSM_ST_ERR;
1183                                goto fsm_start;
1184                        }
1185
1186                        ata_pio_sectors(qc);
1187
1188                        if (ap->hsm_task_state == HSM_ST_LAST &&
1189                            (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
1190                                /* all data read */
1191                                status = ata_wait_idle(ap);
1192                                goto fsm_start;
1193                        }
1194                }
1195
1196                poll_next = 1;
1197                break;
1198
1199        case HSM_ST_LAST:
1200                if (unlikely(!ata_ok(status))) {
1201                        qc->err_mask |= __ac_err_mask(status);
1202                        ap->hsm_task_state = HSM_ST_ERR;
1203                        goto fsm_start;
1204                }
1205
1206                /* no more data to transfer */
1207                DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
1208                        ap->print_id, qc->dev->devno, status);
1209
1210                WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));
1211
1212                ap->hsm_task_state = HSM_ST_IDLE;
1213
1214                /* complete taskfile transaction */
1215                ata_hsm_qc_complete(qc, in_wq);
1216
1217                poll_next = 0;
1218                break;
1219
1220        case HSM_ST_ERR:
1221                ap->hsm_task_state = HSM_ST_IDLE;
1222
1223                /* complete taskfile transaction */
1224                ata_hsm_qc_complete(qc, in_wq);
1225
1226                poll_next = 0;
1227                break;
1228        default:
1229                poll_next = 0;
1230                WARN(true, "ata%d: SFF host state machine in invalid state %d",
1231                     ap->print_id, ap->hsm_task_state);
1232        }
1233
1234        return poll_next;
1235}
1236EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
1237
1238void ata_sff_queue_work(struct work_struct *work)
1239{
1240        queue_work(ata_sff_wq, work);
1241}
1242EXPORT_SYMBOL_GPL(ata_sff_queue_work);
1243
1244void ata_sff_queue_delayed_work(struct delayed_work *dwork, unsigned long delay)
1245{
1246        queue_delayed_work(ata_sff_wq, dwork, delay);
1247}
1248EXPORT_SYMBOL_GPL(ata_sff_queue_delayed_work);
1249
1250void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
1251{
1252        struct ata_port *ap = link->ap;
1253
1254        WARN_ON((ap->sff_pio_task_link != NULL) &&
1255                (ap->sff_pio_task_link != link));
1256        ap->sff_pio_task_link = link;
1257
1258        /* may fail if ata_sff_flush_pio_task() in progress */
1259        ata_sff_queue_delayed_work(&ap->sff_pio_task, msecs_to_jiffies(delay));
1260}
1261EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task);
1262
1263void ata_sff_flush_pio_task(struct ata_port *ap)
1264{
1265        DPRINTK("ENTER\n");
1266
1267        cancel_delayed_work_sync(&ap->sff_pio_task);
1268
1269        /*
1270         * We want to reset the HSM state to IDLE.  If we do so without
1271         * grabbing the port lock, critical sections protected by it which
1272         * expect the HSM state to stay stable may get surprised.  For
1273         * example, we may set IDLE in between the time
1274         * __ata_sff_port_intr() checks for HSM_ST_IDLE and before it calls
1275         * ata_sff_hsm_move() causing ata_sff_hsm_move() to BUG().
1276         */
1277        spin_lock_irq(ap->lock);
1278        ap->hsm_task_state = HSM_ST_IDLE;
1279        spin_unlock_irq(ap->lock);
1280
1281        ap->sff_pio_task_link = NULL;
1282
1283        if (ata_msg_ctl(ap))
1284                ata_port_dbg(ap, "%s: EXIT\n", __func__);
1285}
1286
1287static void ata_sff_pio_task(struct work_struct *work)
1288{
1289        struct ata_port *ap =
1290                container_of(work, struct ata_port, sff_pio_task.work);
1291        struct ata_link *link = ap->sff_pio_task_link;
1292        struct ata_queued_cmd *qc;
1293        u8 status;
1294        int poll_next;
1295
1296        spin_lock_irq(ap->lock);
1297
1298        BUG_ON(ap->sff_pio_task_link == NULL);
1299        /* qc can be NULL if timeout occurred */
1300        qc = ata_qc_from_tag(ap, link->active_tag);
1301        if (!qc) {
1302                ap->sff_pio_task_link = NULL;
1303                goto out_unlock;
1304        }
1305
1306fsm_start:
1307        WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
1308
1309        /*
1310         * This is purely heuristic.  This is a fast path.
1311         * Sometimes when we enter, BSY will be cleared in
1312         * a chk-status or two.  If not, the drive is probably seeking
1313         * or something.  Snooze for a couple msecs, then
1314         * chk-status again.  If still busy, queue delayed work.
1315         */
1316        status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
1317        if (status & ATA_BUSY) {
1318                spin_unlock_irq(ap->lock);
1319                ata_msleep(ap, 2);
1320                spin_lock_irq(ap->lock);
1321
1322                status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
1323                if (status & ATA_BUSY) {
1324                        ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
1325                        goto out_unlock;
1326                }
1327        }
1328
1329        /*
1330         * hsm_move() may trigger another command to be processed.
1331         * clean the link beforehand.
1332         */
1333        ap->sff_pio_task_link = NULL;
1334        /* move the HSM */
1335        poll_next = ata_sff_hsm_move(ap, qc, status, 1);
1336
1337        /* another command or interrupt handler
1338         * may be running at this point.
1339         */
1340        if (poll_next)
1341                goto fsm_start;
1342out_unlock:
1343        spin_unlock_irq(ap->lock);
1344}
1345
1346/**
1347 *      ata_sff_qc_issue - issue taskfile to a SFF controller
1348 *      @qc: command to issue to device
1349 *
1350 *      This function issues a PIO or NODATA command to a SFF
1351 *      controller.
1352 *
1353 *      LOCKING:
1354 *      spin_lock_irqsave(host lock)
1355 *
1356 *      RETURNS:
1357 *      Zero on success, AC_ERR_* mask on failure
1358 */
1359unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1360{
1361        struct ata_port *ap = qc->ap;
1362        struct ata_link *link = qc->dev->link;
1363
1364        /* Use polling pio if the LLD doesn't handle
1365         * interrupt driven pio and atapi CDB interrupt.
1366         */
1367        if (ap->flags & ATA_FLAG_PIO_POLLING)
1368                qc->tf.flags |= ATA_TFLAG_POLLING;
1369
1370        /* select the device */
1371        ata_dev_select(ap, qc->dev->devno, 1, 0);
1372
1373        /* start the command */
1374        switch (qc->tf.protocol) {
1375        case ATA_PROT_NODATA:
1376                if (qc->tf.flags & ATA_TFLAG_POLLING)
1377                        ata_qc_set_polling(qc);
1378
1379                ata_tf_to_host(ap, &qc->tf);
1380                ap->hsm_task_state = HSM_ST_LAST;
1381
1382                if (qc->tf.flags & ATA_TFLAG_POLLING)
1383                        ata_sff_queue_pio_task(link, 0);
1384
1385                break;
1386
1387        case ATA_PROT_PIO:
1388                if (qc->tf.flags & ATA_TFLAG_POLLING)
1389                        ata_qc_set_polling(qc);
1390
1391                ata_tf_to_host(ap, &qc->tf);
1392
1393                if (qc->tf.flags & ATA_TFLAG_WRITE) {
1394                        /* PIO data out protocol */
1395                        ap->hsm_task_state = HSM_ST_FIRST;
1396                        ata_sff_queue_pio_task(link, 0);
1397
1398                        /* always send first data block using the
1399                         * ata_sff_pio_task() codepath.
1400                         */
1401                } else {
1402                        /* PIO data in protocol */
1403                        ap->hsm_task_state = HSM_ST;
1404
1405                        if (qc->tf.flags & ATA_TFLAG_POLLING)
1406                                ata_sff_queue_pio_task(link, 0);
1407
1408                        /* if polling, ata_sff_pio_task() handles the
1409                         * rest.  otherwise, interrupt handler takes
1410                         * over from here.
1411                         */
1412                }
1413
1414                break;
1415
1416        case ATAPI_PROT_PIO:
1417        case ATAPI_PROT_NODATA:
1418                if (qc->tf.flags & ATA_TFLAG_POLLING)
1419                        ata_qc_set_polling(qc);
1420
1421                ata_tf_to_host(ap, &qc->tf);
1422
1423                ap->hsm_task_state = HSM_ST_FIRST;
1424
1425                /* send cdb by polling if no cdb interrupt */
1426                if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
1427                    (qc->tf.flags & ATA_TFLAG_POLLING))
1428                        ata_sff_queue_pio_task(link, 0);
1429                break;
1430
1431        default:
1432                return AC_ERR_SYSTEM;
1433        }
1434
1435        return 0;
1436}
1437EXPORT_SYMBOL_GPL(ata_sff_qc_issue);
1438
1439/**
1440 *      ata_sff_qc_fill_rtf - fill result TF using ->sff_tf_read
1441 *      @qc: qc to fill result TF for
1442 *
1443 *      @qc is finished and result TF needs to be filled.  Fill it
1444 *      using ->sff_tf_read.
1445 *
1446 *      LOCKING:
1447 *      spin_lock_irqsave(host lock)
1448 *
1449 *      RETURNS:
1450 *      true indicating that result TF is successfully filled.
1451 */
1452bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
1453{
1454        qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf);
1455        return true;
1456}
1457EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);
1458
1459static unsigned int ata_sff_idle_irq(struct ata_port *ap)
1460{
1461        ap->stats.idle_irq++;
1462
1463#ifdef ATA_IRQ_TRAP
1464        if ((ap->stats.idle_irq % 1000) == 0) {
1465                ap->ops->sff_check_status(ap);
1466                if (ap->ops->sff_irq_clear)
1467                        ap->ops->sff_irq_clear(ap);
1468                ata_port_warn(ap, "irq trap\n");
1469                return 1;
1470        }
1471#endif
1472        return 0;       /* irq not handled */
1473}
1474
1475static unsigned int __ata_sff_port_intr(struct ata_port *ap,
1476                                        struct ata_queued_cmd *qc,
1477                                        bool hsmv_on_idle)
1478{
1479        u8 status;
1480
1481        VPRINTK("ata%u: protocol %d task_state %d\n",
1482                ap->print_id, qc->tf.protocol, ap->hsm_task_state);
1483
1484        /* Check whether we are expecting interrupt in this state */
1485        switch (ap->hsm_task_state) {
1486        case HSM_ST_FIRST:
1487                /* Some pre-ATAPI-4 devices assert INTRQ
1488                 * at this state when ready to receive CDB.
1489                 */
1490
1491                /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
1492                 * The flag was turned on only for atapi devices.  No
1493                 * need to check ata_is_atapi(qc->tf.protocol) again.
1494                 */
1495                if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1496                        return ata_sff_idle_irq(ap);
1497                break;
1498        case HSM_ST_IDLE:
1499                return ata_sff_idle_irq(ap);
1500        default:
1501                break;
1502        }
1503
1504        /* check main status, clearing INTRQ if needed */
1505        status = ata_sff_irq_status(ap);
1506        if (status & ATA_BUSY) {
1507                if (hsmv_on_idle) {
1508                        /* BMDMA engine is already stopped, we're screwed */
1509                        qc->err_mask |= AC_ERR_HSM;
1510                        ap->hsm_task_state = HSM_ST_ERR;
1511                } else
1512                        return ata_sff_idle_irq(ap);
1513        }
1514
1515        /* clear irq events */
1516        if (ap->ops->sff_irq_clear)
1517                ap->ops->sff_irq_clear(ap);
1518
1519        ata_sff_hsm_move(ap, qc, status, 0);
1520
1521        return 1;       /* irq handled */
1522}
1523
1524/**
1525 *      ata_sff_port_intr - Handle SFF port interrupt
1526 *      @ap: Port on which interrupt arrived (possibly...)
1527 *      @qc: Taskfile currently active in engine
1528 *
1529 *      Handle port interrupt for given queued command.
1530 *
1531 *      LOCKING:
1532 *      spin_lock_irqsave(host lock)
1533 *
1534 *      RETURNS:
1535 *      One if interrupt was handled, zero if not (shared irq).
1536 */
1537unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1538{
1539        return __ata_sff_port_intr(ap, qc, false);
1540}
1541EXPORT_SYMBOL_GPL(ata_sff_port_intr);
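
/*
 * Example (illustrative sketch only, not part of this file): a driver with a
 * dedicated per-port IRQ line could call ata_sff_port_intr() from its own
 * handler instead of using the generic ata_sff_interrupt() below.  The
 * function name "example_port_interrupt" is a placeholder.
 */
#if 0
static irqreturn_t example_port_interrupt(int irq, void *dev_instance)
{
        struct ata_port *ap = dev_instance;
        struct ata_queued_cmd *qc;
        unsigned int handled = 0;
        unsigned long flags;

        spin_lock_irqsave(&ap->host->lock, flags);
        /* only one command can be outstanding on an SFF port */
        qc = ata_qc_from_tag(ap, ap->link.active_tag);
        if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
                handled = ata_sff_port_intr(ap, qc);
        spin_unlock_irqrestore(&ap->host->lock, flags);

        return IRQ_RETVAL(handled);
}
#endif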
1542
1543static inline irqreturn_t __ata_sff_interrupt(int irq, void *dev_instance,
1544        unsigned int (*port_intr)(struct ata_port *, struct ata_queued_cmd *))
1545{
1546        struct ata_host *host = dev_instance;
1547        bool retried = false;
1548        unsigned int i;
1549        unsigned int handled, idle, polling;
1550        unsigned long flags;
1551
1552        /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
1553        spin_lock_irqsave(&host->lock, flags);
1554
1555retry:
1556        handled = idle = polling = 0;
1557        for (i = 0; i < host->n_ports; i++) {
1558                struct ata_port *ap = host->ports[i];
1559                struct ata_queued_cmd *qc;
1560
1561                qc = ata_qc_from_tag(ap, ap->link.active_tag);
1562                if (qc) {
1563                        if (!(qc->tf.flags & ATA_TFLAG_POLLING))
1564                                handled |= port_intr(ap, qc);
1565                        else
1566                                polling |= 1 << i;
1567                } else
1568                        idle |= 1 << i;
1569        }
1570
1571        /*
1572         * If no port was expecting an IRQ but the controller is asserting
1573         * the IRQ line, the IRQ will end up disabled ("nobody cared").
1574         * Check IRQ pending status if available and clear the spurious IRQ.
1575         */
1576        if (!handled && !retried) {
1577                bool retry = false;
1578
1579                for (i = 0; i < host->n_ports; i++) {
1580                        struct ata_port *ap = host->ports[i];
1581
1582                        if (polling & (1 << i))
1583                                continue;
1584
1585                        if (!ap->ops->sff_irq_check ||
1586                            !ap->ops->sff_irq_check(ap))
1587                                continue;
1588
1589                        if (idle & (1 << i)) {
1590                                ap->ops->sff_check_status(ap);
1591                                if (ap->ops->sff_irq_clear)
1592                                        ap->ops->sff_irq_clear(ap);
1593                        } else {
1594                                /* clear INTRQ and check if BUSY cleared */
1595                                if (!(ap->ops->sff_check_status(ap) & ATA_BUSY))
1596                                        retry |= true;
1597                                /*
1598                                 * With command in flight, we can't do
1599                                 * sff_irq_clear() w/o racing with completion.
1600                                 */
1601                        }
1602                }
1603
1604                if (retry) {
1605                        retried = true;
1606                        goto retry;
1607                }
1608        }
1609
1610        spin_unlock_irqrestore(&host->lock, flags);
1611
1612        return IRQ_RETVAL(handled);
1613}
1614
1615/**
1616 *      ata_sff_interrupt - Default SFF ATA host interrupt handler
1617 *      @irq: irq line (unused)
1618 *      @dev_instance: pointer to our ata_host information structure
1619 *
1620 *      Default interrupt handler for PCI IDE devices.  Calls
1621 *      ata_sff_port_intr() for each port that is not disabled.
1622 *
1623 *      LOCKING:
1624 *      Obtains host lock during operation.
1625 *
1626 *      RETURNS:
1627 *      IRQ_NONE or IRQ_HANDLED.
1628 */
1629irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
1630{
1631        return __ata_sff_interrupt(irq, dev_instance, ata_sff_port_intr);
1632}
1633EXPORT_SYMBOL_GPL(ata_sff_interrupt);
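
/*
 * Example (illustrative sketch only): a non-PCI or platform driver would
 * typically hand this handler to ata_host_activate() when registering its
 * host.  "example_sht" is a placeholder scsi_host_template assumed to be
 * defined elsewhere by the driver.
 */
#if 0
static int example_register_host(struct ata_host *host, int irq)
{
        /* IRQF_SHARED because SFF interrupt lines are frequently shared */
        return ata_host_activate(host, irq, ata_sff_interrupt, IRQF_SHARED,
                                 &example_sht);
}
#endif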
1634
1635/**
1636 *      ata_sff_lost_interrupt  -       Check for an apparent lost interrupt
1637 *      @ap: port that appears to have timed out
1638 *
1639 *      Called from the libata error handlers when the core code suspects
1640 *      an interrupt has been lost. If it has been, complete anything we
1641 *      can and then return. The interface must support altstatus for this
1642 *      faster recovery to occur.
1643 *
1644 *      LOCKING:
1645 *      Caller holds host lock
1646 */
1647
1648void ata_sff_lost_interrupt(struct ata_port *ap)
1649{
1650        u8 status;
1651        struct ata_queued_cmd *qc;
1652
1653        /* Only one outstanding command per SFF channel */
1654        qc = ata_qc_from_tag(ap, ap->link.active_tag);
1655        /* We cannot lose an interrupt on a non-existent or polled command */
1656        if (!qc || qc->tf.flags & ATA_TFLAG_POLLING)
1657                return;
1658        /* See if the controller thinks it is still busy - if so the command
1659           isn't a lost IRQ but is still in progress */
1660        status = ata_sff_altstatus(ap);
1661        if (status & ATA_BUSY)
1662                return;
1663
1664        /* There was a command running, we are no longer busy and we have
1665           no interrupt. */
1666        ata_port_warn(ap, "lost interrupt (Status 0x%x)\n",
1667                                                                status);
1668        /* Run the host interrupt logic as if the interrupt had not been
1669           lost */
1670        ata_sff_port_intr(ap, qc);
1671}
1672EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt);
1673
1674/**
1675 *      ata_sff_freeze - Freeze SFF controller port
1676 *      @ap: port to freeze
1677 *
1678 *      Freeze SFF controller port.
1679 *
1680 *      LOCKING:
1681 *      Inherited from caller.
1682 */
1683void ata_sff_freeze(struct ata_port *ap)
1684{
1685        ap->ctl |= ATA_NIEN;
1686        ap->last_ctl = ap->ctl;
1687
1688        if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr)
1689                ata_sff_set_devctl(ap, ap->ctl);
1690
1691        /* Under certain circumstances, some controllers raise IRQ on
1692         * ATA_NIEN manipulation.  Also, many controllers fail to mask
1693         * previously pending IRQ on ATA_NIEN assertion.  Clear it.
1694         */
1695        ap->ops->sff_check_status(ap);
1696
1697        if (ap->ops->sff_irq_clear)
1698                ap->ops->sff_irq_clear(ap);
1699}
1700EXPORT_SYMBOL_GPL(ata_sff_freeze);
1701
1702/**
1703 *      ata_sff_thaw - Thaw SFF controller port
1704 *      @ap: port to thaw
1705 *
1706 *      Thaw SFF controller port.
1707 *
1708 *      LOCKING:
1709 *      Inherited from caller.
1710 */
1711void ata_sff_thaw(struct ata_port *ap)
1712{
1713        /* clear & re-enable interrupts */
1714        ap->ops->sff_check_status(ap);
1715        if (ap->ops->sff_irq_clear)
1716                ap->ops->sff_irq_clear(ap);
1717        ata_sff_irq_on(ap);
1718}
1719EXPORT_SYMBOL_GPL(ata_sff_thaw);
1720
1721/**
1722 *      ata_sff_prereset - prepare SFF link for reset
1723 *      @link: SFF link to be reset
1724 *      @deadline: deadline jiffies for the operation
1725 *
1726 *      SFF link @link is about to be reset.  Initialize it.  It first
1727 *      calls ata_std_prereset() and waits for !BSY if the port is
1728 *      being softreset.
1729 *
1730 *      LOCKING:
1731 *      Kernel thread context (may sleep)
1732 *
1733 *      RETURNS:
1734 *      0 on success, -errno otherwise.
1735 */
1736int ata_sff_prereset(struct ata_link *link, unsigned long deadline)
1737{
1738        struct ata_eh_context *ehc = &link->eh_context;
1739        int rc;
1740
1741        rc = ata_std_prereset(link, deadline);
1742        if (rc)
1743                return rc;
1744
1745        /* if we're about to do hardreset, nothing more to do */
1746        if (ehc->i.action & ATA_EH_HARDRESET)
1747                return 0;
1748
1749        /* wait for !BSY if we don't know that no device is attached */
1750        if (!ata_link_offline(link)) {
1751                rc = ata_sff_wait_ready(link, deadline);
1752                if (rc && rc != -ENODEV) {
1753                        ata_link_warn(link,
1754                                      "device not ready (errno=%d), forcing hardreset\n",
1755                                      rc);
1756                        ehc->i.action |= ATA_EH_HARDRESET;
1757                }
1758        }
1759
1760        return 0;
1761}
1762EXPORT_SYMBOL_GPL(ata_sff_prereset);
1763
1764/**
1765 *      ata_devchk - PATA device presence detection
1766 *      @ap: ATA channel to examine
1767 *      @device: Device to examine (starting at zero)
1768 *
1769 *      This technique was originally described in
1770 *      Hale Landis's ATADRVR (www.ata-atapi.com), and
1771 *      later found its way into the ATA/ATAPI spec.
1772 *
1773 *      Write a pattern to the ATA shadow registers,
1774 *      and if a device is present, it will respond by
1775 *      correctly storing and echoing back the
1776 *      ATA shadow register contents.
1777 *
1778 *      LOCKING:
1779 *      caller.
1780 */
1781static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
1782{
1783        struct ata_ioports *ioaddr = &ap->ioaddr;
1784        u8 nsect, lbal;
1785
1786        ap->ops->sff_dev_select(ap, device);
1787
1788        iowrite8(0x55, ioaddr->nsect_addr);
1789        iowrite8(0xaa, ioaddr->lbal_addr);
1790
1791        iowrite8(0xaa, ioaddr->nsect_addr);
1792        iowrite8(0x55, ioaddr->lbal_addr);
1793
1794        iowrite8(0x55, ioaddr->nsect_addr);
1795        iowrite8(0xaa, ioaddr->lbal_addr);
1796
1797        nsect = ioread8(ioaddr->nsect_addr);
1798        lbal = ioread8(ioaddr->lbal_addr);
1799
1800        if ((nsect == 0x55) && (lbal == 0xaa))
1801                return 1;       /* we found a device */
1802
1803        return 0;               /* nothing found */
1804}
1805
1806/**
1807 *      ata_sff_dev_classify - Parse returned ATA device signature
1808 *      @dev: ATA device to classify (starting at zero)
1809 *      @present: device seems present
1810 *      @r_err: Value of error register on completion
1811 *
1812 *      After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
1813 *      an ATA/ATAPI-defined set of values is placed in the ATA
1814 *      shadow registers, indicating the results of device detection
1815 *      and diagnostics.
1816 *
1817 *      Select the ATA device, and read the values from the ATA shadow
1818 *      registers.  Then parse according to the Error register value,
1819 *      and the spec-defined values examined by ata_dev_classify().
1820 *
1821 *      LOCKING:
1822 *      caller.
1823 *
1824 *      RETURNS:
1825 *      Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
1826 */
1827unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
1828                                  u8 *r_err)
1829{
1830        struct ata_port *ap = dev->link->ap;
1831        struct ata_taskfile tf;
1832        unsigned int class;
1833        u8 err;
1834
1835        ap->ops->sff_dev_select(ap, dev->devno);
1836
1837        memset(&tf, 0, sizeof(tf));
1838
1839        ap->ops->sff_tf_read(ap, &tf);
1840        err = tf.feature;
1841        if (r_err)
1842                *r_err = err;
1843
1844        /* see if device passed diags: continue and warn later */
1845        if (err == 0)
1846                /* diagnostic fail : do nothing _YET_ */
1847                dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
1848        else if (err == 1)
1849                /* do nothing */ ;
1850        else if ((dev->devno == 0) && (err == 0x81))
1851                /* do nothing */ ;
1852        else
1853                return ATA_DEV_NONE;
1854
1855        /* determine if device is ATA or ATAPI */
1856        class = ata_dev_classify(&tf);
1857
1858        if (class == ATA_DEV_UNKNOWN) {
1859                /* If the device failed diagnostic, it's likely to
1860                 * have reported incorrect device signature too.
1861                 * Assume ATA device if the device seems present but
1862                 * device signature is invalid with diagnostic
1863                 * failure.
1864                 */
1865                if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
1866                        class = ATA_DEV_ATA;
1867                else
1868                        class = ATA_DEV_NONE;
1869        } else if ((class == ATA_DEV_ATA) &&
1870                   (ap->ops->sff_check_status(ap) == 0))
1871                class = ATA_DEV_NONE;
1872
1873        return class;
1874}
1875EXPORT_SYMBOL_GPL(ata_sff_dev_classify);
1876
1877/**
1878 *      ata_sff_wait_after_reset - wait for devices to become ready after reset
1879 *      @link: SFF link which is just reset
1880 *      @devmask: mask of present devices
1881 *      @deadline: deadline jiffies for the operation
1882 *
1883 *      Wait for devices attached to SFF @link to become ready after
1884 *      reset.  This includes a preceding 150ms wait to avoid accessing
1885 *      the TF status register too early.
1886 *
1887 *      LOCKING:
1888 *      Kernel thread context (may sleep).
1889 *
1890 *      RETURNS:
1891 *      0 on success, -ENODEV if some or all of devices in @devmask
1892 *      don't seem to exist.  -errno on other errors.
1893 */
1894int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask,
1895                             unsigned long deadline)
1896{
1897        struct ata_port *ap = link->ap;
1898        struct ata_ioports *ioaddr = &ap->ioaddr;
1899        unsigned int dev0 = devmask & (1 << 0);
1900        unsigned int dev1 = devmask & (1 << 1);
1901        int rc, ret = 0;
1902
1903        ata_msleep(ap, ATA_WAIT_AFTER_RESET);
1904
1905        /* always check readiness of the master device */
1906        rc = ata_sff_wait_ready(link, deadline);
1907        /* -ENODEV means the odd clown forgot the D7 pulldown resistor
1908         * and TF status is 0xff, bail out on it too.
1909         */
1910        if (rc)
1911                return rc;
1912
1913        /* if device 1 was found in ata_devchk, wait for register
1914         * access briefly, then wait for BSY to clear.
1915         */
1916        if (dev1) {
1917                int i;
1918
1919                ap->ops->sff_dev_select(ap, 1);
1920
1921                /* Wait for register access.  Some ATAPI devices fail
1922                 * to set nsect/lbal after reset, so don't waste too
1923                 * much time on it.  We're gonna wait for !BSY anyway.
1924                 */
1925                for (i = 0; i < 2; i++) {
1926                        u8 nsect, lbal;
1927
1928                        nsect = ioread8(ioaddr->nsect_addr);
1929                        lbal = ioread8(ioaddr->lbal_addr);
1930                        if ((nsect == 1) && (lbal == 1))
1931                                break;
1932                        ata_msleep(ap, 50);     /* give drive a breather */
1933                }
1934
1935                rc = ata_sff_wait_ready(link, deadline);
1936                if (rc) {
1937                        if (rc != -ENODEV)
1938                                return rc;
1939                        ret = rc;
1940                }
1941        }
1942
1943        /* is all this really necessary? */
1944        ap->ops->sff_dev_select(ap, 0);
1945        if (dev1)
1946                ap->ops->sff_dev_select(ap, 1);
1947        if (dev0)
1948                ap->ops->sff_dev_select(ap, 0);
1949
1950        return ret;
1951}
1952EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset);
1953
1954static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
1955                             unsigned long deadline)
1956{
1957        struct ata_ioports *ioaddr = &ap->ioaddr;
1958
1959        DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
1960
1961        if (ap->ioaddr.ctl_addr) {
1962                /* software reset.  causes dev0 to be selected */
1963                iowrite8(ap->ctl, ioaddr->ctl_addr);
1964                udelay(20);     /* FIXME: flush */
1965                iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
1966                udelay(20);     /* FIXME: flush */
1967                iowrite8(ap->ctl, ioaddr->ctl_addr);
1968                ap->last_ctl = ap->ctl;
1969        }
1970
1971        /* wait for the port to become ready */
1972        return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
1973}
1974
1975/**
1976 *      ata_sff_softreset - reset host port via ATA SRST
1977 *      @link: ATA link to reset
1978 *      @classes: resulting classes of attached devices
1979 *      @deadline: deadline jiffies for the operation
1980 *
1981 *      Reset host port using ATA SRST.
1982 *
1983 *      LOCKING:
1984 *      Kernel thread context (may sleep)
1985 *
1986 *      RETURNS:
1987 *      0 on success, -errno otherwise.
1988 */
1989int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
1990                      unsigned long deadline)
1991{
1992        struct ata_port *ap = link->ap;
1993        unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
1994        unsigned int devmask = 0;
1995        int rc;
1996        u8 err;
1997
1998        DPRINTK("ENTER\n");
1999
2000        /* determine if device 0/1 are present */
2001        if (ata_devchk(ap, 0))
2002                devmask |= (1 << 0);
2003        if (slave_possible && ata_devchk(ap, 1))
2004                devmask |= (1 << 1);
2005
2006        /* select device 0 again */
2007        ap->ops->sff_dev_select(ap, 0);
2008
2009        /* issue bus reset */
2010        DPRINTK("about to softreset, devmask=%x\n", devmask);
2011        rc = ata_bus_softreset(ap, devmask, deadline);
2012        /* if link is occupied, -ENODEV too is an error */
2013        if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
2014                ata_link_err(link, "SRST failed (errno=%d)\n", rc);
2015                return rc;
2016        }
2017
2018        /* determine by signature whether we have ATA or ATAPI devices */
2019        classes[0] = ata_sff_dev_classify(&link->device[0],
2020                                          devmask & (1 << 0), &err);
2021        if (slave_possible && err != 0x81)
2022                classes[1] = ata_sff_dev_classify(&link->device[1],
2023                                                  devmask & (1 << 1), &err);
2024
2025        DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2026        return 0;
2027}
2028EXPORT_SYMBOL_GPL(ata_sff_softreset);
2029
2030/**
2031 *      sata_sff_hardreset - reset host port via SATA phy reset
2032 *      @link: link to reset
2033 *      @class: resulting class of attached device
2034 *      @deadline: deadline jiffies for the operation
2035 *
2036 *      SATA phy-reset host port using DET bits of SControl register,
2037 *      wait for !BSY and classify the attached device.
2038 *
2039 *      LOCKING:
2040 *      Kernel thread context (may sleep)
2041 *
2042 *      RETURNS:
2043 *      0 on success, -errno otherwise.
2044 */
2045int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
2046                       unsigned long deadline)
2047{
2048        struct ata_eh_context *ehc = &link->eh_context;
2049        const unsigned long *timing = sata_ehc_deb_timing(ehc);
2050        bool online;
2051        int rc;
2052
2053        rc = sata_link_hardreset(link, timing, deadline, &online,
2054                                 ata_sff_check_ready);
2055        if (online)
2056                *class = ata_sff_dev_classify(link->device, 1, NULL);
2057
2058        DPRINTK("EXIT, class=%u\n", *class);
2059        return rc;
2060}
2061EXPORT_SYMBOL_GPL(sata_sff_hardreset);
2062
2063/**
2064 *      ata_sff_postreset - SFF postreset callback
2065 *      @link: the target SFF ata_link
2066 *      @classes: classes of attached devices
2067 *
2068 *      This function is invoked after a successful reset.  It first
2069 *      calls ata_std_postreset() and performs SFF specific postreset
2070 *      processing.
2071 *
2072 *      LOCKING:
2073 *      Kernel thread context (may sleep)
2074 */
2075void ata_sff_postreset(struct ata_link *link, unsigned int *classes)
2076{
2077        struct ata_port *ap = link->ap;
2078
2079        ata_std_postreset(link, classes);
2080
2081        /* is double-select really necessary? */
2082        if (classes[0] != ATA_DEV_NONE)
2083                ap->ops->sff_dev_select(ap, 1);
2084        if (classes[1] != ATA_DEV_NONE)
2085                ap->ops->sff_dev_select(ap, 0);
2086
2087        /* bail out if no device is present */
2088        if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2089                DPRINTK("EXIT, no device\n");
2090                return;
2091        }
2092
2093        /* set up device control */
2094        if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) {
2095                ata_sff_set_devctl(ap, ap->ctl);
2096                ap->last_ctl = ap->ctl;
2097        }
2098}
2099EXPORT_SYMBOL_GPL(ata_sff_postreset);
2100
2101/**
2102 *      ata_sff_drain_fifo - Stock FIFO drain logic for SFF controllers
2103 *      @qc: command
2104 *
2105 *      Drain the FIFO and device of any stuck data following a command
2106 *      failing to complete. In some cases this is necessary before a
2107 *      reset will recover the device.
2108 *
2109 */
2110
2111void ata_sff_drain_fifo(struct ata_queued_cmd *qc)
2112{
2113        int count;
2114        struct ata_port *ap;
2115
2116        /* We only need to flush incoming data when a command was running */
2117        if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
2118                return;
2119
2120        ap = qc->ap;
2121        /* Drain up to 64K of data before we give up this recovery method */
2122        for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ)
2123                                                && count < 65536; count += 2)
2124                ioread16(ap->ioaddr.data_addr);
2125
2126        /* Can become DEBUG later */
2127        if (count)
2128                ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count);
2129
2130}
2131EXPORT_SYMBOL_GPL(ata_sff_drain_fifo);
2132
2133/**
2134 *      ata_sff_error_handler - Stock error handler for SFF controller
2135 *      @ap: port to handle error for
2136 *
2137 *      Stock error handler for SFF controller.  It can handle both
2138 *      PATA and SATA controllers.  Many controllers should be able to
2139 *      use this EH as-is or with some added handling before and
2140 *      after.
2141 *
2142 *      LOCKING:
2143 *      Kernel thread context (may sleep)
2144 */
2145void ata_sff_error_handler(struct ata_port *ap)
2146{
2147        ata_reset_fn_t softreset = ap->ops->softreset;
2148        ata_reset_fn_t hardreset = ap->ops->hardreset;
2149        struct ata_queued_cmd *qc;
2150        unsigned long flags;
2151
2152        qc = __ata_qc_from_tag(ap, ap->link.active_tag);
2153        if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
2154                qc = NULL;
2155
2156        spin_lock_irqsave(ap->lock, flags);
2157
2158        /*
2159         * We *MUST* do FIFO draining before we issue a reset as
2160         * several devices helpfully clear their internal state and
2161         * will lock solid if we touch the data port post reset. Pass
2162         * qc in case anyone wants to do different PIO/DMA recovery or
2163         * has per-command fixups.
2164         */
2165        if (ap->ops->sff_drain_fifo)
2166                ap->ops->sff_drain_fifo(qc);
2167
2168        spin_unlock_irqrestore(ap->lock, flags);
2169
2170        /* ignore built-in hardresets if SCR access is not available */
2171        if ((hardreset == sata_std_hardreset ||
2172             hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link))
2173                hardreset = NULL;
2174
2175        ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,
2176                  ap->ops->postreset);
2177}
2178EXPORT_SYMBOL_GPL(ata_sff_error_handler);
2179
2180/**
2181 *      ata_sff_std_ports - initialize ioaddr with standard port offsets.
2182 *      @ioaddr: IO address structure to be initialized
2183 *
2184 *      Utility function which initializes data_addr, error_addr,
2185 *      feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
2186 *      device_addr, status_addr, and command_addr to standard offsets
2187 *      relative to cmd_addr.
2188 *
2189 *      Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
2190 */
2191void ata_sff_std_ports(struct ata_ioports *ioaddr)
2192{
2193        ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
2194        ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
2195        ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
2196        ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
2197        ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
2198        ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
2199        ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
2200        ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
2201        ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
2202        ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
2203}
2204EXPORT_SYMBOL_GPL(ata_sff_std_ports);
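
/*
 * Example (illustrative sketch only): an MMIO driver that maps its taskfile
 * block itself might fill in cmd_addr and the control register address and
 * let ata_sff_std_ports() derive the remaining register addresses.  The
 * +0x10 control offset and the function name are assumptions for
 * illustration, not a real device layout.
 */
#if 0
static void example_setup_ioaddr(struct ata_port *ap, void __iomem *base)
{
        ap->ioaddr.cmd_addr = base;
        ap->ioaddr.altstatus_addr = base + 0x10;
        ap->ioaddr.ctl_addr = base + 0x10;
        ata_sff_std_ports(&ap->ioaddr);
}
#endif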
2205
2206#ifdef CONFIG_PCI
2207
2208static int ata_resources_present(struct pci_dev *pdev, int port)
2209{
2210        int i;
2211
2212        /* Check the PCI resources for this channel are enabled */
2213        port = port * 2;
2214        for (i = 0; i < 2; i++) {
2215                if (pci_resource_start(pdev, port + i) == 0 ||
2216                    pci_resource_len(pdev, port + i) == 0)
2217                        return 0;
2218        }
2219        return 1;
2220}
2221
2222/**
2223 *      ata_pci_sff_init_host - acquire native PCI ATA resources and init host
2224 *      @host: target ATA host
2225 *
2226 *      Acquire native PCI ATA resources for @host and initialize the
2227 *      first two ports of @host accordingly.  Ports marked dummy are
2228 *      skipped and allocation failure makes the port dummy.
2229 *
2230 *      Note that native PCI resources are valid even for legacy hosts
2231 *      as we fix up pdev resources array early in boot, so this
2232 *      function can be used for both native and legacy SFF hosts.
2233 *
2234 *      LOCKING:
2235 *      Inherited from calling layer (may sleep).
2236 *
2237 *      RETURNS:
2238 *      0 if at least one port is initialized, -ENODEV if no port is
2239 *      available.
2240 */
2241int ata_pci_sff_init_host(struct ata_host *host)
2242{
2243        struct device *gdev = host->dev;
2244        struct pci_dev *pdev = to_pci_dev(gdev);
2245        unsigned int mask = 0;
2246        int i, rc;
2247
2248        /* request, iomap BARs and init port addresses accordingly */
2249        for (i = 0; i < 2; i++) {
2250                struct ata_port *ap = host->ports[i];
2251                int base = i * 2;
2252                void __iomem * const *iomap;
2253
2254                if (ata_port_is_dummy(ap))
2255                        continue;
2256
2257                /* Discard disabled ports.  Some controllers show
2258                 * their unused channels this way.  Disabled ports are
2259                 * made dummy.
2260                 */
2261                if (!ata_resources_present(pdev, i)) {
2262                        ap->ops = &ata_dummy_port_ops;
2263                        continue;
2264                }
2265
2266                rc = pcim_iomap_regions(pdev, 0x3 << base,
2267                                        dev_driver_string(gdev));
2268                if (rc) {
2269                        dev_warn(gdev,
2270                                 "failed to request/iomap BARs for port %d (errno=%d)\n",
2271                                 i, rc);
2272                        if (rc == -EBUSY)
2273                                pcim_pin_device(pdev);
2274                        ap->ops = &ata_dummy_port_ops;
2275                        continue;
2276                }
2277                host->iomap = iomap = pcim_iomap_table(pdev);
2278
2279                ap->ioaddr.cmd_addr = iomap[base];
2280                ap->ioaddr.altstatus_addr =
2281                ap->ioaddr.ctl_addr = (void __iomem *)
2282                        ((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);
2283                ata_sff_std_ports(&ap->ioaddr);
2284
2285                ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
2286                        (unsigned long long)pci_resource_start(pdev, base),
2287                        (unsigned long long)pci_resource_start(pdev, base + 1));
2288
2289                mask |= 1 << i;
2290        }
2291
2292        if (!mask) {
2293                dev_err(gdev, "no available native port\n");
2294                return -ENODEV;
2295        }
2296
2297        return 0;
2298}
2299EXPORT_SYMBOL_GPL(ata_pci_sff_init_host);
2300
2301/**
2302 *      ata_pci_sff_prepare_host - helper to prepare PCI PIO-only SFF ATA host
2303 *      @pdev: target PCI device
2304 *      @ppi: array of port_info, must be enough for two ports
2305 *      @r_host: out argument for the initialized ATA host
2306 *
2307 *      Helper to allocate PIO-only SFF ATA host for @pdev, acquire
2308 *      all PCI resources and initialize it accordingly in one go.
2309 *
2310 *      LOCKING:
2311 *      Inherited from calling layer (may sleep).
2312 *
2313 *      RETURNS:
2314 *      0 on success, -errno otherwise.
2315 */
2316int ata_pci_sff_prepare_host(struct pci_dev *pdev,
2317                             const struct ata_port_info * const *ppi,
2318                             struct ata_host **r_host)
2319{
2320        struct ata_host *host;
2321        int rc;
2322
2323        if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
2324                return -ENOMEM;
2325
2326        host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
2327        if (!host) {
2328                dev_err(&pdev->dev, "failed to allocate ATA host\n");
2329                rc = -ENOMEM;
2330                goto err_out;
2331        }
2332
2333        rc = ata_pci_sff_init_host(host);
2334        if (rc)
2335                goto err_out;
2336
2337        devres_remove_group(&pdev->dev, NULL);
2338        *r_host = host;
2339        return 0;
2340
2341err_out:
2342        devres_release_group(&pdev->dev, NULL);
2343        return rc;
2344}
2345EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host);
2346
2347/**
2348 *      ata_pci_sff_activate_host - start SFF host, request IRQ and register it
2349 *      @host: target SFF ATA host
2350 *      @irq_handler: irq_handler used when requesting IRQ(s)
2351 *      @sht: scsi_host_template to use when registering the host
2352 *
2353 *      This is the counterpart of ata_host_activate() for SFF ATA
2354 *      hosts.  This separate helper is necessary because SFF hosts
2355 *      use two separate interrupts in legacy mode.
2356 *
2357 *      LOCKING:
2358 *      Inherited from calling layer (may sleep).
2359 *
2360 *      RETURNS:
2361 *      0 on success, -errno otherwise.
2362 */
2363int ata_pci_sff_activate_host(struct ata_host *host,
2364                              irq_handler_t irq_handler,
2365                              struct scsi_host_template *sht)
2366{
2367        struct device *dev = host->dev;
2368        struct pci_dev *pdev = to_pci_dev(dev);
2369        const char *drv_name = dev_driver_string(host->dev);
2370        int legacy_mode = 0, rc;
2371
2372        rc = ata_host_start(host);
2373        if (rc)
2374                return rc;
2375
2376        if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
2377                u8 tmp8, mask = 0;
2378
2379                /*
2380                 * ATA spec says we should use legacy mode when one
2381                 * port is in legacy mode, but disabled ports on some
2382                 * PCI hosts appear as fixed legacy ports, e.g. SB600/700
2383                 * on which the secondary port is not wired, so
2384                 * ignore ports that are marked as 'dummy' during
2385                 * this check.
2386                 */
2387                pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
2388                if (!ata_port_is_dummy(host->ports[0]))
2389                        mask |= (1 << 0);
2390                if (!ata_port_is_dummy(host->ports[1]))
2391                        mask |= (1 << 2);
2392                if ((tmp8 & mask) != mask)
2393                        legacy_mode = 1;
2394        }
2395
2396        if (!devres_open_group(dev, NULL, GFP_KERNEL))
2397                return -ENOMEM;
2398
2399        if (!legacy_mode && pdev->irq) {
2400                int i;
2401
2402                rc = devm_request_irq(dev, pdev->irq, irq_handler,
2403                                      IRQF_SHARED, drv_name, host);
2404                if (rc)
2405                        goto out;
2406
2407                for (i = 0; i < 2; i++) {
2408                        if (ata_port_is_dummy(host->ports[i]))
2409                                continue;
2410                        ata_port_desc(host->ports[i], "irq %d", pdev->irq);
2411                }
2412        } else if (legacy_mode) {
2413                if (!ata_port_is_dummy(host->ports[0])) {
2414                        rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
2415                                              irq_handler, IRQF_SHARED,
2416                                              drv_name, host);
2417                        if (rc)
2418                                goto out;
2419
2420                        ata_port_desc(host->ports[0], "irq %d",
2421                                      ATA_PRIMARY_IRQ(pdev));
2422                }
2423
2424                if (!ata_port_is_dummy(host->ports[1])) {
2425                        rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
2426                                              irq_handler, IRQF_SHARED,
2427                                              drv_name, host);
2428                        if (rc)
2429                                goto out;
2430
2431                        ata_port_desc(host->ports[1], "irq %d",
2432                                      ATA_SECONDARY_IRQ(pdev));
2433                }
2434        }
2435
2436        rc = ata_host_register(host, sht);
2437out:
2438        if (rc == 0)
2439                devres_remove_group(dev, NULL);
2440        else
2441                devres_release_group(dev, NULL);
2442
2443        return rc;
2444}
2445EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
2446
2447static const struct ata_port_info *ata_sff_find_valid_pi(
2448                                        const struct ata_port_info * const *ppi)
2449{
2450        int i;
2451
2452        /* look up the first valid port_info */
2453        for (i = 0; i < 2 && ppi[i]; i++)
2454                if (ppi[i]->port_ops != &ata_dummy_port_ops)
2455                        return ppi[i];
2456
2457        return NULL;
2458}
2459
2460static int ata_pci_init_one(struct pci_dev *pdev,
2461                const struct ata_port_info * const *ppi,
2462                struct scsi_host_template *sht, void *host_priv,
2463                int hflags, bool bmdma)
2464{
2465        struct device *dev = &pdev->dev;
2466        const struct ata_port_info *pi;
2467        struct ata_host *host = NULL;
2468        int rc;
2469
2470        DPRINTK("ENTER\n");
2471
2472        pi = ata_sff_find_valid_pi(ppi);
2473        if (!pi) {
2474                dev_err(&pdev->dev, "no valid port_info specified\n");
2475                return -EINVAL;
2476        }
2477
2478        if (!devres_open_group(dev, NULL, GFP_KERNEL))
2479                return -ENOMEM;
2480
2481        rc = pcim_enable_device(pdev);
2482        if (rc)
2483                goto out;
2484
2485#ifdef CONFIG_ATA_BMDMA
2486        if (bmdma)
2487                /* prepare and activate BMDMA host */
2488                rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
2489        else
2490#endif
2491                /* prepare and activate SFF host */
2492                rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
2493        if (rc)
2494                goto out;
2495        host->private_data = host_priv;
2496        host->flags |= hflags;
2497
2498#ifdef CONFIG_ATA_BMDMA
2499        if (bmdma) {
2500                pci_set_master(pdev);
2501                rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
2502        } else
2503#endif
2504                rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
2505out:
2506        if (rc == 0)
2507                devres_remove_group(&pdev->dev, NULL);
2508        else
2509                devres_release_group(&pdev->dev, NULL);
2510
2511        return rc;
2512}
2513
2514/**
2515 *      ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller
2516 *      @pdev: Controller to be initialized
2517 *      @ppi: array of port_info, must be enough for two ports
2518 *      @sht: scsi_host_template to use when registering the host
2519 *      @host_priv: host private_data
2520 *      @hflag: host flags
2521 *
2522 *      This is a helper function which can be called from a driver's
2523 *      xxx_init_one() probe function if the hardware uses traditional
2524 *      IDE taskfile registers and is PIO only.
2525 *
2526 *      ASSUMPTION:
2527 *      Nobody makes a single channel controller that appears solely as
2528 *      the secondary legacy port on PCI.
2529 *
2530 *      LOCKING:
2531 *      Inherited from PCI layer (may sleep).
2532 *
2533 *      RETURNS:
2534 *      Zero on success, negative errno-based value on error.
2535 */
2536int ata_pci_sff_init_one(struct pci_dev *pdev,
2537                 const struct ata_port_info * const *ppi,
2538                 struct scsi_host_template *sht, void *host_priv, int hflag)
2539{
2540        return ata_pci_init_one(pdev, ppi, sht, host_priv, hflag, 0);
2541}
2542EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
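
/*
 * Example (illustrative sketch only): a minimal PIO-only PCI driver probe
 * built on ata_pci_sff_init_one().  The template name, port_info values and
 * PIO mask below are placeholders, not taken from any real driver.
 */
#if 0
static struct scsi_host_template example_sht = {
        ATA_PIO_SHT("example_pata"),
};

static const struct ata_port_info example_port_info = {
        .flags          = ATA_FLAG_SLAVE_POSS,
        .pio_mask       = ATA_PIO4,
        .port_ops       = &ata_sff_port_ops,
};

static int example_init_one(struct pci_dev *pdev,
                            const struct pci_device_id *id)
{
        const struct ata_port_info *ppi[] = { &example_port_info, NULL };

        return ata_pci_sff_init_one(pdev, ppi, &example_sht, NULL, 0);
}
#endif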
2543
2544#endif /* CONFIG_PCI */
2545
2546/*
2547 *      BMDMA support
2548 */
2549
2550#ifdef CONFIG_ATA_BMDMA
2551
2552const struct ata_port_operations ata_bmdma_port_ops = {
2553        .inherits               = &ata_sff_port_ops,
2554
2555        .error_handler          = ata_bmdma_error_handler,
2556        .post_internal_cmd      = ata_bmdma_post_internal_cmd,
2557
2558        .qc_prep                = ata_bmdma_qc_prep,
2559        .qc_issue               = ata_bmdma_qc_issue,
2560
2561        .sff_irq_clear          = ata_bmdma_irq_clear,
2562        .bmdma_setup            = ata_bmdma_setup,
2563        .bmdma_start            = ata_bmdma_start,
2564        .bmdma_stop             = ata_bmdma_stop,
2565        .bmdma_status           = ata_bmdma_status,
2566
2567        .port_start             = ata_bmdma_port_start,
2568};
2569EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
2570
2571const struct ata_port_operations ata_bmdma32_port_ops = {
2572        .inherits               = &ata_bmdma_port_ops,
2573
2574        .sff_data_xfer          = ata_sff_data_xfer32,
2575        .port_start             = ata_bmdma_port_start32,
2576};
2577EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
2578
2579/**
2580 *      ata_bmdma_fill_sg - Fill PCI IDE PRD table
2581 *      @qc: Metadata associated with taskfile to be transferred
2582 *
2583 *      Fill PCI IDE PRD (scatter-gather) table with segments
2584 *      associated with the current disk command.
2585 *
2586 *      LOCKING:
2587 *      spin_lock_irqsave(host lock)
2588 *
2589 */
2590static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc)
2591{
2592        struct ata_port *ap = qc->ap;
2593        struct ata_bmdma_prd *prd = ap->bmdma_prd;
2594        struct scatterlist *sg;
2595        unsigned int si, pi;
2596
2597        pi = 0;
2598        for_each_sg(qc->sg, sg, qc->n_elem, si) {
2599                u32 addr, offset;
2600                u32 sg_len, len;
2601
2602                /* determine if physical DMA addr spans 64K boundary.
2603                 * Note h/w doesn't support 64-bit, so we unconditionally
2604                 * truncate dma_addr_t to u32.
2605                 */
2606                addr = (u32) sg_dma_address(sg);
2607                sg_len = sg_dma_len(sg);
2608
2609                while (sg_len) {
2610                        offset = addr & 0xffff;
2611                        len = sg_len;
2612                        if ((offset + sg_len) > 0x10000)
2613                                len = 0x10000 - offset;
2614
2615                        prd[pi].addr = cpu_to_le32(addr);
2616                        prd[pi].flags_len = cpu_to_le32(len & 0xffff);
2617                        VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
2618
2619                        pi++;
2620                        sg_len -= len;
2621                        addr += len;
2622                }
2623        }
2624
2625        prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2626}
2627
2628/**
2629 *      ata_bmdma_fill_sg_dumb - Fill PCI IDE PRD table
2630 *      @qc: Metadata associated with taskfile to be transferred
2631 *
2632 *      Fill PCI IDE PRD (scatter-gather) table with segments
2633 *      associated with the current disk command. Perform the fill
2634 *      so that we avoid writing any 64K-length PRD records for
2635 *      controllers that don't follow the spec.
2636 *
2637 *      LOCKING:
2638 *      spin_lock_irqsave(host lock)
2639 *
2640 */
2641static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
2642{
2643        struct ata_port *ap = qc->ap;
2644        struct ata_bmdma_prd *prd = ap->bmdma_prd;
2645        struct scatterlist *sg;
2646        unsigned int si, pi;
2647
2648        pi = 0;
2649        for_each_sg(qc->sg, sg, qc->n_elem, si) {
2650                u32 addr, offset;
2651                u32 sg_len, len, blen;
2652
2653                /* determine if physical DMA addr spans 64K boundary.
2654                 * Note h/w doesn't support 64-bit, so we unconditionally
2655                 * truncate dma_addr_t to u32.
2656                 */
2657                addr = (u32) sg_dma_address(sg);
2658                sg_len = sg_dma_len(sg);
2659
2660                while (sg_len) {
2661                        offset = addr & 0xffff;
2662                        len = sg_len;
2663                        if ((offset + sg_len) > 0x10000)
2664                                len = 0x10000 - offset;
2665
2666                        blen = len & 0xffff;
2667                        prd[pi].addr = cpu_to_le32(addr);
2668                        if (blen == 0) {
2669                                /* Some PATA chipsets like the CS5530 can't
2670                                   cope with 0x0000 meaning 64K as the spec
2671                                   says */
2672                                prd[pi].flags_len = cpu_to_le32(0x8000);
2673                                blen = 0x8000;
2674                                prd[++pi].addr = cpu_to_le32(addr + 0x8000);
2675                        }
2676                        prd[pi].flags_len = cpu_to_le32(blen);
2677                        VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
2678
2679                        pi++;
2680                        sg_len -= len;
2681                        addr += len;
2682                }
2683        }
2684
2685        prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2686}
2687
2688/**
2689 *      ata_bmdma_qc_prep - Prepare taskfile for submission
2690 *      @qc: Metadata associated with taskfile to be prepared
2691 *
2692 *      Prepare ATA taskfile for submission.
2693 *
2694 *      LOCKING:
2695 *      spin_lock_irqsave(host lock)
2696 */
2697enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
2698{
2699        if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2700                return AC_ERR_OK;
2701
2702        ata_bmdma_fill_sg(qc);
2703
2704        return AC_ERR_OK;
2705}
2706EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
2707
2708/**
2709 *      ata_bmdma_dumb_qc_prep - Prepare taskfile for submission
2710 *      @qc: Metadata associated with taskfile to be prepared
2711 *
2712 *      Prepare ATA taskfile for submission.
2713 *
2714 *      LOCKING:
2715 *      spin_lock_irqsave(host lock)
2716 */
2717enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
2718{
2719        if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2720                return AC_ERR_OK;
2721
2722        ata_bmdma_fill_sg_dumb(qc);
2723
2724        return AC_ERR_OK;
2725}
2726EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
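
/*
 * Example (illustrative sketch only): a driver for a controller that cannot
 * handle 64K PRD entries would wire up the "dumb" prep routine in its
 * port_operations.  "example_port_ops" is a placeholder name.
 */
#if 0
static struct ata_port_operations example_port_ops = {
        .inherits       = &ata_bmdma_port_ops,
        .qc_prep        = ata_bmdma_dumb_qc_prep,
};
#endif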
2727
2728/**
2729 *      ata_bmdma_qc_issue - issue taskfile to a BMDMA controller
2730 *      @qc: command to issue to device
2731 *
2732 *      This function issues a PIO, NODATA or DMA command to a
2733 *      SFF/BMDMA controller.  PIO and NODATA are handled by
2734 *      ata_sff_qc_issue().
2735 *
2736 *      LOCKING:
2737 *      spin_lock_irqsave(host lock)
2738 *
2739 *      RETURNS:
2740 *      Zero on success, AC_ERR_* mask on failure
2741 */
2742unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
2743{
2744        struct ata_port *ap = qc->ap;
2745        struct ata_link *link = qc->dev->link;
2746
2747        /* defer PIO handling to sff_qc_issue */
2748        if (!ata_is_dma(qc->tf.protocol))
2749                return ata_sff_qc_issue(qc);
2750
2751        /* select the device */
2752        ata_dev_select(ap, qc->dev->devno, 1, 0);
2753
2754        /* start the command */
2755        switch (qc->tf.protocol) {
2756        case ATA_PROT_DMA:
2757                WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
2758
2759                ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
2760                ap->ops->bmdma_setup(qc);           /* set up bmdma */
2761                ap->ops->bmdma_start(qc);           /* initiate bmdma */
2762                ap->hsm_task_state = HSM_ST_LAST;
2763                break;
2764
2765        case ATAPI_PROT_DMA:
2766                WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
2767
2768                ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
2769                ap->ops->bmdma_setup(qc);           /* set up bmdma */
2770                ap->hsm_task_state = HSM_ST_FIRST;
2771
2772                /* send cdb by polling if no cdb interrupt */
2773                if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
2774                        ata_sff_queue_pio_task(link, 0);
2775                break;
2776
2777        default:
2778                WARN_ON(1);
2779                return AC_ERR_SYSTEM;
2780        }
2781
2782        return 0;
2783}
2784EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue);
2785
2786/**
2787 *      ata_bmdma_port_intr - Handle BMDMA port interrupt
2788 *      @ap: Port on which interrupt arrived (possibly...)
2789 *      @qc: Taskfile currently active in engine
2790 *
2791 *      Handle port interrupt for given queued command.
2792 *
2793 *      LOCKING:
2794 *      spin_lock_irqsave(host lock)
2795 *
2796 *      RETURNS:
2797 *      One if interrupt was handled, zero if not (shared irq).
2798 */
2799unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
2800{
2801        struct ata_eh_info *ehi = &ap->link.eh_info;
2802        u8 host_stat = 0;
2803        bool bmdma_stopped = false;
2804        unsigned int handled;
2805
2806        if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) {
2807                /* check status of DMA engine */
2808                host_stat = ap->ops->bmdma_status(ap);
2809                VPRINTK("ata%u: host_stat 0x%X\n", ap->print_id, host_stat);
2810
2811                /* if it's not our irq... */
2812                if (!(host_stat & ATA_DMA_INTR))
2813                        return ata_sff_idle_irq(ap);
2814
2815                /* before we do anything else, clear DMA-Start bit */
2816                ap->ops->bmdma_stop(qc);
2817                bmdma_stopped = true;
2818
2819                if (unlikely(host_stat & ATA_DMA_ERR)) {
2820                        /* error when transferring data to/from memory */
2821                        qc->err_mask |= AC_ERR_HOST_BUS;
2822                        ap->hsm_task_state = HSM_ST_ERR;
2823                }
2824        }
2825
2826        handled = __ata_sff_port_intr(ap, qc, bmdma_stopped);
2827
2828        if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
2829                ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2830
2831        return handled;
2832}
2833EXPORT_SYMBOL_GPL(ata_bmdma_port_intr);
2834
2835/**
2836 *      ata_bmdma_interrupt - Default BMDMA ATA host interrupt handler
2837 *      @irq: irq line (unused)
2838 *      @dev_instance: pointer to our ata_host information structure
2839 *
2840 *      Default interrupt handler for PCI IDE devices.  Calls
2841 *      ata_bmdma_port_intr() for each port that is not disabled.
2842 *
2843 *      LOCKING:
2844 *      Obtains host lock during operation.
2845 *
2846 *      RETURNS:
2847 *      IRQ_NONE or IRQ_HANDLED.
2848 */
2849irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance)
2850{
2851        return __ata_sff_interrupt(irq, dev_instance, ata_bmdma_port_intr);
2852}
2853EXPORT_SYMBOL_GPL(ata_bmdma_interrupt);
2854
2855/**
2856 *      ata_bmdma_error_handler - Stock error handler for BMDMA controller
2857 *      @ap: port to handle error for
2858 *
2859 *      Stock error handler for BMDMA controller.  It can handle both
2860 *      PATA and SATA controllers.  Most BMDMA controllers should be
2861 *      able to use this EH as-is or with some added handling before
2862 *      and after.
2863 *
2864 *      LOCKING:
2865 *      Kernel thread context (may sleep)
2866 */
2867void ata_bmdma_error_handler(struct ata_port *ap)
2868{
2869        struct ata_queued_cmd *qc;
2870        unsigned long flags;
2871        bool thaw = false;
2872
2873        qc = __ata_qc_from_tag(ap, ap->link.active_tag);
2874        if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
2875                qc = NULL;
2876
2877        /* reset PIO HSM and stop DMA engine */
2878        spin_lock_irqsave(ap->lock, flags);
2879
2880        if (qc && ata_is_dma(qc->tf.protocol)) {
2881                u8 host_stat;
2882
2883                host_stat = ap->ops->bmdma_status(ap);
2884
2885                /* BMDMA controllers indicate host bus error by
2886                 * setting DMA_ERR bit and timing out.  As it wasn't
2887                 * really a timeout event, adjust error mask and
2888                 * cancel frozen state.
2889                 */
2890                if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
2891                        qc->err_mask = AC_ERR_HOST_BUS;
2892                        thaw = true;
2893                }
2894
2895                ap->ops->bmdma_stop(qc);
2896
2897                /* if we're gonna thaw, make sure IRQ is clear */
2898                if (thaw) {
2899                        ap->ops->sff_check_status(ap);
2900                        if (ap->ops->sff_irq_clear)
2901                                ap->ops->sff_irq_clear(ap);
2902                }
2903        }
2904
2905        spin_unlock_irqrestore(ap->lock, flags);
2906
2907        if (thaw)
2908                ata_eh_thaw_port(ap);
2909
2910        ata_sff_error_handler(ap);
2911}
2912EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
2913
2914/**
2915 *      ata_bmdma_post_internal_cmd - Stock post_internal_cmd for BMDMA
2916 *      @qc: internal command to clean up
2917 *
2918 *      LOCKING:
2919 *      Kernel thread context (may sleep)
2920 */
2921void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
2922{
2923        struct ata_port *ap = qc->ap;
2924        unsigned long flags;
2925
2926        if (ata_is_dma(qc->tf.protocol)) {
2927                spin_lock_irqsave(ap->lock, flags);
2928                ap->ops->bmdma_stop(qc);
2929                spin_unlock_irqrestore(ap->lock, flags);
2930        }
2931}
2932EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
2933
2934/**
2935 *      ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
2936 *      @ap: Port associated with this ATA transaction.
2937 *
2938 *      Clear interrupt and error flags in DMA status register.
2939 *
2940 *      May be used as the irq_clear() entry in ata_port_operations.
2941 *
2942 *      LOCKING:
2943 *      spin_lock_irqsave(host lock)
2944 */
2945void ata_bmdma_irq_clear(struct ata_port *ap)
2946{
2947        void __iomem *mmio = ap->ioaddr.bmdma_addr;
2948
2949        if (!mmio)
2950                return;
2951
2952        iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
2953}
2954EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
2955
2956/**
2957 *      ata_bmdma_setup - Set up PCI IDE BMDMA transaction
2958 *      @qc: Info associated with this ATA transaction.
2959 *
2960 *      LOCKING:
2961 *      spin_lock_irqsave(host lock)
2962 */
2963void ata_bmdma_setup(struct ata_queued_cmd *qc)
2964{
2965        struct ata_port *ap = qc->ap;
2966        unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
2967        u8 dmactl;
2968
2969        /* load PRD table addr. */
2970        mb();   /* make sure PRD table writes are visible to controller */
2971        iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2972
2973        /* specify data direction, triple-check start bit is clear */
2974        dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2975        dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
2976        if (!rw)
2977                dmactl |= ATA_DMA_WR;
2978        iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2979
2980        /* issue r/w command */
2981        ap->ops->sff_exec_command(ap, &qc->tf);
2982}
2983EXPORT_SYMBOL_GPL(ata_bmdma_setup);
2984
2985/**
2986 *      ata_bmdma_start - Start a PCI IDE BMDMA transaction
2987 *      @qc: Info associated with this ATA transaction.
2988 *
2989 *      LOCKING:
2990 *      spin_lock_irqsave(host lock)
2991 */
2992void ata_bmdma_start(struct ata_queued_cmd *qc)
2993{
2994        struct ata_port *ap = qc->ap;
2995        u8 dmactl;
2996
2997        /* start host DMA transaction */
2998        dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2999        iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3000
3001        /* Strictly, one may wish to issue an ioread8() here, to
3002         * flush the mmio write.  However, control also passes
3003         * to the hardware at this point, and it will interrupt
3004         * us when we are to resume control.  So, in effect,
3005         * we don't care when the mmio write flushes.
3006         * Further, a read of the DMA status register _immediately_
3007         * following the write may not return what certain flaky hardware
3008         * expects, so I think it is best not to add a readb() without
3009         * first auditing all the MMIO ATA cards/mobos.
3010         * Or maybe I'm just being paranoid.
3011         *
3012         * FIXME: The posting of this write means I/O starts are
3013         * unnecessarily delayed for MMIO
3014         */
3015}
3016EXPORT_SYMBOL_GPL(ata_bmdma_start);
3017
3018/**
3019 *      ata_bmdma_stop - Stop PCI IDE BMDMA transfer
3020 *      @qc: Command we are ending DMA for
3021 *
3022 *      Clears the ATA_DMA_START flag in the dma control register
3023 *
3024 *      May be used as the bmdma_stop() entry in ata_port_operations.
3025 *
3026 *      LOCKING:
3027 *      spin_lock_irqsave(host lock)
3028 */
3029void ata_bmdma_stop(struct ata_queued_cmd *qc)
3030{
3031        struct ata_port *ap = qc->ap;
3032        void __iomem *mmio = ap->ioaddr.bmdma_addr;
3033
3034        /* clear start/stop bit */
3035        iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
3036                 mmio + ATA_DMA_CMD);
3037
3038        /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
3039        ata_sff_dma_pause(ap);
3040}
3041EXPORT_SYMBOL_GPL(ata_bmdma_stop);
3042
3043/**
3044 *      ata_bmdma_status - Read PCI IDE BMDMA status
3045 *      @ap: Port associated with this ATA transaction.
3046 *
3047 *      Read and return BMDMA status register.
3048 *
3049 *      May be used as the bmdma_status() entry in ata_port_operations.
3050 *
3051 *      LOCKING:
3052 *      spin_lock_irqsave(host lock)
3053 */
3054u8 ata_bmdma_status(struct ata_port *ap)
3055{
3056        return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
3057}
3058EXPORT_SYMBOL_GPL(ata_bmdma_status);
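
/*
 * Illustrative sketch (not part of libata): the helpers above are meant
 * to be wired into a low-level driver's ata_port_operations.  The
 * explicit assignments below show the hook mapping that
 * ata_bmdma_port_ops already provides; "example_port_ops" is a
 * hypothetical name, and a real driver would normally just inherit
 * ata_bmdma_port_ops.
 */
#if 0   /* example only -- never compiled */
static struct ata_port_operations example_port_ops = {
        .inherits       = &ata_bmdma_port_ops,

        .sff_irq_clear  = ata_bmdma_irq_clear,
        .bmdma_setup    = ata_bmdma_setup,
        .bmdma_start    = ata_bmdma_start,
        .bmdma_stop     = ata_bmdma_stop,
        .bmdma_status   = ata_bmdma_status,
};
#endif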
3059
3060
3061/**
3062 *      ata_bmdma_port_start - Set port up for bmdma.
3063 *      @ap: Port to initialize
3064 *
3065 *      Called just after data structures for each port are
3066 *      initialized.  Allocates space for PRD table.
3067 *
3068 *      May be used as the port_start() entry in ata_port_operations.
3069 *
3070 *      LOCKING:
3071 *      Inherited from caller.
3072 */
3073int ata_bmdma_port_start(struct ata_port *ap)
3074{
3075        if (ap->mwdma_mask || ap->udma_mask) {
3076                ap->bmdma_prd =
3077                        dmam_alloc_coherent(ap->host->dev, ATA_PRD_TBL_SZ,
3078                                            &ap->bmdma_prd_dma, GFP_KERNEL);
3079                if (!ap->bmdma_prd)
3080                        return -ENOMEM;
3081        }
3082
3083        return 0;
3084}
3085EXPORT_SYMBOL_GPL(ata_bmdma_port_start);
3086
3087/**
3088 *      ata_bmdma_port_start32 - Set port up for bmdma with 32-bit PIO.
3089 *      @ap: Port to initialize
3090 *
3091 *      Called just after data structures for each port are
3092 *      initialized.  Enables 32bit PIO and allocates space for PRD
3093 *      table.
3094 *
3095 *      May be used as the port_start() entry in ata_port_operations for
3096 *      devices that are capable of 32bit PIO.
3097 *
3098 *      LOCKING:
3099 *      Inherited from caller.
3100 */
3101int ata_bmdma_port_start32(struct ata_port *ap)
3102{
3103        ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
3104        return ata_bmdma_port_start(ap);
3105}
3106EXPORT_SYMBOL_GPL(ata_bmdma_port_start32);
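
/*
 * Illustrative sketch (not part of libata): a driver for a controller
 * capable of 32-bit PIO would point ->port_start at the helper above
 * (and typically ->sff_data_xfer at ata_sff_data_xfer32).  The
 * "example_" name is hypothetical; inheriting ata_bmdma32_port_ops
 * gives the same result.
 */
#if 0   /* example only -- never compiled */
static struct ata_port_operations example_pio32_port_ops = {
        .inherits       = &ata_bmdma_port_ops,

        .sff_data_xfer  = ata_sff_data_xfer32,
        .port_start     = ata_bmdma_port_start32,
};
#endif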
3107
3108#ifdef CONFIG_PCI
3109
3110/**
3111 *      ata_pci_bmdma_clear_simplex -   attempt to kick device out of simplex
3112 *      @pdev: PCI device
3113 *
3114 *      Some PCI ATA devices report simplex mode but in fact can be told to
3115 *      enter non-simplex mode. This implements the necessary logic to
3116 *      perform the task on such devices. Calling it on other devices will
3117 *      have -undefined- behaviour.
3118 */
3119int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
3120{
3121        unsigned long bmdma = pci_resource_start(pdev, 4);
3122        u8 simplex;
3123
3124        if (bmdma == 0)
3125                return -ENOENT;
3126
3127        simplex = inb(bmdma + 0x02);
3128        outb(simplex & 0x60, bmdma + 0x02);
3129        simplex = inb(bmdma + 0x02);
3130        if (simplex & 0x80)
3131                return -EOPNOTSUPP;
3132        return 0;
3133}
3134EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
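
/*
 * Illustrative sketch (not part of libata): a driver that knows its
 * controller merely reports simplex could try to clear the bit early in
 * its probe routine.  "example_probe" and the error handling policy are
 * assumptions made up for this sketch.
 */
#if 0   /* example only -- never compiled */
static int example_probe(struct pci_dev *pdev,
                         const struct pci_device_id *id)
{
        /* best effort: if this fails the host simply stays simplex and
         * the two channels cannot run DMA at the same time */
        if (ata_pci_bmdma_clear_simplex(pdev))
                dev_warn(&pdev->dev, "could not clear simplex bit\n");

        /* ... normal host setup would continue here ... */
        return 0;
}
#endif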
3135
3136static void ata_bmdma_nodma(struct ata_host *host, const char *reason)
3137{
3138        int i;
3139
3140        dev_err(host->dev, "BMDMA: %s, falling back to PIO\n", reason);
3141
3142        for (i = 0; i < 2; i++) {
3143                host->ports[i]->mwdma_mask = 0;
3144                host->ports[i]->udma_mask = 0;
3145        }
3146}
3147
3148/**
3149 *      ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
3150 *      @host: target ATA host
3151 *
3152 *      Acquire PCI BMDMA resources and initialize @host accordingly.
3153 *
3154 *      LOCKING:
3155 *      Inherited from calling layer (may sleep).
3156 */
3157void ata_pci_bmdma_init(struct ata_host *host)
3158{
3159        struct device *gdev = host->dev;
3160        struct pci_dev *pdev = to_pci_dev(gdev);
3161        int i, rc;
3162
3163        /* No BAR4 allocation: No DMA */
3164        if (pci_resource_start(pdev, 4) == 0) {
3165                ata_bmdma_nodma(host, "BAR4 is zero");
3166                return;
3167        }
3168
3169        /*
3170         * Some controllers require BMDMA region to be initialized
3171         * even if DMA is not in use to clear IRQ status via
3172         * ->sff_irq_clear method.  Try to initialize bmdma_addr
3173         * regardless of dma masks.
3174         */
3175        rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
3176        if (rc)
3177                ata_bmdma_nodma(host, "failed to set dma mask");
3178
3179        /* request and iomap DMA region */
3180        rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
3181        if (rc) {
3182                ata_bmdma_nodma(host, "failed to request/iomap BAR4");
3183                return;
3184        }
3185        host->iomap = pcim_iomap_table(pdev);
3186
3187        for (i = 0; i < 2; i++) {
3188                struct ata_port *ap = host->ports[i];
3189                void __iomem *bmdma = host->iomap[4] + 8 * i;
3190
3191                if (ata_port_is_dummy(ap))
3192                        continue;
3193
3194                ap->ioaddr.bmdma_addr = bmdma;
3195                if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
3196                    (ioread8(bmdma + 2) & 0x80))
3197                        host->flags |= ATA_HOST_SIMPLEX;
3198
3199                ata_port_desc(ap, "bmdma 0x%llx",
3200                    (unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
3201        }
3202}
3203EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
3204
3205/**
3206 *      ata_pci_bmdma_prepare_host - helper to prepare PCI BMDMA ATA host
3207 *      @pdev: target PCI device
3208 *      @ppi: array of port_info, must be enough for two ports
3209 *      @r_host: out argument for the initialized ATA host
3210 *
3211 *      Helper to allocate BMDMA ATA host for @pdev, acquire all PCI
3212 *      resources and initialize it accordingly in one go.
3213 *
3214 *      LOCKING:
3215 *      Inherited from calling layer (may sleep).
3216 *
3217 *      RETURNS:
3218 *      0 on success, -errno otherwise.
3219 */
3220int ata_pci_bmdma_prepare_host(struct pci_dev *pdev,
3221                               const struct ata_port_info * const * ppi,
3222                               struct ata_host **r_host)
3223{
3224        int rc;
3225
3226        rc = ata_pci_sff_prepare_host(pdev, ppi, r_host);
3227        if (rc)
3228                return rc;
3229
3230        ata_pci_bmdma_init(*r_host);
3231        return 0;
3232}
3233EXPORT_SYMBOL_GPL(ata_pci_bmdma_prepare_host);
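
/*
 * Illustrative sketch (not part of libata): drivers that need to fix up
 * the host between allocation and activation use this prepare helper
 * followed by ata_pci_sff_activate_host(), instead of the one-shot
 * ata_pci_bmdma_init_one().  The "example_" name is hypothetical.
 */
#if 0   /* example only -- never compiled */
static int example_prepare_and_activate(struct pci_dev *pdev,
                                        const struct ata_port_info * const *ppi,
                                        struct scsi_host_template *sht)
{
        struct ata_host *host;
        int rc;

        rc = pcim_enable_device(pdev);
        if (rc)
                return rc;

        rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
        if (rc)
                return rc;

        /* ... chip-specific tweaks to host/ports would go here ... */

        pci_set_master(pdev);
        return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
}
#endif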
3234
3235/**
3236 *      ata_pci_bmdma_init_one - Initialize/register BMDMA PCI IDE controller
3237 *      @pdev: Controller to be initialized
3238 *      @ppi: array of port_info, must be enough for two ports
3239 *      @sht: scsi_host_template to use when registering the host
3240 *      @host_priv: host private_data
3241 *      @hflags: host flags
3242 *
3243 *      This function is similar to ata_pci_sff_init_one() but also
3244 *      takes care of BMDMA initialization.
3245 *
3246 *      LOCKING:
3247 *      Inherited from PCI layer (may sleep).
3248 *
3249 *      RETURNS:
3250 *      Zero on success, negative errno value on error.
3251 */
3252int ata_pci_bmdma_init_one(struct pci_dev *pdev,
3253                           const struct ata_port_info * const * ppi,
3254                           struct scsi_host_template *sht, void *host_priv,
3255                           int hflags)
3256{
3257        return ata_pci_init_one(pdev, ppi, sht, host_priv, hflags, 1);
3258}
3259EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one);
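
/*
 * Illustrative sketch (not part of libata): the typical one-call probe
 * for a plain BMDMA controller.  The "example_" names and the
 * port_info/scsi_host_template contents are assumptions made up for
 * this sketch; a real driver fills them in from its hardware docs.
 */
#if 0   /* example only -- never compiled */
static struct scsi_host_template example_sht = {
        ATA_BMDMA_SHT("example"),
};

static const struct ata_port_info example_port_info = {
        .flags          = ATA_FLAG_SLAVE_POSS,
        .pio_mask       = ATA_PIO4,
        .mwdma_mask     = ATA_MWDMA2,
        .udma_mask      = ATA_UDMA5,
        .port_ops       = &ata_bmdma_port_ops,
};

static int example_init_one(struct pci_dev *pdev,
                            const struct pci_device_id *id)
{
        const struct ata_port_info *ppi[] = { &example_port_info, NULL };

        return ata_pci_bmdma_init_one(pdev, ppi, &example_sht, NULL, 0);
}
#endif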
3260
3261#endif /* CONFIG_PCI */
3262#endif /* CONFIG_ATA_BMDMA */
3263
3264/**
3265 *      ata_sff_port_init - Initialize SFF/BMDMA ATA port
3266 *      @ap: Port to initialize
3267 *
3268 *      Called on port allocation to initialize SFF/BMDMA specific
3269 *      fields.
3270 *
3271 *      LOCKING:
3272 *      None.
3273 */
3274void ata_sff_port_init(struct ata_port *ap)
3275{
3276        INIT_DELAYED_WORK(&ap->sff_pio_task, ata_sff_pio_task);
3277        ap->ctl = ATA_DEVCTL_OBS;
3278        ap->last_ctl = 0xFF;
3279}
3280
3281int __init ata_sff_init(void)
3282{
3283        ata_sff_wq = alloc_workqueue("ata_sff", WQ_MEM_RECLAIM, WQ_MAX_ACTIVE);
3284        if (!ata_sff_wq)
3285                return -ENOMEM;
3286
3287        return 0;
3288}
3289
3290void ata_sff_exit(void)
3291{
3292        destroy_workqueue(ata_sff_wq);
3293}
3294