linux/drivers/ata/libata-eh.c
   1/*
   2 *  libata-eh.c - libata error handling
   3 *
   4 *  Maintained by:  Tejun Heo <tj@kernel.org>
   5 *                  Please ALWAYS copy linux-ide@vger.kernel.org
   6 *                  on emails.
   7 *
   8 *  Copyright 2006 Tejun Heo <htejun@gmail.com>
   9 *
  10 *
  11 *  This program is free software; you can redistribute it and/or
  12 *  modify it under the terms of the GNU General Public License as
  13 *  published by the Free Software Foundation; either version 2, or
  14 *  (at your option) any later version.
  15 *
  16 *  This program is distributed in the hope that it will be useful,
  17 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  18 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  19 *  General Public License for more details.
  20 *
  21 *  You should have received a copy of the GNU General Public License
  22 *  along with this program; see the file COPYING.  If not, write to
  23 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
  24 *  USA.
  25 *
  26 *
  27 *  libata documentation is available via 'make {ps|pdf}docs',
  28 *  as Documentation/DocBook/libata.*
  29 *
  30 *  Hardware documentation available from http://www.t13.org/ and
  31 *  http://www.sata-io.org/
  32 *
  33 */
  34
  35#include <linux/kernel.h>
  36#include <linux/blkdev.h>
  37#include <linux/export.h>
  38#include <linux/pci.h>
  39#include <scsi/scsi.h>
  40#include <scsi/scsi_host.h>
  41#include <scsi/scsi_eh.h>
  42#include <scsi/scsi_device.h>
  43#include <scsi/scsi_cmnd.h>
  44#include <scsi/scsi_dbg.h>
  45#include "../scsi/scsi_transport_api.h"
  46
  47#include <linux/libata.h>
  48
  49#include "libata.h"
  50
  51enum {
  52        /* speed down verdicts */
  53        ATA_EH_SPDN_NCQ_OFF             = (1 << 0),
  54        ATA_EH_SPDN_SPEED_DOWN          = (1 << 1),
  55        ATA_EH_SPDN_FALLBACK_TO_PIO     = (1 << 2),
  56        ATA_EH_SPDN_KEEP_ERRORS         = (1 << 3),
  57
  58        /* error flags */
  59        ATA_EFLAG_IS_IO                 = (1 << 0),
  60        ATA_EFLAG_DUBIOUS_XFER          = (1 << 1),
  61        ATA_EFLAG_OLD_ER                = (1 << 31),
  62
  63        /* error categories */
  64        ATA_ECAT_NONE                   = 0,
  65        ATA_ECAT_ATA_BUS                = 1,
  66        ATA_ECAT_TOUT_HSM               = 2,
  67        ATA_ECAT_UNK_DEV                = 3,
  68        ATA_ECAT_DUBIOUS_NONE           = 4,
  69        ATA_ECAT_DUBIOUS_ATA_BUS        = 5,
  70        ATA_ECAT_DUBIOUS_TOUT_HSM       = 6,
  71        ATA_ECAT_DUBIOUS_UNK_DEV        = 7,
  72        ATA_ECAT_NR                     = 8,
  73
  74        ATA_EH_CMD_DFL_TIMEOUT          =  5000,
  75
  76        /* always put at least this amount of time between resets */
  77        ATA_EH_RESET_COOL_DOWN          =  5000,
  78
  79        /* Waiting in ->prereset can never be reliable.  It's
  80         * sometimes nice to wait there but it can't be depended upon;
  81         * otherwise, we wouldn't be resetting.  Just give it enough
  82         * time for most drives to spin up.
  83         */
  84        ATA_EH_PRERESET_TIMEOUT         = 10000,
  85        ATA_EH_FASTDRAIN_INTERVAL       =  3000,
  86
  87        ATA_EH_UA_TRIES                 = 5,
  88
  89        /* probe speed down parameters, see ata_eh_schedule_probe() */
  90        ATA_EH_PROBE_TRIAL_INTERVAL     = 60000,        /* 1 min */
  91        ATA_EH_PROBE_TRIALS             = 2,
  92};
  93
  94/* The following table determines how we sequence resets.  Each entry
  95 * represents timeout for that try.  The first try can be soft or
  96 * hardreset.  All others are hardreset if available.  In most cases
  97 * the first reset w/ 10sec timeout should succeed.  Following entries
  98 * are mostly for error handling, hotplug and unusually slow devices.
  99 */
 100static const unsigned long ata_eh_reset_timeouts[] = {
 101        10000,  /* most drives spin up by 10sec */
 102        10000,  /* > 99% working drives spin up before 20sec */
 103        35000,  /* give > 30 secs of idleness for slow-to-recover devices */
 104         5000,  /* and sweet one last chance */
 105        ULONG_MAX, /* > 1 min has elapsed, give up */
 106};
 107
 108static const unsigned long ata_eh_identify_timeouts[] = {
 109         5000,  /* covers > 99% of successes and not too boring on failures */
 110        10000,  /* combined time till here is enough even for media access */
 111        30000,  /* for exceptionally slow devices */
 112        ULONG_MAX,
 113};
 114
 115static const unsigned long ata_eh_flush_timeouts[] = {
 116        15000,  /* be generous with flush */
 117        15000,  /* ditto */
 118        30000,  /* and even more generous */
 119        ULONG_MAX,
 120};
 121
 122static const unsigned long ata_eh_other_timeouts[] = {
 123         5000,  /* same rationale as identify timeout */
 124        10000,  /* ditto */
 125        /* but no merciful 30sec for other commands, it just isn't worth it */
 126        ULONG_MAX,
 127};
 128
 129struct ata_eh_cmd_timeout_ent {
 130        const u8                *commands;
 131        const unsigned long     *timeouts;
 132};
 133
 134/* The following table determines timeouts to use for EH internal
 135 * commands.  Each table entry is a command class and matches the
 136 * commands the entry applies to and the timeout table to use.
 137 *
 138 * On the retry after a command timed out, the next timeout value from
 139 * the table is used.  If the table doesn't contain further entries,
 140 * the last value is used.
 141 *
 142 * ehc->cmd_timeout_idx keeps track of which timeout to use per
 143 * command class, so if SET_FEATURES times out on the first try, the
 144 * next try will use the second timeout value only for that class.
 145 */
 146#define CMDS(cmds...)   (const u8 []){ cmds, 0 }
 147static const struct ata_eh_cmd_timeout_ent
 148ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
 149        { .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
 150          .timeouts = ata_eh_identify_timeouts, },
 151        { .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
 152          .timeouts = ata_eh_other_timeouts, },
 153        { .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
 154          .timeouts = ata_eh_other_timeouts, },
 155        { .commands = CMDS(ATA_CMD_SET_FEATURES),
 156          .timeouts = ata_eh_other_timeouts, },
 157        { .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
 158          .timeouts = ata_eh_other_timeouts, },
 159        { .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
 160          .timeouts = ata_eh_flush_timeouts },
 161};
 162#undef CMDS
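
/*
 * Worked example (editor's addition): with the table above, if
 * ATA_CMD_SET_FEATURES times out on its first try, the per-class index
 * ehc->cmd_timeout_idx[devno][3] (SET_FEATURES is the fourth entry)
 * advances from 0 to 1, so the retry waits ata_eh_other_timeouts[1] =
 * 10s instead of 5s.  Other command classes keep their own indices.
 */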
 163
 164static void __ata_port_freeze(struct ata_port *ap);
 165#ifdef CONFIG_PM
 166static void ata_eh_handle_port_suspend(struct ata_port *ap);
 167static void ata_eh_handle_port_resume(struct ata_port *ap);
 168#else /* CONFIG_PM */
 169static void ata_eh_handle_port_suspend(struct ata_port *ap)
 170{ }
 171
 172static void ata_eh_handle_port_resume(struct ata_port *ap)
 173{ }
 174#endif /* CONFIG_PM */
 175
 176static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
 177                                 va_list args)
 178{
 179        ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
 180                                     ATA_EH_DESC_LEN - ehi->desc_len,
 181                                     fmt, args);
 182}
 183
 184/**
 185 *      __ata_ehi_push_desc - push error description without adding separator
 186 *      @ehi: target EHI
 187 *      @fmt: printf format string
 188 *
 189 *      Format string according to @fmt and append it to @ehi->desc.
 190 *
 191 *      LOCKING:
 192 *      spin_lock_irqsave(host lock)
 193 */
 194void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
 195{
 196        va_list args;
 197
 198        va_start(args, fmt);
 199        __ata_ehi_pushv_desc(ehi, fmt, args);
 200        va_end(args);
 201}
 202
 203/**
 204 *      ata_ehi_push_desc - push error description with separator
 205 *      @ehi: target EHI
 206 *      @fmt: printf format string
 207 *
 208 *      Format string according to @fmt and append it to @ehi->desc.
 209 *      If @ehi->desc is not empty, ", " is added in-between.
 210 *
 211 *      LOCKING:
 212 *      spin_lock_irqsave(host lock)
 213 */
 214void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
 215{
 216        va_list args;
 217
 218        if (ehi->desc_len)
 219                __ata_ehi_push_desc(ehi, ", ");
 220
 221        va_start(args, fmt);
 222        __ata_ehi_pushv_desc(ehi, fmt, args);
 223        va_end(args);
 224}
 225
 226/**
 227 *      ata_ehi_clear_desc - clean error description
 228 *      @ehi: target EHI
 229 *
 230 *      Clear @ehi->desc.
 231 *
 232 *      LOCKING:
 233 *      spin_lock_irqsave(host lock)
 234 */
 235void ata_ehi_clear_desc(struct ata_eh_info *ehi)
 236{
 237        ehi->desc[0] = '\0';
 238        ehi->desc_len = 0;
 239}
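
/*
 * Illustrative sketch (editor's addition, not part of libata): typical
 * use of the description helpers above from a driver's interrupt
 * handler while holding the host lock.  "irq_stat" is a hypothetical
 * controller register value used only for this example.
 */
static void example_describe_error(struct ata_link *link, u32 irq_stat)
{
	struct ata_eh_info *ehi = &link->eh_info;

	ata_ehi_clear_desc(ehi);
	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
	ata_ehi_push_desc(ehi, "connection status changed");
	/* ehi->desc now holds "irq_stat 0x...., connection status changed" */
}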
 240
 241/**
 242 *      ata_port_desc - append port description
 243 *      @ap: target ATA port
 244 *      @fmt: printf format string
 245 *
 246 *      Format string according to @fmt and append it to port
 247 *      description.  If port description is not empty, " " is added
 248 *      in-between.  This function is to be used while initializing
 249 *      ata_host.  The description is printed on host registration.
 250 *
 251 *      LOCKING:
 252 *      None.
 253 */
 254void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
 255{
 256        va_list args;
 257
 258        WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));
 259
 260        if (ap->link.eh_info.desc_len)
 261                __ata_ehi_push_desc(&ap->link.eh_info, " ");
 262
 263        va_start(args, fmt);
 264        __ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
 265        va_end(args);
 266}
 267
 268#ifdef CONFIG_PCI
 269
 270/**
 271 *      ata_port_pbar_desc - append PCI BAR description
 272 *      @ap: target ATA port
 273 *      @bar: target PCI BAR
 274 *      @offset: offset into PCI BAR
 275 *      @name: name of the area
 276 *
 277 *      If @offset is negative, this function formats a string which
 278 *      contains the name, address, size and type of the BAR and
 279 *      appends it to the port description.  If @offset is zero or
 280 *      positive, only the name and the offset address are appended.
 281 *
 282 *      LOCKING:
 283 *      None.
 284 */
 285void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
 286                        const char *name)
 287{
 288        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 289        char *type = "";
 290        unsigned long long start, len;
 291
 292        if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
 293                type = "m";
 294        else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
 295                type = "i";
 296
 297        start = (unsigned long long)pci_resource_start(pdev, bar);
 298        len = (unsigned long long)pci_resource_len(pdev, bar);
 299
 300        if (offset < 0)
 301                ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
 302        else
 303                ata_port_desc(ap, "%s 0x%llx", name,
 304                                start + (unsigned long long)offset);
 305}
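
/*
 * Illustrative sketch (editor's addition): how a PCI LLD might build a
 * port description during host initialization, loosely following what
 * ahci does.  BAR 5 and the 0x100 offset are example values only.
 */
static void example_describe_pci_port(struct ata_port *ap)
{
	ata_port_desc(ap, "version 1.0");
	ata_port_pbar_desc(ap, 5, -1, "abar");	  /* "abar m8192@0x..." */
	ata_port_pbar_desc(ap, 5, 0x100, "port"); /* "port 0x..." */
}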
 306
 307#endif /* CONFIG_PCI */
 308
 309static int ata_lookup_timeout_table(u8 cmd)
 310{
 311        int i;
 312
 313        for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
 314                const u8 *cur;
 315
 316                for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
 317                        if (*cur == cmd)
 318                                return i;
 319        }
 320
 321        return -1;
 322}
 323
 324/**
 325 *      ata_internal_cmd_timeout - determine timeout for an internal command
 326 *      @dev: target device
 327 *      @cmd: internal command to be issued
 328 *
 329 *      Determine timeout for internal command @cmd for @dev.
 330 *
 331 *      LOCKING:
 332 *      EH context.
 333 *
 334 *      RETURNS:
 335 *      Determined timeout.
 336 */
 337unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
 338{
 339        struct ata_eh_context *ehc = &dev->link->eh_context;
 340        int ent = ata_lookup_timeout_table(cmd);
 341        int idx;
 342
 343        if (ent < 0)
 344                return ATA_EH_CMD_DFL_TIMEOUT;
 345
 346        idx = ehc->cmd_timeout_idx[dev->devno][ent];
 347        return ata_eh_cmd_timeout_table[ent].timeouts[idx];
 348}
 349
 350/**
 351 *      ata_internal_cmd_timed_out - notification for internal command timeout
 352 *      @dev: target device
 353 *      @cmd: internal command which timed out
 354 *
 355 *      Notify EH that internal command @cmd for @dev timed out.  This
 356 *      function should be called only for commands whose timeouts are
 357 *      determined using ata_internal_cmd_timeout().
 358 *
 359 *      LOCKING:
 360 *      EH context.
 361 */
 362void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
 363{
 364        struct ata_eh_context *ehc = &dev->link->eh_context;
 365        int ent = ata_lookup_timeout_table(cmd);
 366        int idx;
 367
 368        if (ent < 0)
 369                return;
 370
 371        idx = ehc->cmd_timeout_idx[dev->devno][ent];
 372        if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
 373                ehc->cmd_timeout_idx[dev->devno][ent]++;
 374}
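
/*
 * Illustrative sketch (editor's addition): how the two helpers above
 * cooperate, following the pattern inside ata_exec_internal().  Each
 * try asks the table for the current timeout; a timeout report bumps
 * the per-class index so the next try waits longer.  issue_and_wait()
 * is a hypothetical stand-in for the actual issue path.
 */
static unsigned int example_issue_with_escalation(struct ata_device *dev,
						  u8 cmd)
{
	unsigned long timeout = ata_internal_cmd_timeout(dev, cmd);
	unsigned int err_mask = issue_and_wait(dev, cmd, timeout);

	if (err_mask & AC_ERR_TIMEOUT)
		ata_internal_cmd_timed_out(dev, cmd);
	return err_mask;
}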
 375
 376static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
 377                             unsigned int err_mask)
 378{
 379        struct ata_ering_entry *ent;
 380
 381        WARN_ON(!err_mask);
 382
 383        ering->cursor++;
 384        ering->cursor %= ATA_ERING_SIZE;
 385
 386        ent = &ering->ring[ering->cursor];
 387        ent->eflags = eflags;
 388        ent->err_mask = err_mask;
 389        ent->timestamp = get_jiffies_64();
 390}
 391
 392static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
 393{
 394        struct ata_ering_entry *ent = &ering->ring[ering->cursor];
 395
 396        if (ent->err_mask)
 397                return ent;
 398        return NULL;
 399}
 400
 401int ata_ering_map(struct ata_ering *ering,
 402                  int (*map_fn)(struct ata_ering_entry *, void *),
 403                  void *arg)
 404{
 405        int idx, rc = 0;
 406        struct ata_ering_entry *ent;
 407
 408        idx = ering->cursor;
 409        do {
 410                ent = &ering->ring[idx];
 411                if (!ent->err_mask)
 412                        break;
 413                rc = map_fn(ent, arg);
 414                if (rc)
 415                        break;
 416                idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
 417        } while (idx != ering->cursor);
 418
 419        return rc;
 420}
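
/*
 * Illustrative sketch (editor's addition): a map_fn callback for
 * ata_ering_map() that counts entries recorded by in-flight I/O,
 * skipping entries already invalidated with ATA_EFLAG_OLD_ER.  This
 * mirrors how the speed-down logic walks the ring.
 */
static int example_count_io_errors(struct ata_ering_entry *ent, void *arg)
{
	int *nr_io_errors = arg;

	if (!(ent->eflags & ATA_EFLAG_OLD_ER) &&
	    (ent->eflags & ATA_EFLAG_IS_IO))
		(*nr_io_errors)++;
	return 0;	/* returning non-zero would stop the walk early */
}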
 421
 422static int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
 423{
 424        ent->eflags |= ATA_EFLAG_OLD_ER;
 425        return 0;
 426}
 427
 428static void ata_ering_clear(struct ata_ering *ering)
 429{
 430        ata_ering_map(ering, ata_ering_clear_cb, NULL);
 431}
 432
 433static unsigned int ata_eh_dev_action(struct ata_device *dev)
 434{
 435        struct ata_eh_context *ehc = &dev->link->eh_context;
 436
 437        return ehc->i.action | ehc->i.dev_action[dev->devno];
 438}
 439
 440static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
 441                                struct ata_eh_info *ehi, unsigned int action)
 442{
 443        struct ata_device *tdev;
 444
 445        if (!dev) {
 446                ehi->action &= ~action;
 447                ata_for_each_dev(tdev, link, ALL)
 448                        ehi->dev_action[tdev->devno] &= ~action;
 449        } else {
 450                /* doesn't make sense for port-wide EH actions */
 451                WARN_ON(!(action & ATA_EH_PERDEV_MASK));
 452
 453                /* break ehi->action into ehi->dev_action */
 454                if (ehi->action & action) {
 455                        ata_for_each_dev(tdev, link, ALL)
 456                                ehi->dev_action[tdev->devno] |=
 457                                        ehi->action & action;
 458                        ehi->action &= ~action;
 459                }
 460
 461                /* turn off the specified per-dev action */
 462                ehi->dev_action[dev->devno] &= ~action;
 463        }
 464}
 465
 466/**
 467 *      ata_eh_acquire - acquire EH ownership
 468 *      @ap: ATA port to acquire EH ownership for
 469 *
 470 *      Acquire EH ownership for @ap.  This is the basic exclusion
 471 *      mechanism for ports sharing a host.  Only one port hanging off
 472 *      the same host can claim the ownership of EH.
 473 *
 474 *      LOCKING:
 475 *      EH context.
 476 */
 477void ata_eh_acquire(struct ata_port *ap)
 478{
 479        mutex_lock(&ap->host->eh_mutex);
 480        WARN_ON_ONCE(ap->host->eh_owner);
 481        ap->host->eh_owner = current;
 482}
 483
 484/**
 485 *      ata_eh_release - release EH ownership
 486 *      @ap: ATA port to release EH ownership for
 487 *
 488 *      Release EH ownership for @ap.  The caller must have
 489 *      acquired EH ownership using ata_eh_acquire() previously.
 490 *
 491 *      LOCKING:
 492 *      EH context.
 493 */
 494void ata_eh_release(struct ata_port *ap)
 495{
 496        WARN_ON_ONCE(ap->host->eh_owner != current);
 497        ap->host->eh_owner = NULL;
 498        mutex_unlock(&ap->host->eh_mutex);
 499}
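
/*
 * Illustrative sketch (editor's addition): EH drops ownership around
 * long sleeps so sibling ports on the same host can run their own EH
 * in the meantime; this is essentially what ata_msleep() does.
 * Assumes <linux/delay.h> for msleep().
 */
static void example_eh_sleep(struct ata_port *ap, unsigned int msecs)
{
	ata_eh_release(ap);
	msleep(msecs);
	ata_eh_acquire(ap);
}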
 500
 501/**
 502 *      ata_scsi_timed_out - SCSI layer time out callback
 503 *      @cmd: timed out SCSI command
 504 *
 505 *      Handles SCSI layer timeout.  We race with normal completion of
 506 *      the qc for @cmd.  If the qc is already gone, we lose and let
 507 *      the scsi command finish (EH_HANDLED).  Otherwise, the qc has
 508 *      timed out and EH should be invoked.  Prevent ata_qc_complete()
 509 *      from finishing it by setting EH_SCHEDULED and return
 510 *      EH_NOT_HANDLED.
 511 *
 512 *      TODO: kill this function once old EH is gone.
 513 *
 514 *      LOCKING:
 515 *      Called from timer context
 516 *
 517 *      RETURNS:
 518 *      BLK_EH_HANDLED or BLK_EH_NOT_HANDLED
 519 */
 520enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
 521{
 522        struct Scsi_Host *host = cmd->device->host;
 523        struct ata_port *ap = ata_shost_to_port(host);
 524        unsigned long flags;
 525        struct ata_queued_cmd *qc;
 526        enum blk_eh_timer_return ret;
 527
 528        DPRINTK("ENTER\n");
 529
 530        if (ap->ops->error_handler) {
 531                ret = BLK_EH_NOT_HANDLED;
 532                goto out;
 533        }
 534
 535        ret = BLK_EH_HANDLED;
 536        spin_lock_irqsave(ap->lock, flags);
 537        qc = ata_qc_from_tag(ap, ap->link.active_tag);
 538        if (qc) {
 539                WARN_ON(qc->scsicmd != cmd);
 540                qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
 541                qc->err_mask |= AC_ERR_TIMEOUT;
 542                ret = BLK_EH_NOT_HANDLED;
 543        }
 544        spin_unlock_irqrestore(ap->lock, flags);
 545
 546 out:
 547        DPRINTK("EXIT, ret=%d\n", ret);
 548        return ret;
 549}
 550
 551static void ata_eh_unload(struct ata_port *ap)
 552{
 553        struct ata_link *link;
 554        struct ata_device *dev;
 555        unsigned long flags;
 556
 557        /* Restore SControl IPM and SPD for the next driver and
 558         * disable attached devices.
 559         */
 560        ata_for_each_link(link, ap, PMP_FIRST) {
 561                sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
 562                ata_for_each_dev(dev, link, ALL)
 563                        ata_dev_disable(dev);
 564        }
 565
 566        /* freeze and set UNLOADED */
 567        spin_lock_irqsave(ap->lock, flags);
 568
 569        ata_port_freeze(ap);                    /* won't be thawed */
 570        ap->pflags &= ~ATA_PFLAG_EH_PENDING;    /* clear pending from freeze */
 571        ap->pflags |= ATA_PFLAG_UNLOADED;
 572
 573        spin_unlock_irqrestore(ap->lock, flags);
 574}
 575
 576/**
 577 *      ata_scsi_error - SCSI layer error handler callback
 578 *      @host: SCSI host on which error occurred
 579 *
 580 *      Handles SCSI-layer-thrown error events.
 581 *
 582 *      LOCKING:
 583 *      Inherited from SCSI layer (none, can sleep)
 584 *
 585 *      RETURNS:
 586 *      Zero.
 587 */
 588void ata_scsi_error(struct Scsi_Host *host)
 589{
 590        struct ata_port *ap = ata_shost_to_port(host);
 591        unsigned long flags;
 592        LIST_HEAD(eh_work_q);
 593
 594        DPRINTK("ENTER\n");
 595
 596        spin_lock_irqsave(host->host_lock, flags);
 597        list_splice_init(&host->eh_cmd_q, &eh_work_q);
 598        spin_unlock_irqrestore(host->host_lock, flags);
 599
 600        ata_scsi_cmd_error_handler(host, ap, &eh_work_q);
 601
 602        /* If the timeout raced normal completion and there is nothing to
 603           recover (nr_timedout == 0), why exactly are we doing error recovery? */
 604        ata_scsi_port_error_handler(host, ap);
 605
 606        /* finish or retry handled scmd's and clean up */
 607        WARN_ON(host->host_failed || !list_empty(&eh_work_q));
 608
 609        DPRINTK("EXIT\n");
 610}
 611
 612/**
 613 * ata_scsi_cmd_error_handler - error callback for a list of commands
 614 * @host:       scsi host containing the port
 615 * @ap:         ATA port within the host
 616 * @eh_work_q:  list of commands to process
 617 *
 618 * Process the given list of commands and return those finished to the
 619 * ap->eh_done_q.  This function is the first part of the libata error
 620 * handler which processes a given list of failed commands.
 621 */
 622void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
 623                                struct list_head *eh_work_q)
 624{
 625        int i;
 626        unsigned long flags;
 627
 628        /* make sure sff pio task is not running */
 629        ata_sff_flush_pio_task(ap);
 630
 631        /* synchronize with host lock and sort out timeouts */
 632
 633        /* For new EH, all qcs are finished in one of three ways -
 634         * normal completion, error completion, and SCSI timeout.
 635         * Both completions can race against SCSI timeout.  When normal
 636         * completion wins, the qc never reaches EH.  When error
 637         * completion wins, the qc has ATA_QCFLAG_FAILED set.
 638         *
 639         * When SCSI timeout wins, things are a bit more complex.
 640         * Normal or error completion can occur after the timeout but
 641         * before this point.  In such cases, both types of
 642         * completions are honored.  A scmd is determined to have
 643         * timed out iff its associated qc is active and not failed.
 644         */
 645        if (ap->ops->error_handler) {
 646                struct scsi_cmnd *scmd, *tmp;
 647                int nr_timedout = 0;
 648
 649                spin_lock_irqsave(ap->lock, flags);
 650
 651                /* This must occur under the ap->lock as we don't want
 652                   a polled recovery to race the real interrupt handler.
 653
 654                   The lost_interrupt handler checks for any completed but
 655                   non-notified command and completes it much like an IRQ handler.
 656
 657                   We then fall into the error recovery code, which will treat
 658                   this as if normal completion won the race. */
 659
 660                if (ap->ops->lost_interrupt)
 661                        ap->ops->lost_interrupt(ap);
 662
 663                list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
 664                        struct ata_queued_cmd *qc;
 665
 666                        for (i = 0; i < ATA_MAX_QUEUE; i++) {
 667                                qc = __ata_qc_from_tag(ap, i);
 668                                if (qc->flags & ATA_QCFLAG_ACTIVE &&
 669                                    qc->scsicmd == scmd)
 670                                        break;
 671                        }
 672
 673                        if (i < ATA_MAX_QUEUE) {
 674                                /* the scmd has an associated qc */
 675                                if (!(qc->flags & ATA_QCFLAG_FAILED)) {
 676                                        /* which hasn't failed yet, timeout */
 677                                        qc->err_mask |= AC_ERR_TIMEOUT;
 678                                        qc->flags |= ATA_QCFLAG_FAILED;
 679                                        nr_timedout++;
 680                                }
 681                        } else {
 682                                /* Normal completion occurred after
 683                                 * SCSI timeout but before this point.
 684                                 * Successfully complete it.
 685                                 */
 686                                scmd->retries = scmd->allowed;
 687                                scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
 688                        }
 689                }
 690
 691                /* If we have timed out qcs, they belong to EH from
 692                 * this point on, but the state of the controller is
 693                 * unknown.  Freeze the port to make sure the IRQ
 694                 * handler doesn't diddle with those qcs.  This must
 695                 * be done atomically w.r.t. setting QCFLAG_FAILED.
 696                 */
 697                if (nr_timedout)
 698                        __ata_port_freeze(ap);
 699
 700                spin_unlock_irqrestore(ap->lock, flags);
 701
 702                /* initialize eh_tries */
 703                ap->eh_tries = ATA_EH_MAX_TRIES;
 704        } else
 705                spin_unlock_wait(ap->lock);
 706
 707}
 708EXPORT_SYMBOL(ata_scsi_cmd_error_handler);
 709
 710/**
 711 * ata_scsi_port_error_handler - recover the port after the commands
 712 * @host:       SCSI host containing the port
 713 * @ap:         the ATA port
 714 *
 715 * Handle the recovery of the port @ap after all the commands
 716 * have been recovered.
 717 */
 718void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
 719{
 720        unsigned long flags;
 721
 722        /* invoke error handler */
 723        if (ap->ops->error_handler) {
 724                struct ata_link *link;
 725
 726                /* acquire EH ownership */
 727                ata_eh_acquire(ap);
 728 repeat:
 729                /* kill fast drain timer */
 730                del_timer_sync(&ap->fastdrain_timer);
 731
 732                /* process port resume request */
 733                ata_eh_handle_port_resume(ap);
 734
 735                /* fetch & clear EH info */
 736                spin_lock_irqsave(ap->lock, flags);
 737
 738                ata_for_each_link(link, ap, HOST_FIRST) {
 739                        struct ata_eh_context *ehc = &link->eh_context;
 740                        struct ata_device *dev;
 741
 742                        memset(&link->eh_context, 0, sizeof(link->eh_context));
 743                        link->eh_context.i = link->eh_info;
 744                        memset(&link->eh_info, 0, sizeof(link->eh_info));
 745
 746                        ata_for_each_dev(dev, link, ENABLED) {
 747                                int devno = dev->devno;
 748
 749                                ehc->saved_xfer_mode[devno] = dev->xfer_mode;
 750                                if (ata_ncq_enabled(dev))
 751                                        ehc->saved_ncq_enabled |= 1 << devno;
 752                        }
 753                }
 754
 755                ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
 756                ap->pflags &= ~ATA_PFLAG_EH_PENDING;
 757                ap->excl_link = NULL;   /* don't maintain exclusion over EH */
 758
 759                spin_unlock_irqrestore(ap->lock, flags);
 760
 761                /* invoke EH, skip if unloading or suspended */
 762                if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
 763                        ap->ops->error_handler(ap);
 764                else {
 765                        /* if unloading, commence suicide */
 766                        if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
 767                            !(ap->pflags & ATA_PFLAG_UNLOADED))
 768                                ata_eh_unload(ap);
 769                        ata_eh_finish(ap);
 770                }
 771
 772                /* process port suspend request */
 773                ata_eh_handle_port_suspend(ap);
 774
 775                /* Exception might have happened after ->error_handler
 776                 * recovered the port but before this point.  Repeat
 777                 * EH in such case.
 778                 */
 779                spin_lock_irqsave(ap->lock, flags);
 780
 781                if (ap->pflags & ATA_PFLAG_EH_PENDING) {
 782                        if (--ap->eh_tries) {
 783                                spin_unlock_irqrestore(ap->lock, flags);
 784                                goto repeat;
 785                        }
 786                        ata_port_err(ap,
 787                                     "EH pending after %d tries, giving up\n",
 788                                     ATA_EH_MAX_TRIES);
 789                        ap->pflags &= ~ATA_PFLAG_EH_PENDING;
 790                }
 791
 792                /* this run is complete, make sure EH info is clear */
 793                ata_for_each_link(link, ap, HOST_FIRST)
 794                        memset(&link->eh_info, 0, sizeof(link->eh_info));
 795
 796                /* end eh (clear host_eh_scheduled) while holding
 797                 * ap->lock such that if exception occurs after this
 798                 * point but before EH completion, SCSI midlayer will
 799                 * re-initiate EH.
 800                 */
 801                ap->ops->end_eh(ap);
 802
 803                spin_unlock_irqrestore(ap->lock, flags);
 804                ata_eh_release(ap);
 805        } else {
 806                WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
 807                ap->ops->eng_timeout(ap);
 808        }
 809
 810        scsi_eh_flush_done_q(&ap->eh_done_q);
 811
 812        /* clean up */
 813        spin_lock_irqsave(ap->lock, flags);
 814
 815        if (ap->pflags & ATA_PFLAG_LOADING)
 816                ap->pflags &= ~ATA_PFLAG_LOADING;
 817        else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
 818                schedule_delayed_work(&ap->hotplug_task, 0);
 819
 820        if (ap->pflags & ATA_PFLAG_RECOVERED)
 821                ata_port_info(ap, "EH complete\n");
 822
 823        ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);
 824
 825        /* tell wait_eh that we're done */
 826        ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
 827        wake_up_all(&ap->eh_wait_q);
 828
 829        spin_unlock_irqrestore(ap->lock, flags);
 830}
 831EXPORT_SYMBOL_GPL(ata_scsi_port_error_handler);
 832
 833/**
 834 *      ata_port_wait_eh - Wait for the currently pending EH to complete
 835 *      @ap: Port to wait EH for
 836 *
 837 *      Wait until the currently pending EH is complete.
 838 *
 839 *      LOCKING:
 840 *      Kernel thread context (may sleep).
 841 */
 842void ata_port_wait_eh(struct ata_port *ap)
 843{
 844        unsigned long flags;
 845        DEFINE_WAIT(wait);
 846
 847 retry:
 848        spin_lock_irqsave(ap->lock, flags);
 849
 850        while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
 851                prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
 852                spin_unlock_irqrestore(ap->lock, flags);
 853                schedule();
 854                spin_lock_irqsave(ap->lock, flags);
 855        }
 856        finish_wait(&ap->eh_wait_q, &wait);
 857
 858        spin_unlock_irqrestore(ap->lock, flags);
 859
 860        /* make sure SCSI EH is complete */
 861        if (scsi_host_in_recovery(ap->scsi_host)) {
 862                ata_msleep(ap, 10);
 863                goto retry;
 864        }
 865}
 866EXPORT_SYMBOL_GPL(ata_port_wait_eh);
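
/*
 * Illustrative sketch (editor's addition): the usual pattern behind
 * synchronous rescans - schedule EH under the host lock, drop the
 * lock, then sleep until both libata and SCSI EH have finished.
 */
static void example_sync_eh(struct ata_port *ap)
{
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	ata_port_schedule_eh(ap);
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);	/* may sleep */
}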
 867
 868static int ata_eh_nr_in_flight(struct ata_port *ap)
 869{
 870        unsigned int tag;
 871        int nr = 0;
 872
 873        /* count only non-internal commands */
 874        for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
 875                if (ata_qc_from_tag(ap, tag))
 876                        nr++;
 877
 878        return nr;
 879}
 880
 881void ata_eh_fastdrain_timerfn(unsigned long arg)
 882{
 883        struct ata_port *ap = (void *)arg;
 884        unsigned long flags;
 885        int cnt;
 886
 887        spin_lock_irqsave(ap->lock, flags);
 888
 889        cnt = ata_eh_nr_in_flight(ap);
 890
 891        /* are we done? */
 892        if (!cnt)
 893                goto out_unlock;
 894
 895        if (cnt == ap->fastdrain_cnt) {
 896                unsigned int tag;
 897
 898                /* No progress during the last interval, tag all
 899                 * in-flight qcs as timed out and freeze the port.
 900                 */
 901                for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
 902                        struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
 903                        if (qc)
 904                                qc->err_mask |= AC_ERR_TIMEOUT;
 905                }
 906
 907                ata_port_freeze(ap);
 908        } else {
 909                /* some qcs have finished, give it another chance */
 910                ap->fastdrain_cnt = cnt;
 911                ap->fastdrain_timer.expires =
 912                        ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
 913                add_timer(&ap->fastdrain_timer);
 914        }
 915
 916 out_unlock:
 917        spin_unlock_irqrestore(ap->lock, flags);
 918}
 919
 920/**
 921 *      ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
 922 *      @ap: target ATA port
 923 *      @fastdrain: activate fast drain
 924 *
 925 *      Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
 926 *      is non-zero and EH wasn't pending before.  Fast drain ensures
 927 *      that EH kicks in in a timely manner.
 928 *
 929 *      LOCKING:
 930 *      spin_lock_irqsave(host lock)
 931 */
 932static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
 933{
 934        int cnt;
 935
 936        /* already scheduled? */
 937        if (ap->pflags & ATA_PFLAG_EH_PENDING)
 938                return;
 939
 940        ap->pflags |= ATA_PFLAG_EH_PENDING;
 941
 942        if (!fastdrain)
 943                return;
 944
 945        /* do we have in-flight qcs? */
 946        cnt = ata_eh_nr_in_flight(ap);
 947        if (!cnt)
 948                return;
 949
 950        /* activate fast drain */
 951        ap->fastdrain_cnt = cnt;
 952        ap->fastdrain_timer.expires =
 953                ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
 954        add_timer(&ap->fastdrain_timer);
 955}
 956
 957/**
 958 *      ata_qc_schedule_eh - schedule qc for error handling
 959 *      @qc: command to schedule error handling for
 960 *
 961 *      Schedule error handling for @qc.  EH will kick in as soon as
 962 *      other commands are drained.
 963 *
 964 *      LOCKING:
 965 *      spin_lock_irqsave(host lock)
 966 */
 967void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
 968{
 969        struct ata_port *ap = qc->ap;
 970        struct request_queue *q = qc->scsicmd->device->request_queue;
 971        unsigned long flags;
 972
 973        WARN_ON(!ap->ops->error_handler);
 974
 975        qc->flags |= ATA_QCFLAG_FAILED;
 976        ata_eh_set_pending(ap, 1);
 977
 978        /* The following will fail if timeout has already expired.
 979         * ata_scsi_error() takes care of such scmds on EH entry.
 980         * Note that ATA_QCFLAG_FAILED is unconditionally set after
 981         * this function completes.
 982         */
 983        spin_lock_irqsave(q->queue_lock, flags);
 984        blk_abort_request(qc->scsicmd->request);
 985        spin_unlock_irqrestore(q->queue_lock, flags);
 986}
 987
 988/**
 989 * ata_std_sched_eh - common EH scheduling for non-libsas ata_ports
 990 * @ap: ATA port to schedule EH for
 991 *
 992 *      LOCKING: inherited from ata_port_schedule_eh
 993 *      spin_lock_irqsave(host lock)
 994 */
 995void ata_std_sched_eh(struct ata_port *ap)
 996{
 997        WARN_ON(!ap->ops->error_handler);
 998
 999        if (ap->pflags & ATA_PFLAG_INITIALIZING)
1000                return;
1001
1002        ata_eh_set_pending(ap, 1);
1003        scsi_schedule_eh(ap->scsi_host);
1004
1005        DPRINTK("port EH scheduled\n");
1006}
1007EXPORT_SYMBOL_GPL(ata_std_sched_eh);
1008
1009/**
1010 * ata_std_end_eh - common EH completion for non-libsas ata_ports
1011 * @ap: ATA port to end EH for
1012 *
1013 * In the libata object model there is a 1:1 mapping of ata_port to
1014 * shost, so host fields can be directly manipulated under ap->lock; in
1015 * the libsas case we need to hold a lock at the ha level to coordinate
1016 * these events.
1017 *
1018 *      LOCKING:
1019 *      spin_lock_irqsave(host lock)
1020 */
1021void ata_std_end_eh(struct ata_port *ap)
1022{
1023        struct Scsi_Host *host = ap->scsi_host;
1024
1025        host->host_eh_scheduled = 0;
1026}
1027EXPORT_SYMBOL(ata_std_end_eh);
1028
1029
1030/**
1031 *      ata_port_schedule_eh - schedule error handling without a qc
1032 *      @ap: ATA port to schedule EH for
1033 *
1034 *      Schedule error handling for @ap.  EH will kick in as soon as
1035 *      all commands are drained.
1036 *
1037 *      LOCKING:
1038 *      spin_lock_irqsave(host lock)
1039 */
1040void ata_port_schedule_eh(struct ata_port *ap)
1041{
1042        /* see: ata_std_sched_eh, unless you know better */
1043        ap->ops->sched_eh(ap);
1044}
1045
1046static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
1047{
1048        int tag, nr_aborted = 0;
1049
1050        WARN_ON(!ap->ops->error_handler);
1051
1052        /* we're gonna abort all commands, no need for fast drain */
1053        ata_eh_set_pending(ap, 0);
1054
1055        for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1056                struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
1057
1058                if (qc && (!link || qc->dev->link == link)) {
1059                        qc->flags |= ATA_QCFLAG_FAILED;
1060                        ata_qc_complete(qc);
1061                        nr_aborted++;
1062                }
1063        }
1064
1065        if (!nr_aborted)
1066                ata_port_schedule_eh(ap);
1067
1068        return nr_aborted;
1069}
1070
1071/**
1072 *      ata_link_abort - abort all qc's on the link
1073 *      @link: ATA link to abort qc's for
1074 *
1075 *      Abort all active qc's on @link and schedule EH.
1076 *
1077 *      LOCKING:
1078 *      spin_lock_irqsave(host lock)
1079 *
1080 *      RETURNS:
1081 *      Number of aborted qc's.
1082 */
1083int ata_link_abort(struct ata_link *link)
1084{
1085        return ata_do_link_abort(link->ap, link);
1086}
1087
1088/**
1089 *      ata_port_abort - abort all qc's on the port
1090 *      @ap: ATA port to abort qc's for
1091 *
1092 *      Abort all active qc's of @ap and schedule EH.
1093 *
1094 *      LOCKING:
1095 *      spin_lock_irqsave(host lock)
1096 *
1097 *      RETURNS:
1098 *      Number of aborted qc's.
1099 */
1100int ata_port_abort(struct ata_port *ap)
1101{
1102        return ata_do_link_abort(ap, NULL);
1103}
1104
1105/**
1106 *      __ata_port_freeze - freeze port
1107 *      @ap: ATA port to freeze
1108 *
1109 *      This function is called when HSM violation or some other
1110 *      condition disrupts normal operation of the port.  A frozen port
1111 *      is not allowed to perform any operation until the port is
1112 *      thawed, which usually follows a successful reset.
1113 *
1114 *      ap->ops->freeze() callback can be used for freezing the port
1115 *      hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
1116 *      port cannot be frozen hardware-wise, the interrupt handler
1117 *      must ack and clear interrupts unconditionally while the port
1118 *      is frozen.
1119 *
1120 *      LOCKING:
1121 *      spin_lock_irqsave(host lock)
1122 */
1123static void __ata_port_freeze(struct ata_port *ap)
1124{
1125        WARN_ON(!ap->ops->error_handler);
1126
1127        if (ap->ops->freeze)
1128                ap->ops->freeze(ap);
1129
1130        ap->pflags |= ATA_PFLAG_FROZEN;
1131
1132        DPRINTK("ata%u port frozen\n", ap->print_id);
1133}
1134
1135/**
1136 *      ata_port_freeze - abort & freeze port
1137 *      @ap: ATA port to freeze
1138 *
1139 *      Abort and freeze @ap.  The freeze operation must be called
1140 *      first, because some hardware requires special operations
1141 *      before the taskfile registers are accessible.
1142 *
1143 *      LOCKING:
1144 *      spin_lock_irqsave(host lock)
1145 *
1146 *      RETURNS:
1147 *      Number of aborted commands.
1148 */
1149int ata_port_freeze(struct ata_port *ap)
1150{
1151        int nr_aborted;
1152
1153        WARN_ON(!ap->ops->error_handler);
1154
1155        __ata_port_freeze(ap);
1156        nr_aborted = ata_port_abort(ap);
1157
1158        return nr_aborted;
1159}
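
/*
 * Illustrative sketch (editor's addition): a driver's interrupt
 * handler reacting to a fatal condition such as an HSM violation.
 * Freezing aborts the in-flight qcs, schedules EH and keeps the port
 * quiet until EH thaws it after a reset.  The host lock is assumed to
 * be held, as it would be in a real interrupt handler.
 */
static void example_handle_fatal_irq(struct ata_port *ap)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ehi->err_mask |= AC_ERR_HSM;
	ehi->action |= ATA_EH_RESET;
	ata_port_freeze(ap);	/* aborts qcs, EH will be scheduled */
}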
1160
1161/**
1162 *      sata_async_notification - SATA async notification handler
1163 *      @ap: ATA port where async notification is received
1164 *
1165 *      Handler to be called when async notification via SDB FIS is
1166 *      received.  This function schedules EH if necessary.
1167 *
1168 *      LOCKING:
1169 *      spin_lock_irqsave(host lock)
1170 *
1171 *      RETURNS:
1172 *      1 if EH is scheduled, 0 otherwise.
1173 */
1174int sata_async_notification(struct ata_port *ap)
1175{
1176        u32 sntf;
1177        int rc;
1178
1179        if (!(ap->flags & ATA_FLAG_AN))
1180                return 0;
1181
1182        rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
1183        if (rc == 0)
1184                sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);
1185
1186        if (!sata_pmp_attached(ap) || rc) {
1187                /* PMP is not attached or SNTF is not available */
1188                if (!sata_pmp_attached(ap)) {
1189                        /* PMP is not attached.  Check whether ATAPI
1190                         * AN is configured.  If so, notify media
1191                         * change.
1192                         */
1193                        struct ata_device *dev = ap->link.device;
1194
1195                        if ((dev->class == ATA_DEV_ATAPI) &&
1196                            (dev->flags & ATA_DFLAG_AN))
1197                                ata_scsi_media_change_notify(dev);
1198                        return 0;
1199                } else {
1200                        /* PMP is attached but SNTF is not available.
1201                         * ATAPI async media change notification is
1202                         * not used.  The PMP must be reporting PHY
1203                         * status change, schedule EH.
1204                         */
1205                        ata_port_schedule_eh(ap);
1206                        return 1;
1207                }
1208        } else {
1209                /* PMP is attached and SNTF is available */
1210                struct ata_link *link;
1211
1212                /* check and notify ATAPI AN */
1213                ata_for_each_link(link, ap, EDGE) {
1214                        if (!(sntf & (1 << link->pmp)))
1215                                continue;
1216
1217                        if ((link->device->class == ATA_DEV_ATAPI) &&
1218                            (link->device->flags & ATA_DFLAG_AN))
1219                                ata_scsi_media_change_notify(link->device);
1220                }
1221
1222                /* If PMP is reporting that PHY status of some
1223                 * downstream ports has changed, schedule EH.
1224                 */
1225                if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
1226                        ata_port_schedule_eh(ap);
1227                        return 1;
1228                }
1229
1230                return 0;
1231        }
1232}
1233
1234/**
1235 *      ata_eh_freeze_port - EH helper to freeze port
1236 *      @ap: ATA port to freeze
1237 *
1238 *      Freeze @ap.
1239 *
1240 *      LOCKING:
1241 *      None.
1242 */
1243void ata_eh_freeze_port(struct ata_port *ap)
1244{
1245        unsigned long flags;
1246
1247        if (!ap->ops->error_handler)
1248                return;
1249
1250        spin_lock_irqsave(ap->lock, flags);
1251        __ata_port_freeze(ap);
1252        spin_unlock_irqrestore(ap->lock, flags);
1253}
1254
1255/**
1256 *      ata_eh_thaw_port - EH helper to thaw port
1257 *      @ap: ATA port to thaw
1258 *
1259 *      Thaw frozen port @ap.
1260 *
1261 *      LOCKING:
1262 *      None.
1263 */
1264void ata_eh_thaw_port(struct ata_port *ap)
1265{
1266        unsigned long flags;
1267
1268        if (!ap->ops->error_handler)
1269                return;
1270
1271        spin_lock_irqsave(ap->lock, flags);
1272
1273        ap->pflags &= ~ATA_PFLAG_FROZEN;
1274
1275        if (ap->ops->thaw)
1276                ap->ops->thaw(ap);
1277
1278        spin_unlock_irqrestore(ap->lock, flags);
1279
1280        DPRINTK("ata%u port thawed\n", ap->print_id);
1281}
1282
1283static void ata_eh_scsidone(struct scsi_cmnd *scmd)
1284{
1285        /* nada */
1286}
1287
1288static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
1289{
1290        struct ata_port *ap = qc->ap;
1291        struct scsi_cmnd *scmd = qc->scsicmd;
1292        unsigned long flags;
1293
1294        spin_lock_irqsave(ap->lock, flags);
1295        qc->scsidone = ata_eh_scsidone;
1296        __ata_qc_complete(qc);
1297        WARN_ON(ata_tag_valid(qc->tag));
1298        spin_unlock_irqrestore(ap->lock, flags);
1299
1300        scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
1301}
1302
1303/**
1304 *      ata_eh_qc_complete - Complete an active ATA command from EH
1305 *      @qc: Command to complete
1306 *
1307 *      Indicate to the mid and upper layers that an ATA command has
1308 *      completed.  To be used from EH.
1309 */
1310void ata_eh_qc_complete(struct ata_queued_cmd *qc)
1311{
1312        struct scsi_cmnd *scmd = qc->scsicmd;
1313        scmd->retries = scmd->allowed;
1314        __ata_eh_qc_complete(qc);
1315}
1316
1317/**
1318 *      ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
1319 *      @qc: Command to retry
1320 *
1321 *      Indicate to the mid and upper layers that an ATA command
1322 *      should be retried.  To be used from EH.
1323 *
1324 *      SCSI midlayer limits the number of retries to scmd->allowed.
1325 *      scmd->retries is decremented for commands which get retried
1326 *      due to unrelated failures (qc->err_mask is zero).
1327 */
1328void ata_eh_qc_retry(struct ata_queued_cmd *qc)
1329{
1330        struct scsi_cmnd *scmd = qc->scsicmd;
1331        if (!qc->err_mask && scmd->retries)
1332                scmd->retries--;
1333        __ata_eh_qc_complete(qc);
1334}
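
/*
 * Illustrative sketch (editor's addition): the finishing step for a
 * recovered command, simplified from ata_eh_finish().  Commands with a
 * real error are completed so the midlayer sees the failure; commands
 * that only got caught up in EH (qc->err_mask == 0) are retried
 * without consuming scmd->retries.
 */
static void example_finish_qc(struct ata_queued_cmd *qc)
{
	if (qc->err_mask)
		ata_eh_qc_complete(qc);
	else
		ata_eh_qc_retry(qc);
}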
1335
1336/**
1337 *      ata_dev_disable - disable ATA device
1338 *      @dev: ATA device to disable
1339 *
1340 *      Disable @dev.
1341 *
1342 *      LOCKING:
1343 *      EH context.
1344 */
1345void ata_dev_disable(struct ata_device *dev)
1346{
1347        if (!ata_dev_enabled(dev))
1348                return;
1349
1350        if (ata_msg_drv(dev->link->ap))
1351                ata_dev_warn(dev, "disabled\n");
1352        ata_acpi_on_disable(dev);
1353        ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
1354        dev->class++;
1355
1356        /* From now till the next successful probe, ering is used to
1357         * track probe failures.  Clear accumulated device error info.
1358         */
1359        ata_ering_clear(&dev->ering);
1360}
1361
1362/**
1363 *      ata_eh_detach_dev - detach ATA device
1364 *      @dev: ATA device to detach
1365 *
1366 *      Detach @dev.
1367 *
1368 *      LOCKING:
1369 *      None.
1370 */
1371void ata_eh_detach_dev(struct ata_device *dev)
1372{
1373        struct ata_link *link = dev->link;
1374        struct ata_port *ap = link->ap;
1375        struct ata_eh_context *ehc = &link->eh_context;
1376        unsigned long flags;
1377
1378        ata_dev_disable(dev);
1379
1380        spin_lock_irqsave(ap->lock, flags);
1381
1382        dev->flags &= ~ATA_DFLAG_DETACH;
1383
1384        if (ata_scsi_offline_dev(dev)) {
1385                dev->flags |= ATA_DFLAG_DETACHED;
1386                ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
1387        }
1388
1389        /* clear per-dev EH info */
1390        ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
1391        ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
1392        ehc->saved_xfer_mode[dev->devno] = 0;
1393        ehc->saved_ncq_enabled &= ~(1 << dev->devno);
1394
1395        spin_unlock_irqrestore(ap->lock, flags);
1396}
1397
1398/**
1399 *      ata_eh_about_to_do - about to perform eh_action
1400 *      @link: target ATA link
1401 *      @dev: target ATA dev for per-dev action (can be NULL)
1402 *      @action: action about to be performed
1403 *
1404 *      Called just before performing EH actions to clear related bits
1405 *      in @link->eh_info such that eh actions are not unnecessarily
1406 *      repeated.
1407 *
1408 *      LOCKING:
1409 *      None.
1410 */
1411void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
1412                        unsigned int action)
1413{
1414        struct ata_port *ap = link->ap;
1415        struct ata_eh_info *ehi = &link->eh_info;
1416        struct ata_eh_context *ehc = &link->eh_context;
1417        unsigned long flags;
1418
1419        spin_lock_irqsave(ap->lock, flags);
1420
1421        ata_eh_clear_action(link, dev, ehi, action);
1422
1423        /* About to take EH action, set RECOVERED.  Ignore actions on
1424         * slave links as master will do them again.
1425         */
1426        if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
1427                ap->pflags |= ATA_PFLAG_RECOVERED;
1428
1429        spin_unlock_irqrestore(ap->lock, flags);
1430}
1431
1432/**
1433 *      ata_eh_done - EH action complete
1434 *      @link: target ATA link
1435 *      @dev: target ATA dev for per-dev action (can be NULL)
1436 *      @action: action just completed
1437 *
1438 *      Called right after performing EH actions to clear related bits
1439 *      in @link->eh_context.
1440 *
1441 *      LOCKING:
1442 *      None.
1443 */
1444void ata_eh_done(struct ata_link *link, struct ata_device *dev,
1445                 unsigned int action)
1446{
1447        struct ata_eh_context *ehc = &link->eh_context;
1448
1449        ata_eh_clear_action(link, dev, &ehc->i, action);
1450}
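
/*
 * Illustrative sketch (editor's addition): how EH brackets an action
 * with the two helpers above so the request bits are cleared from
 * eh_info before the action and from eh_context once it is done.
 * do_reset() is a hypothetical stand-in for the actual reset sequence.
 */
static void example_do_reset_action(struct ata_link *link)
{
	ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
	do_reset(link);
	ata_eh_done(link, NULL, ATA_EH_RESET);
}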
1451
1452/**
1453 *      ata_err_string - convert err_mask to descriptive string
1454 *      @err_mask: error mask to convert to string
1455 *
1456 *      Convert @err_mask to descriptive string.  Errors are
1457 *      prioritized according to severity and only the most severe
1458 *      error is reported.
1459 *
1460 *      LOCKING:
1461 *      None.
1462 *
1463 *      RETURNS:
1464 *      Descriptive string for @err_mask
1465 */
1466static const char *ata_err_string(unsigned int err_mask)
1467{
1468        if (err_mask & AC_ERR_HOST_BUS)
1469                return "host bus error";
1470        if (err_mask & AC_ERR_ATA_BUS)
1471                return "ATA bus error";
1472        if (err_mask & AC_ERR_TIMEOUT)
1473                return "timeout";
1474        if (err_mask & AC_ERR_HSM)
1475                return "HSM violation";
1476        if (err_mask & AC_ERR_SYSTEM)
1477                return "internal error";
1478        if (err_mask & AC_ERR_MEDIA)
1479                return "media error";
1480        if (err_mask & AC_ERR_INVALID)
1481                return "invalid argument";
1482        if (err_mask & AC_ERR_DEV)
1483                return "device error";
1484        return "unknown error";
1485}
1486
1487/**
1488 *      ata_read_log_page - read a specific log page
1489 *      @dev: target device
1490 *      @log: log to read
1491 *      @page: page to read
1492 *      @buf: buffer to store read page
1493 *      @sectors: number of sectors to read
1494 *
1495 *      Read log page using READ_LOG_EXT command.
1496 *
1497 *      LOCKING:
1498 *      Kernel thread context (may sleep).
1499 *
1500 *      RETURNS:
1501 *      0 on success, AC_ERR_* mask otherwise.
1502 */
1503unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
1504                               u8 page, void *buf, unsigned int sectors)
1505{
1506        struct ata_taskfile tf;
1507        unsigned int err_mask;
1508
1509        DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);
1510
1511        ata_tf_init(dev, &tf);
1512        tf.command = ATA_CMD_READ_LOG_EXT;
1513        tf.lbal = log;
1514        tf.lbam = page;
1515        tf.nsect = sectors;
1516        tf.hob_nsect = sectors >> 8;
1517        tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
1518        tf.protocol = ATA_PROT_PIO;
1519
1520        err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1521                                     buf, sectors * ATA_SECT_SIZE, 0);
1522
1523        DPRINTK("EXIT, err_mask=%x\n", err_mask);
1524        return err_mask;
1525}
1526
1527/**
1528 *      ata_eh_read_log_10h - Read log page 10h for NCQ error details
1529 *      @dev: Device to read log page 10h from
1530 *      @tag: Resulting tag of the failed command
1531 *      @tf: Resulting taskfile registers of the failed command
1532 *
1533 *      Read log page 10h to obtain NCQ error details and clear error
1534 *      condition.
1535 *
1536 *      LOCKING:
1537 *      Kernel thread context (may sleep).
1538 *
1539 *      RETURNS:
1540 *      0 on success, -errno otherwise.
1541 */
1542static int ata_eh_read_log_10h(struct ata_device *dev,
1543                               int *tag, struct ata_taskfile *tf)
1544{
1545        u8 *buf = dev->link->ap->sector_buf;
1546        unsigned int err_mask;
1547        u8 csum;
1548        int i;
1549
1550        err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, 0, buf, 1);
1551        if (err_mask)
1552                return -EIO;
1553
1554        csum = 0;
1555        for (i = 0; i < ATA_SECT_SIZE; i++)
1556                csum += buf[i];
1557        if (csum)
1558                ata_dev_warn(dev, "invalid checksum 0x%x on log page 10h\n",
1559                             csum);
1560
1561        if (buf[0] & 0x80)
1562                return -ENOENT;
1563
1564        *tag = buf[0] & 0x1f;
1565
1566        tf->command = buf[2];
1567        tf->feature = buf[3];
1568        tf->lbal = buf[4];
1569        tf->lbam = buf[5];
1570        tf->lbah = buf[6];
1571        tf->device = buf[7];
1572        tf->hob_lbal = buf[8];
1573        tf->hob_lbam = buf[9];
1574        tf->hob_lbah = buf[10];
1575        tf->nsect = buf[12];
1576        tf->hob_nsect = buf[13];
1577
1578        return 0;
1579}
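
/*
 * Illustrative sketch (editor's addition): how NCQ error analysis can
 * consume the log page 10h reader above.  A device aborts all
 * outstanding NCQ commands on error; the log identifies the one tag
 * that actually failed so only that qc carries the device error.
 * Simplified from the real ata_eh_analyze_ncq_error().
 */
static void example_analyze_ncq(struct ata_link *link)
{
	struct ata_taskfile tf;
	struct ata_queued_cmd *qc;
	int tag;

	if (ata_eh_read_log_10h(link->device, &tag, &tf))
		return;

	qc = __ata_qc_from_tag(link->ap, tag);
	if (qc) {
		qc->result_tf = tf;
		qc->err_mask |= AC_ERR_DEV;
	}
}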
1580
1581/**
1582 *      atapi_eh_tur - perform ATAPI TEST_UNIT_READY
1583 *      @dev: target ATAPI device
1584 *      @r_sense_key: out parameter for sense_key
1585 *
1586 *      Perform ATAPI TEST_UNIT_READY.
1587 *
1588 *      LOCKING:
1589 *      EH context (may sleep).
1590 *
1591 *      RETURNS:
1592 *      0 on success, AC_ERR_* mask on failure.
1593 */
1594unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
1595{
1596        u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
1597        struct ata_taskfile tf;
1598        unsigned int err_mask;
1599
1600        ata_tf_init(dev, &tf);
1601
1602        tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1603        tf.command = ATA_CMD_PACKET;
1604        tf.protocol = ATAPI_PROT_NODATA;
1605
1606        err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
1607        if (err_mask == AC_ERR_DEV)
1608                *r_sense_key = tf.feature >> 4;
1609        return err_mask;
1610}
1611
1612/**
1613 *      atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
1614 *      @dev: device to perform REQUEST_SENSE to
1615 *      @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
1616 *      @dfl_sense_key: default sense key to use
1617 *
1618 *      Perform ATAPI REQUEST_SENSE after the device reported CHECK
1619 *      CONDITION.  This function is an EH helper.
1620 *
1621 *      LOCKING:
1622 *      Kernel thread context (may sleep).
1623 *
1624 *      RETURNS:
1625 *      0 on success, AC_ERR_* mask on failure
1626 */
1627unsigned int atapi_eh_request_sense(struct ata_device *dev,
1628                                           u8 *sense_buf, u8 dfl_sense_key)
1629{
1630        u8 cdb[ATAPI_CDB_LEN] =
1631                { REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
1632        struct ata_port *ap = dev->link->ap;
1633        struct ata_taskfile tf;
1634
1635        DPRINTK("ATAPI request sense\n");
1636
1637        /* FIXME: is this needed? */
1638        memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
1639
1640        /* initialize sense_buf with the error register,
1641         * for the case where they are -not- overwritten
1642         */
1643        sense_buf[0] = 0x70;
1644        sense_buf[2] = dfl_sense_key;
1645
1646        /* some devices time out if garbage left in tf */
1647        ata_tf_init(dev, &tf);
1648
1649        tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1650        tf.command = ATA_CMD_PACKET;
1651
1652        /* is it pointless to prefer PIO for "safety reasons"? */
1653        if (ap->flags & ATA_FLAG_PIO_DMA) {
1654                tf.protocol = ATAPI_PROT_DMA;
1655                tf.feature |= ATAPI_PKT_DMA;
1656        } else {
1657                tf.protocol = ATAPI_PROT_PIO;
1658                tf.lbam = SCSI_SENSE_BUFFERSIZE;
1659                tf.lbah = 0;
1660        }
1661
1662        return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
1663                                 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
1664}
1665
1666/**
1667 *      ata_eh_analyze_serror - analyze SError for a failed port
1668 *      @link: ATA link to analyze SError for
1669 *
1670 *      Analyze SError if available and further determine cause of
1671 *      failure.
1672 *
1673 *      LOCKING:
1674 *      None.
1675 */
1676static void ata_eh_analyze_serror(struct ata_link *link)
1677{
1678        struct ata_eh_context *ehc = &link->eh_context;
1679        u32 serror = ehc->i.serror;
1680        unsigned int err_mask = 0, action = 0;
1681        u32 hotplug_mask;
1682
1683        if (serror & (SERR_PERSISTENT | SERR_DATA)) {
1684                err_mask |= AC_ERR_ATA_BUS;
1685                action |= ATA_EH_RESET;
1686        }
1687        if (serror & SERR_PROTOCOL) {
1688                err_mask |= AC_ERR_HSM;
1689                action |= ATA_EH_RESET;
1690        }
1691        if (serror & SERR_INTERNAL) {
1692                err_mask |= AC_ERR_SYSTEM;
1693                action |= ATA_EH_RESET;
1694        }
1695
1696        /* Determine whether a hotplug event has occurred.  Both
1697         * SError.N/X are considered hotplug events for enabled or
1698         * host links.  For disabled PMP links, only the N bit is
1699         * considered, as the X bit is left at 1 for link plugging.
1700         */
1701        if (link->lpm_policy > ATA_LPM_MAX_POWER)
1702                hotplug_mask = 0;       /* hotplug doesn't work w/ LPM */
1703        else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
1704                hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
1705        else
1706                hotplug_mask = SERR_PHYRDY_CHG;
1707
1708        if (serror & hotplug_mask)
1709                ata_ehi_hotplugged(&ehc->i);
1710
1711        ehc->i.err_mask |= err_mask;
1712        ehc->i.action |= action;
1713}
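
/*
 * Illustrative sketch (hypothetical helper): the usual way an LLDD's
 * interrupt handler hands a captured SError value to EH, which then
 * runs ata_eh_analyze_serror() above during autopsy.
 */
static void __maybe_unused example_report_serror(struct ata_port *ap,
                                                 u32 serror)
{
        struct ata_eh_info *ehi = &ap->link.eh_info;

        ata_ehi_clear_desc(ehi);
        ata_ehi_push_desc(ehi, "SError 0x%08x", serror);
        ehi->serror |= serror;

        ata_port_abort(ap);     /* abort active qcs and schedule EH */
}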
1714
1715/**
1716 *      ata_eh_analyze_ncq_error - analyze NCQ error
1717 *      @link: ATA link to analyze NCQ error for
1718 *
1719 *      Read log page 10h, determine the offending qc and acquire
1720 *      error status TF.  For NCQ device errors, all an LLDD has to
1721 *      do is set AC_ERR_DEV in ehi->err_mask.  This function takes
1722 *      care of the rest.
1723 *
1724 *      LOCKING:
1725 *      Kernel thread context (may sleep).
1726 */
1727void ata_eh_analyze_ncq_error(struct ata_link *link)
1728{
1729        struct ata_port *ap = link->ap;
1730        struct ata_eh_context *ehc = &link->eh_context;
1731        struct ata_device *dev = link->device;
1732        struct ata_queued_cmd *qc;
1733        struct ata_taskfile tf;
1734        int tag, rc;
1735
1736        /* if frozen, we can't do much */
1737        if (ap->pflags & ATA_PFLAG_FROZEN)
1738                return;
1739
1740        /* is it an NCQ device error? */
1741        if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
1742                return;
1743
1744        /* has LLDD analyzed already? */
1745        for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1746                qc = __ata_qc_from_tag(ap, tag);
1747
1748                if (!(qc->flags & ATA_QCFLAG_FAILED))
1749                        continue;
1750
1751                if (qc->err_mask)
1752                        return;
1753        }
1754
1755        /* okay, this error is ours */
1756        memset(&tf, 0, sizeof(tf));
1757        rc = ata_eh_read_log_10h(dev, &tag, &tf);
1758        if (rc) {
1759                ata_link_err(link, "failed to read log page 10h (errno=%d)\n",
1760                             rc);
1761                return;
1762        }
1763
1764        if (!(link->sactive & (1 << tag))) {
1765                ata_link_err(link, "log page 10h reported inactive tag %d\n",
1766                             tag);
1767                return;
1768        }
1769
1770        /* we've got the perpetrator, condemn it */
1771        qc = __ata_qc_from_tag(ap, tag);
1772        memcpy(&qc->result_tf, &tf, sizeof(tf));
1773        qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
1774        qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
1775        ehc->i.err_mask &= ~AC_ERR_DEV;
1776}
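
/*
 * Illustrative sketch (hypothetical helper): on an NCQ device error,
 * an LLDD only needs to flag AC_ERR_DEV and kick EH; the function
 * above then pins the error on the offending tag via log page 10h.
 */
static void __maybe_unused example_ncq_dev_error(struct ata_link *link)
{
        link->eh_info.err_mask |= AC_ERR_DEV;
        ata_link_abort(link);
}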
1777
1778/**
1779 *      ata_eh_analyze_tf - analyze taskfile of a failed qc
1780 *      @qc: qc to analyze
1781 *      @tf: Taskfile registers to analyze
1782 *
1783 *      Analyze taskfile of @qc and further determine cause of
1784 *      failure.  This function also requests ATAPI sense data if
1785 *      available.
1786 *
1787 *      LOCKING:
1788 *      Kernel thread context (may sleep).
1789 *
1790 *      RETURNS:
1791 *      Determined recovery action
1792 */
1793static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
1794                                      const struct ata_taskfile *tf)
1795{
1796        unsigned int tmp, action = 0;
1797        u8 stat = tf->command, err = tf->feature;
1798
1799        if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
1800                qc->err_mask |= AC_ERR_HSM;
1801                return ATA_EH_RESET;
1802        }
1803
1804        if (stat & (ATA_ERR | ATA_DF))
1805                qc->err_mask |= AC_ERR_DEV;
1806        else
1807                return 0;
1808
1809        switch (qc->dev->class) {
1810        case ATA_DEV_ATA:
1811                if (err & ATA_ICRC)
1812                        qc->err_mask |= AC_ERR_ATA_BUS;
1813                if (err & ATA_UNC)
1814                        qc->err_mask |= AC_ERR_MEDIA;
1815                if (err & ATA_IDNF)
1816                        qc->err_mask |= AC_ERR_INVALID;
1817                break;
1818
1819        case ATA_DEV_ATAPI:
1820                if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
1821                        tmp = atapi_eh_request_sense(qc->dev,
1822                                                qc->scsicmd->sense_buffer,
1823                                                qc->result_tf.feature >> 4);
1824                        if (!tmp) {
1825                                /* ATA_QCFLAG_SENSE_VALID is used to
1826                                 * tell atapi_qc_complete() that sense
1827                                 * data is already valid.
1828                                 *
1829                                 * TODO: interpret sense data and set
1830                                 * appropriate err_mask.
1831                                 */
1832                                qc->flags |= ATA_QCFLAG_SENSE_VALID;
1833                        } else
1834                                qc->err_mask |= tmp;
1835                }
1836        }
1837
1838        if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
1839                action |= ATA_EH_RESET;
1840
1841        return action;
1842}
1843
1844static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
1845                                   int *xfer_ok)
1846{
1847        int base = 0;
1848
1849        if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
1850                *xfer_ok = 1;
1851
1852        if (!*xfer_ok)
1853                base = ATA_ECAT_DUBIOUS_NONE;
1854
1855        if (err_mask & AC_ERR_ATA_BUS)
1856                return base + ATA_ECAT_ATA_BUS;
1857
1858        if (err_mask & AC_ERR_TIMEOUT)
1859                return base + ATA_ECAT_TOUT_HSM;
1860
1861        if (eflags & ATA_EFLAG_IS_IO) {
1862                if (err_mask & AC_ERR_HSM)
1863                        return base + ATA_ECAT_TOUT_HSM;
1864                if ((err_mask &
1865                     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
1866                        return base + ATA_ECAT_UNK_DEV;
1867        }
1868
1869        return 0;
1870}
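
/*
 * For example: AC_ERR_TIMEOUT categorizes as ATA_ECAT_TOUT_HSM once a
 * transfer has been verified, but as ATA_ECAT_DUBIOUS_TOUT_HSM while
 * ATA_EFLAG_DUBIOUS_XFER is still set on this and every newer entry.
 */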
1871
1872struct speed_down_verdict_arg {
1873        u64 since;
1874        int xfer_ok;
1875        int nr_errors[ATA_ECAT_NR];
1876};
1877
1878static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
1879{
1880        struct speed_down_verdict_arg *arg = void_arg;
1881        int cat;
1882
1883        if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since))
1884                return -1;
1885
1886        cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
1887                                      &arg->xfer_ok);
1888        arg->nr_errors[cat]++;
1889
1890        return 0;
1891}
1892
1893/**
1894 *      ata_eh_speed_down_verdict - Determine speed down verdict
1895 *      @dev: Device of interest
1896 *
1897 *      This function examines the error ring of @dev and determines
1898 *      whether NCQ needs to be turned off, transfer speed should be
1899 *      stepped down, or falling back to PIO is necessary.
1900 *
1901 *      ECAT_ATA_BUS    : ATA_BUS error for any command
1902 *
1903 *      ECAT_TOUT_HSM   : TIMEOUT for any command or HSM violation for
1904 *                        IO commands
1905 *
1906 *      ECAT_UNK_DEV    : Unknown DEV error for IO commands
1907 *
1908 *      ECAT_DUBIOUS_*  : Identical to above three but occurred while
1909 *                        data transfer hasn't been verified.
1910 *
1911 *      Verdicts are
1912 *
1913 *      NCQ_OFF         : Turn off NCQ.
1914 *
1915 *      SPEED_DOWN      : Speed down transfer speed but don't fall back
1916 *                        to PIO.
1917 *
1918 *      FALLBACK_TO_PIO : Fall back to PIO.
1919 *
1920 *      Even if multiple verdicts are returned, only one action is
1921 *      taken per error.  An action triggered by non-DUBIOUS errors
1922 *      clears ering, while one triggered by DUBIOUS_* errors doesn't.
1923 *      This is to expedite speed down decisions right after device is
1924 *      initially configured.
1925 *
1926 *      The following are the speed down rules.  #1 and #2 deal with
1927 *      DUBIOUS errors.
1928 *
1929 *      1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
1930 *         occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
1931 *
1932 *      2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
1933 *         occurred during last 5 mins, NCQ_OFF.
1934 *
1935 *      3. If more than 6 ATA_BUS, TOUT_HSM or UNK_DEV errors
1936 *         occurred during last 5 mins, FALLBACK_TO_PIO.
1937 *
1938 *      4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
1939 *         during last 10 mins, NCQ_OFF.
1940 *
1941 *      5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
1942 *         UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
1943 *
1944 *      LOCKING:
1945 *      Inherited from caller.
1946 *
1947 *      RETURNS:
1948 *      OR of ATA_EH_SPDN_* flags.
1949 */
1950static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
1951{
1952        const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
1953        u64 j64 = get_jiffies_64();
1954        struct speed_down_verdict_arg arg;
1955        unsigned int verdict = 0;
1956
1957        /* scan past 5 mins of error history */
1958        memset(&arg, 0, sizeof(arg));
1959        arg.since = j64 - min(j64, j5mins);
1960        ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
1961
1962        if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
1963            arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
1964                verdict |= ATA_EH_SPDN_SPEED_DOWN |
1965                        ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;
1966
1967        if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
1968            arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
1969                verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;
1970
1971        if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
1972            arg.nr_errors[ATA_ECAT_TOUT_HSM] +
1973            arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
1974                verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;
1975
1976        /* scan past 10 mins of error history */
1977        memset(&arg, 0, sizeof(arg));
1978        arg.since = j64 - min(j64, j10mins);
1979        ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
1980
1981        if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
1982            arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
1983                verdict |= ATA_EH_SPDN_NCQ_OFF;
1984
1985        if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
1986            arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
1987            arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
1988                verdict |= ATA_EH_SPDN_SPEED_DOWN;
1989
1990        return verdict;
1991}
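
/*
 * Worked example: two ATA_ECAT_DUBIOUS_ATA_BUS errors within the last
 * five minutes trip rule #1 above, yielding SPEED_DOWN |
 * FALLBACK_TO_PIO | KEEP_ERRORS: the link slows down but the error
 * ring is preserved so a repeat offense escalates quickly.
 */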
1992
1993/**
1994 *      ata_eh_speed_down - record error and speed down if necessary
1995 *      @dev: Failed device
1996 *      @eflags: mask of ATA_EFLAG_* flags
1997 *      @err_mask: err_mask of the error
1998 *
1999 *      Record error and examine error history to determine whether
2000 *      adjusting transmission speed is necessary.  It also sets
2001 *      transmission limits appropriately if such adjustment is
2002 *      necessary.
2003 *
2004 *      LOCKING:
2005 *      Kernel thread context (may sleep).
2006 *
2007 *      RETURNS:
2008 *      Determined recovery action.
2009 */
2010static unsigned int ata_eh_speed_down(struct ata_device *dev,
2011                                unsigned int eflags, unsigned int err_mask)
2012{
2013        struct ata_link *link = ata_dev_phys_link(dev);
2014        int xfer_ok = 0;
2015        unsigned int verdict;
2016        unsigned int action = 0;
2017
2018        /* don't bother if Cat-0 error */
2019        if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
2020                return 0;
2021
2022        /* record error and determine whether speed down is necessary */
2023        ata_ering_record(&dev->ering, eflags, err_mask);
2024        verdict = ata_eh_speed_down_verdict(dev);
2025
2026        /* turn off NCQ? */
2027        if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
2028            (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
2029                           ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
2030                dev->flags |= ATA_DFLAG_NCQ_OFF;
2031                ata_dev_warn(dev, "NCQ disabled due to excessive errors\n");
2032                goto done;
2033        }
2034
2035        /* speed down? */
2036        if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
2037                /* speed down SATA link speed if possible */
2038                if (sata_down_spd_limit(link, 0) == 0) {
2039                        action |= ATA_EH_RESET;
2040                        goto done;
2041                }
2042
2043                /* lower transfer mode */
2044                if (dev->spdn_cnt < 2) {
2045                        static const int dma_dnxfer_sel[] =
2046                                { ATA_DNXFER_DMA, ATA_DNXFER_40C };
2047                        static const int pio_dnxfer_sel[] =
2048                                { ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
2049                        int sel;
2050
2051                        if (dev->xfer_shift != ATA_SHIFT_PIO)
2052                                sel = dma_dnxfer_sel[dev->spdn_cnt];
2053                        else
2054                                sel = pio_dnxfer_sel[dev->spdn_cnt];
2055
2056                        dev->spdn_cnt++;
2057
2058                        if (ata_down_xfermask_limit(dev, sel) == 0) {
2059                                action |= ATA_EH_RESET;
2060                                goto done;
2061                        }
2062                }
2063        }
2064
2065        /* Fall back to PIO?  Slowing down to PIO is meaningless for
2066         * SATA ATA devices.  Consider it only for PATA and SATA ATAPI.
2067         */
2068        if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
2069            (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
2070            (dev->xfer_shift != ATA_SHIFT_PIO)) {
2071                if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
2072                        dev->spdn_cnt = 0;
2073                        action |= ATA_EH_RESET;
2074                        goto done;
2075                }
2076        }
2077
2078        return 0;
2079 done:
2080        /* device has been slowed down, blow away error history */
2081        if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
2082                ata_ering_clear(&dev->ering);
2083        return action;
2084}
2085
2086/**
2087 *      ata_eh_worth_retry - analyze error and decide whether to retry
2088 *      @qc: qc to possibly retry
2089 *
2090 *      Look at the cause of the error and decide if a retry
2091 *      might be useful or not.  We don't want to retry media errors
2092 *      because the drive itself has probably already taken 10-30 seconds
2093 *      doing its own internal retries before reporting the failure.
2094 */
2095static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc)
2096{
2097        if (qc->err_mask & AC_ERR_MEDIA)
2098                return 0;       /* don't retry media errors */
2099        if (qc->flags & ATA_QCFLAG_IO)
2100                return 1;       /* otherwise retry anything from fs stack */
2101        if (qc->err_mask & AC_ERR_INVALID)
2102                return 0;       /* don't retry these */
2103        return qc->err_mask != AC_ERR_DEV;  /* retry if not dev error */
2104}
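
/*
 * For example: a timed-out READ DMA from the fs stack is retried
 * (ATA_QCFLAG_IO is set), while an internal command failing with plain
 * AC_ERR_DEV is not, since the device decisively rejected it.
 */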
2105
2106/**
2107 *      ata_eh_link_autopsy - analyze error and determine recovery action
2108 *      @link: host link to perform autopsy on
2109 *
2110 *      Analyze why @link failed and determine which recovery actions
2111 *      are needed.  This function also sets more detailed AC_ERR_*
2112 *      values and fills sense data for ATAPI CHECK SENSE.
2113 *
2114 *      LOCKING:
2115 *      Kernel thread context (may sleep).
2116 */
2117static void ata_eh_link_autopsy(struct ata_link *link)
2118{
2119        struct ata_port *ap = link->ap;
2120        struct ata_eh_context *ehc = &link->eh_context;
2121        struct ata_device *dev;
2122        unsigned int all_err_mask = 0, eflags = 0;
2123        int tag;
2124        u32 serror;
2125        int rc;
2126
2127        DPRINTK("ENTER\n");
2128
2129        if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
2130                return;
2131
2132        /* obtain and analyze SError */
2133        rc = sata_scr_read(link, SCR_ERROR, &serror);
2134        if (rc == 0) {
2135                ehc->i.serror |= serror;
2136                ata_eh_analyze_serror(link);
2137        } else if (rc != -EOPNOTSUPP) {
2138                /* SError read failed, force reset and probing */
2139                ehc->i.probe_mask |= ATA_ALL_DEVICES;
2140                ehc->i.action |= ATA_EH_RESET;
2141                ehc->i.err_mask |= AC_ERR_OTHER;
2142        }
2143
2144        /* analyze NCQ failure */
2145        ata_eh_analyze_ncq_error(link);
2146
2147        /* any real error trumps AC_ERR_OTHER */
2148        if (ehc->i.err_mask & ~AC_ERR_OTHER)
2149                ehc->i.err_mask &= ~AC_ERR_OTHER;
2150
2151        all_err_mask |= ehc->i.err_mask;
2152
2153        for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2154                struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2155
2156                if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2157                    ata_dev_phys_link(qc->dev) != link)
2158                        continue;
2159
2160                /* inherit upper level err_mask */
2161                qc->err_mask |= ehc->i.err_mask;
2162
2163                /* analyze TF */
2164                ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);
2165
2166                /* DEV errors are probably spurious in case of ATA_BUS error */
2167                if (qc->err_mask & AC_ERR_ATA_BUS)
2168                        qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
2169                                          AC_ERR_INVALID);
2170
2171                /* any real error trumps unknown error */
2172                if (qc->err_mask & ~AC_ERR_OTHER)
2173                        qc->err_mask &= ~AC_ERR_OTHER;
2174
2175                /* SENSE_VALID trumps dev/unknown error and revalidation */
2176                if (qc->flags & ATA_QCFLAG_SENSE_VALID)
2177                        qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
2178
2179                /* determine whether the command is worth retrying */
2180                if (ata_eh_worth_retry(qc))
2181                        qc->flags |= ATA_QCFLAG_RETRY;
2182
2183                /* accumulate error info */
2184                ehc->i.dev = qc->dev;
2185                all_err_mask |= qc->err_mask;
2186                if (qc->flags & ATA_QCFLAG_IO)
2187                        eflags |= ATA_EFLAG_IS_IO;
2188        }
2189
2190        /* enforce default EH actions */
2191        if (ap->pflags & ATA_PFLAG_FROZEN ||
2192            all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
2193                ehc->i.action |= ATA_EH_RESET;
2194        else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
2195                 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
2196                ehc->i.action |= ATA_EH_REVALIDATE;
2197
2198        /* If we have offending qcs and the associated failed device,
2199         * perform per-dev EH action only on the offending device.
2200         */
2201        if (ehc->i.dev) {
2202                ehc->i.dev_action[ehc->i.dev->devno] |=
2203                        ehc->i.action & ATA_EH_PERDEV_MASK;
2204                ehc->i.action &= ~ATA_EH_PERDEV_MASK;
2205        }
2206
2207        /* propagate timeout to host link */
2208        if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
2209                ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;
2210
2211        /* record error and consider speeding down */
2212        dev = ehc->i.dev;
2213        if (!dev && ((ata_link_max_devices(link) == 1 &&
2214                      ata_dev_enabled(link->device))))
2215                dev = link->device;
2216
2217        if (dev) {
2218                if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
2219                        eflags |= ATA_EFLAG_DUBIOUS_XFER;
2220                ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
2221        }
2222
2223        DPRINTK("EXIT\n");
2224}
2225
2226/**
2227 *      ata_eh_autopsy - analyze error and determine recovery action
2228 *      @ap: host port to perform autopsy on
2229 *
2230 *      Analyze all links of @ap and determine why they failed and
2231 *      which recovery actions are needed.
2232 *
2233 *      LOCKING:
2234 *      Kernel thread context (may sleep).
2235 */
2236void ata_eh_autopsy(struct ata_port *ap)
2237{
2238        struct ata_link *link;
2239
2240        ata_for_each_link(link, ap, EDGE)
2241                ata_eh_link_autopsy(link);
2242
2243        /* Handle the frigging slave link.  Autopsy is done similarly
2244         * but actions and flags are transferred over to the master
2245         * link and handled from there.
2246         */
2247        if (ap->slave_link) {
2248                struct ata_eh_context *mehc = &ap->link.eh_context;
2249                struct ata_eh_context *sehc = &ap->slave_link->eh_context;
2250
2251                /* transfer control flags from master to slave */
2252                sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;
2253
2254                /* perform autopsy on the slave link */
2255                ata_eh_link_autopsy(ap->slave_link);
2256
2257                /* transfer actions from slave to master and clear slave */
2258                ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2259                mehc->i.action          |= sehc->i.action;
2260                mehc->i.dev_action[1]   |= sehc->i.dev_action[1];
2261                mehc->i.flags           |= sehc->i.flags;
2262                ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2263        }
2264
2265        /* Autopsy of fanout ports can affect host link autopsy.
2266         * Perform host link autopsy last.
2267         */
2268        if (sata_pmp_attached(ap))
2269                ata_eh_link_autopsy(&ap->link);
2270}
2271
2272/**
2273 *      ata_get_cmd_descript - get description for ATA command
2274 *      @command: ATA command code to get description for
2275 *
2276 *      Return a textual description of the given command, or NULL if the
2277 *      command is not known.
2278 *
2279 *      LOCKING:
2280 *      None
2281 */
2282const char *ata_get_cmd_descript(u8 command)
2283{
2284#ifdef CONFIG_ATA_VERBOSE_ERROR
2285        static const struct
2286        {
2287                u8 command;
2288                const char *text;
2289        } cmd_descr[] = {
2290                { ATA_CMD_DEV_RESET,            "DEVICE RESET" },
2291                { ATA_CMD_CHK_POWER,            "CHECK POWER MODE" },
2292                { ATA_CMD_STANDBY,              "STANDBY" },
2293                { ATA_CMD_IDLE,                 "IDLE" },
2294                { ATA_CMD_EDD,                  "EXECUTE DEVICE DIAGNOSTIC" },
2295                { ATA_CMD_DOWNLOAD_MICRO,       "DOWNLOAD MICROCODE" },
2296                { ATA_CMD_NOP,                  "NOP" },
2297                { ATA_CMD_FLUSH,                "FLUSH CACHE" },
2298                { ATA_CMD_FLUSH_EXT,            "FLUSH CACHE EXT" },
2299                { ATA_CMD_ID_ATA,               "IDENTIFY DEVICE" },
2300                { ATA_CMD_ID_ATAPI,             "IDENTIFY PACKET DEVICE" },
2301                { ATA_CMD_SERVICE,              "SERVICE" },
2302                { ATA_CMD_READ,                 "READ DMA" },
2303                { ATA_CMD_READ_EXT,             "READ DMA EXT" },
2304                { ATA_CMD_READ_QUEUED,          "READ DMA QUEUED" },
2305                { ATA_CMD_READ_STREAM_EXT,      "READ STREAM EXT" },
2306                { ATA_CMD_READ_STREAM_DMA_EXT,  "READ STREAM DMA EXT" },
2307                { ATA_CMD_WRITE,                "WRITE DMA" },
2308                { ATA_CMD_WRITE_EXT,            "WRITE DMA EXT" },
2309                { ATA_CMD_WRITE_QUEUED,         "WRITE DMA QUEUED EXT" },
2310                { ATA_CMD_WRITE_STREAM_EXT,     "WRITE STREAM EXT" },
2311                { ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" },
2312                { ATA_CMD_WRITE_FUA_EXT,        "WRITE DMA FUA EXT" },
2313                { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
2314                { ATA_CMD_FPDMA_READ,           "READ FPDMA QUEUED" },
2315                { ATA_CMD_FPDMA_WRITE,          "WRITE FPDMA QUEUED" },
2316                { ATA_CMD_PIO_READ,             "READ SECTOR(S)" },
2317                { ATA_CMD_PIO_READ_EXT,         "READ SECTOR(S) EXT" },
2318                { ATA_CMD_PIO_WRITE,            "WRITE SECTOR(S)" },
2319                { ATA_CMD_PIO_WRITE_EXT,        "WRITE SECTOR(S) EXT" },
2320                { ATA_CMD_READ_MULTI,           "READ MULTIPLE" },
2321                { ATA_CMD_READ_MULTI_EXT,       "READ MULTIPLE EXT" },
2322                { ATA_CMD_WRITE_MULTI,          "WRITE MULTIPLE" },
2323                { ATA_CMD_WRITE_MULTI_EXT,      "WRITE MULTIPLE EXT" },
2324                { ATA_CMD_WRITE_MULTI_FUA_EXT,  "WRITE MULTIPLE FUA EXT" },
2325                { ATA_CMD_SET_FEATURES,         "SET FEATURES" },
2326                { ATA_CMD_SET_MULTI,            "SET MULTIPLE MODE" },
2327                { ATA_CMD_VERIFY,               "READ VERIFY SECTOR(S)" },
2328                { ATA_CMD_VERIFY_EXT,           "READ VERIFY SECTOR(S) EXT" },
2329                { ATA_CMD_WRITE_UNCORR_EXT,     "WRITE UNCORRECTABLE EXT" },
2330                { ATA_CMD_STANDBYNOW1,          "STANDBY IMMEDIATE" },
2331                { ATA_CMD_IDLEIMMEDIATE,        "IDLE IMMEDIATE" },
2332                { ATA_CMD_SLEEP,                "SLEEP" },
2333                { ATA_CMD_INIT_DEV_PARAMS,      "INITIALIZE DEVICE PARAMETERS" },
2334                { ATA_CMD_READ_NATIVE_MAX,      "READ NATIVE MAX ADDRESS" },
2335                { ATA_CMD_READ_NATIVE_MAX_EXT,  "READ NATIVE MAX ADDRESS EXT" },
2336                { ATA_CMD_SET_MAX,              "SET MAX ADDRESS" },
2337                { ATA_CMD_SET_MAX_EXT,          "SET MAX ADDRESS EXT" },
2338                { ATA_CMD_READ_LOG_EXT,         "READ LOG EXT" },
2339                { ATA_CMD_WRITE_LOG_EXT,        "WRITE LOG EXT" },
2340                { ATA_CMD_READ_LOG_DMA_EXT,     "READ LOG DMA EXT" },
2341                { ATA_CMD_WRITE_LOG_DMA_EXT,    "WRITE LOG DMA EXT" },
2342                { ATA_CMD_TRUSTED_RCV,          "TRUSTED RECEIVE" },
2343                { ATA_CMD_TRUSTED_RCV_DMA,      "TRUSTED RECEIVE DMA" },
2344                { ATA_CMD_TRUSTED_SND,          "TRUSTED SEND" },
2345                { ATA_CMD_TRUSTED_SND_DMA,      "TRUSTED SEND DMA" },
2346                { ATA_CMD_PMP_READ,             "READ BUFFER" },
2347                { ATA_CMD_PMP_WRITE,            "WRITE BUFFER" },
2348                { ATA_CMD_CONF_OVERLAY,         "DEVICE CONFIGURATION OVERLAY" },
2349                { ATA_CMD_SEC_SET_PASS,         "SECURITY SET PASSWORD" },
2350                { ATA_CMD_SEC_UNLOCK,           "SECURITY UNLOCK" },
2351                { ATA_CMD_SEC_ERASE_PREP,       "SECURITY ERASE PREPARE" },
2352                { ATA_CMD_SEC_ERASE_UNIT,       "SECURITY ERASE UNIT" },
2353                { ATA_CMD_SEC_FREEZE_LOCK,      "SECURITY FREEZE LOCK" },
2354                { ATA_CMD_SEC_DISABLE_PASS,     "SECURITY DISABLE PASSWORD" },
2355                { ATA_CMD_CONFIG_STREAM,        "CONFIGURE STREAM" },
2356                { ATA_CMD_SMART,                "SMART" },
2357                { ATA_CMD_MEDIA_LOCK,           "DOOR LOCK" },
2358                { ATA_CMD_MEDIA_UNLOCK,         "DOOR UNLOCK" },
2359                { ATA_CMD_DSM,                  "DATA SET MANAGEMENT" },
2360                { ATA_CMD_CHK_MED_CRD_TYP,      "CHECK MEDIA CARD TYPE" },
2361                { ATA_CMD_CFA_REQ_EXT_ERR,      "CFA REQUEST EXTENDED ERROR" },
2362                { ATA_CMD_CFA_WRITE_NE,         "CFA WRITE SECTORS WITHOUT ERASE" },
2363                { ATA_CMD_CFA_TRANS_SECT,       "CFA TRANSLATE SECTOR" },
2364                { ATA_CMD_CFA_ERASE,            "CFA ERASE SECTORS" },
2365                { ATA_CMD_CFA_WRITE_MULT_NE,    "CFA WRITE MULTIPLE WITHOUT ERASE" },
2366                { ATA_CMD_READ_LONG,            "READ LONG (with retries)" },
2367                { ATA_CMD_READ_LONG_ONCE,       "READ LONG (without retries)" },
2368                { ATA_CMD_WRITE_LONG,           "WRITE LONG (with retries)" },
2369                { ATA_CMD_WRITE_LONG_ONCE,      "WRITE LONG (without retries)" },
2370                { ATA_CMD_RESTORE,              "RECALIBRATE" },
2371                { 0,                            NULL } /* terminate list */
2372        };
2373
2374        unsigned int i;
2375        for (i = 0; cmd_descr[i].text; i++)
2376                if (cmd_descr[i].command == command)
2377                        return cmd_descr[i].text;
2378#endif
2379
2380        return NULL;
2381}
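
/*
 * Usage sketch (illustrative only, hypothetical helper): translating
 * an opcode for a log line; NULL is returned for unknown commands and
 * whenever CONFIG_ATA_VERBOSE_ERROR is not set.
 */
static void __maybe_unused example_describe_command(struct ata_device *dev,
                                                    u8 command)
{
        const char *text = ata_get_cmd_descript(command);

        ata_dev_warn(dev, "failed command: 0x%02x (%s)\n",
                     command, text ? text : "unknown");
}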
2382
2383/**
2384 *      ata_eh_link_report - report error handling to user
2385 *      @link: ATA link EH is going on
2386 *
2387 *      Report EH to user.
2388 *
2389 *      LOCKING:
2390 *      None.
2391 */
2392static void ata_eh_link_report(struct ata_link *link)
2393{
2394        struct ata_port *ap = link->ap;
2395        struct ata_eh_context *ehc = &link->eh_context;
2396        const char *frozen, *desc;
2397        char tries_buf[6];
2398        int tag, nr_failed = 0;
2399
2400        if (ehc->i.flags & ATA_EHI_QUIET)
2401                return;
2402
2403        desc = NULL;
2404        if (ehc->i.desc[0] != '\0')
2405                desc = ehc->i.desc;
2406
2407        for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2408                struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2409
2410                if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2411                    ata_dev_phys_link(qc->dev) != link ||
2412                    ((qc->flags & ATA_QCFLAG_QUIET) &&
2413                     qc->err_mask == AC_ERR_DEV))
2414                        continue;
2415                if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
2416                        continue;
2417
2418                nr_failed++;
2419        }
2420
2421        if (!nr_failed && !ehc->i.err_mask)
2422                return;
2423
2424        frozen = "";
2425        if (ap->pflags & ATA_PFLAG_FROZEN)
2426                frozen = " frozen";
2427
2428        memset(tries_buf, 0, sizeof(tries_buf));
2429        if (ap->eh_tries < ATA_EH_MAX_TRIES)
2430                snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d",
2431                         ap->eh_tries);
2432
2433        if (ehc->i.dev) {
2434                ata_dev_err(ehc->i.dev, "exception Emask 0x%x "
2435                            "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2436                            ehc->i.err_mask, link->sactive, ehc->i.serror,
2437                            ehc->i.action, frozen, tries_buf);
2438                if (desc)
2439                        ata_dev_err(ehc->i.dev, "%s\n", desc);
2440        } else {
2441                ata_link_err(link, "exception Emask 0x%x "
2442                             "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2443                             ehc->i.err_mask, link->sactive, ehc->i.serror,
2444                             ehc->i.action, frozen, tries_buf);
2445                if (desc)
2446                        ata_link_err(link, "%s\n", desc);
2447        }
2448
2449#ifdef CONFIG_ATA_VERBOSE_ERROR
2450        if (ehc->i.serror)
2451                ata_link_err(link,
2452                  "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
2453                  ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
2454                  ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
2455                  ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
2456                  ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
2457                  ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
2458                  ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
2459                  ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
2460                  ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
2461                  ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
2462                  ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
2463                  ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
2464                  ehc->i.serror & SERR_CRC ? "BadCRC " : "",
2465                  ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
2466                  ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
2467                  ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
2468                  ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
2469                  ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
2470#endif
2471
2472        for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2473                struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2474                struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
2475                const u8 *cdb = qc->cdb;
2476                char data_buf[20] = "";
2477                char cdb_buf[70] = "";
2478
2479                if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2480                    ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
2481                        continue;
2482
2483                if (qc->dma_dir != DMA_NONE) {
2484                        static const char *dma_str[] = {
2485                                [DMA_BIDIRECTIONAL]     = "bidi",
2486                                [DMA_TO_DEVICE]         = "out",
2487                                [DMA_FROM_DEVICE]       = "in",
2488                        };
2489                        static const char *prot_str[] = {
2490                                [ATA_PROT_PIO]          = "pio",
2491                                [ATA_PROT_DMA]          = "dma",
2492                                [ATA_PROT_NCQ]          = "ncq",
2493                                [ATAPI_PROT_PIO]        = "pio",
2494                                [ATAPI_PROT_DMA]        = "dma",
2495                        };
2496
2497                        snprintf(data_buf, sizeof(data_buf), " %s %u %s",
2498                                 prot_str[qc->tf.protocol], qc->nbytes,
2499                                 dma_str[qc->dma_dir]);
2500                }
2501
2502                if (ata_is_atapi(qc->tf.protocol)) {
2503                        if (qc->scsicmd)
2504                                scsi_print_command(qc->scsicmd);
2505                        else
2506                                snprintf(cdb_buf, sizeof(cdb_buf),
2507                                 "cdb %02x %02x %02x %02x %02x %02x %02x %02x  "
2508                                 "%02x %02x %02x %02x %02x %02x %02x %02x\n         ",
2509                                 cdb[0], cdb[1], cdb[2], cdb[3],
2510                                 cdb[4], cdb[5], cdb[6], cdb[7],
2511                                 cdb[8], cdb[9], cdb[10], cdb[11],
2512                                 cdb[12], cdb[13], cdb[14], cdb[15]);
2513                } else {
2514                        const char *descr = ata_get_cmd_descript(cmd->command);
2515                        if (descr)
2516                                ata_dev_err(qc->dev, "failed command: %s\n",
2517                                            descr);
2518                }
2519
2520                ata_dev_err(qc->dev,
2521                        "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
2522                        "tag %d%s\n         %s"
2523                        "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
2524                        "Emask 0x%x (%s)%s\n",
2525                        cmd->command, cmd->feature, cmd->nsect,
2526                        cmd->lbal, cmd->lbam, cmd->lbah,
2527                        cmd->hob_feature, cmd->hob_nsect,
2528                        cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
2529                        cmd->device, qc->tag, data_buf, cdb_buf,
2530                        res->command, res->feature, res->nsect,
2531                        res->lbal, res->lbam, res->lbah,
2532                        res->hob_feature, res->hob_nsect,
2533                        res->hob_lbal, res->hob_lbam, res->hob_lbah,
2534                        res->device, qc->err_mask, ata_err_string(qc->err_mask),
2535                        qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
2536
2537#ifdef CONFIG_ATA_VERBOSE_ERROR
2538                if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
2539                                    ATA_ERR)) {
2540                        if (res->command & ATA_BUSY)
2541                                ata_dev_err(qc->dev, "status: { Busy }\n");
2542                        else
2543                                ata_dev_err(qc->dev, "status: { %s%s%s%s}\n",
2544                                  res->command & ATA_DRDY ? "DRDY " : "",
2545                                  res->command & ATA_DF ? "DF " : "",
2546                                  res->command & ATA_DRQ ? "DRQ " : "",
2547                                  res->command & ATA_ERR ? "ERR " : "");
2548                }
2549
2550                if (cmd->command != ATA_CMD_PACKET &&
2551                    (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
2552                                     ATA_ABORTED)))
2553                        ata_dev_err(qc->dev, "error: { %s%s%s%s}\n",
2554                          res->feature & ATA_ICRC ? "ICRC " : "",
2555                          res->feature & ATA_UNC ? "UNC " : "",
2556                          res->feature & ATA_IDNF ? "IDNF " : "",
2557                          res->feature & ATA_ABORTED ? "ABRT " : "");
2558#endif
2559        }
2560}
2561
2562/**
2563 *      ata_eh_report - report error handling to user
2564 *      @ap: ATA port to report EH about
2565 *
2566 *      Report EH to user.
2567 *
2568 *      LOCKING:
2569 *      None.
2570 */
2571void ata_eh_report(struct ata_port *ap)
2572{
2573        struct ata_link *link;
2574
2575        ata_for_each_link(link, ap, HOST_FIRST)
2576                ata_eh_link_report(link);
2577}
2578
2579static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
2580                        unsigned int *classes, unsigned long deadline,
2581                        bool clear_classes)
2582{
2583        struct ata_device *dev;
2584
2585        if (clear_classes)
2586                ata_for_each_dev(dev, link, ALL)
2587                        classes[dev->devno] = ATA_DEV_UNKNOWN;
2588
2589        return reset(link, classes, deadline);
2590}
2591
2592static int ata_eh_followup_srst_needed(struct ata_link *link, int rc)
2593{
2594        if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
2595                return 0;
2596        if (rc == -EAGAIN)
2597                return 1;
2598        if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
2599                return 1;
2600        return 0;
2601}
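
/*
 * For example: sata_std_hardreset() reports -EAGAIN when the link came
 * up, because it does not read the device signature itself; the rule
 * above then makes ata_eh_reset() issue a follow-up softreset to
 * classify the device (likewise for host links on PMP-capable
 * controllers).
 */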
2602
2603int ata_eh_reset(struct ata_link *link, int classify,
2604                 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
2605                 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
2606{
2607        struct ata_port *ap = link->ap;
2608        struct ata_link *slave = ap->slave_link;
2609        struct ata_eh_context *ehc = &link->eh_context;
2610        struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
2611        unsigned int *classes = ehc->classes;
2612        unsigned int lflags = link->flags;
2613        int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
2614        int max_tries = 0, try = 0;
2615        struct ata_link *failed_link;
2616        struct ata_device *dev;
2617        unsigned long deadline, now;
2618        ata_reset_fn_t reset;
2619        unsigned long flags;
2620        u32 sstatus;
2621        int nr_unknown, rc;
2622
2623        /*
2624         * Prepare to reset
2625         */
2626        while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
2627                max_tries++;
2628        if (link->flags & ATA_LFLAG_RST_ONCE)
2629                max_tries = 1;
2630        if (link->flags & ATA_LFLAG_NO_HRST)
2631                hardreset = NULL;
2632        if (link->flags & ATA_LFLAG_NO_SRST)
2633                softreset = NULL;
2634
2635        /* make sure each reset attempt is at least COOL_DOWN apart */
2636        if (ehc->i.flags & ATA_EHI_DID_RESET) {
2637                now = jiffies;
2638                WARN_ON(time_after(ehc->last_reset, now));
2639                deadline = ata_deadline(ehc->last_reset,
2640                                        ATA_EH_RESET_COOL_DOWN);
2641                if (time_before(now, deadline))
2642                        schedule_timeout_uninterruptible(deadline - now);
2643        }
2644
2645        spin_lock_irqsave(ap->lock, flags);
2646        ap->pflags |= ATA_PFLAG_RESETTING;
2647        spin_unlock_irqrestore(ap->lock, flags);
2648
2649        ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2650
2651        ata_for_each_dev(dev, link, ALL) {
2652                /* If we issue an SRST then an ATA drive (not ATAPI)
2653                 * may change configuration and be in PIO0 timing. If
2654                 * we do a hard reset (or are coming from power on)
2655                 * this is true for ATA or ATAPI. Until we've set a
2656                 * suitable controller mode we should not touch the
2657                 * bus as we may be talking too fast.
2658                 */
2659                dev->pio_mode = XFER_PIO_0;
2660                dev->dma_mode = 0xff;
2661
2662                /* If the controller has a pio mode setup function
2663                 * then use it to set the chipset to rights. Don't
2664                 * touch the DMA setup as that will be dealt with when
2665                 * configuring devices.
2666                 */
2667                if (ap->ops->set_piomode)
2668                        ap->ops->set_piomode(ap, dev);
2669        }
2670
2671        /* prefer hardreset */
2672        reset = NULL;
2673        ehc->i.action &= ~ATA_EH_RESET;
2674        if (hardreset) {
2675                reset = hardreset;
2676                ehc->i.action |= ATA_EH_HARDRESET;
2677        } else if (softreset) {
2678                reset = softreset;
2679                ehc->i.action |= ATA_EH_SOFTRESET;
2680        }
2681
2682        if (prereset) {
2683                unsigned long deadline = ata_deadline(jiffies,
2684                                                      ATA_EH_PRERESET_TIMEOUT);
2685
2686                if (slave) {
2687                        sehc->i.action &= ~ATA_EH_RESET;
2688                        sehc->i.action |= ehc->i.action;
2689                }
2690
2691                rc = prereset(link, deadline);
2692
2693                /* If present, do prereset on slave link too.  Reset
2694                 * is skipped iff both master and slave links report
2695                 * -ENOENT or clear ATA_EH_RESET.
2696                 */
2697                if (slave && (rc == 0 || rc == -ENOENT)) {
2698                        int tmp;
2699
2700                        tmp = prereset(slave, deadline);
2701                        if (tmp != -ENOENT)
2702                                rc = tmp;
2703
2704                        ehc->i.action |= sehc->i.action;
2705                }
2706
2707                if (rc) {
2708                        if (rc == -ENOENT) {
2709                                ata_link_dbg(link, "port disabled--ignoring\n");
2710                                ehc->i.action &= ~ATA_EH_RESET;
2711
2712                                ata_for_each_dev(dev, link, ALL)
2713                                        classes[dev->devno] = ATA_DEV_NONE;
2714
2715                                rc = 0;
2716                        } else
2717                                ata_link_err(link,
2718                                             "prereset failed (errno=%d)\n",
2719                                             rc);
2720                        goto out;
2721                }
2722
2723                /* prereset() might have cleared ATA_EH_RESET.  If so,
2724                 * bang classes, thaw and return.
2725                 */
2726                if (reset && !(ehc->i.action & ATA_EH_RESET)) {
2727                        ata_for_each_dev(dev, link, ALL)
2728                                classes[dev->devno] = ATA_DEV_NONE;
2729                        if ((ap->pflags & ATA_PFLAG_FROZEN) &&
2730                            ata_is_host_link(link))
2731                                ata_eh_thaw_port(ap);
2732                        rc = 0;
2733                        goto out;
2734                }
2735        }
2736
2737 retry:
2738        /*
2739         * Perform reset
2740         */
2741        if (ata_is_host_link(link))
2742                ata_eh_freeze_port(ap);
2743
2744        deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);
2745
2746        if (reset) {
2747                if (verbose)
2748                        ata_link_info(link, "%s resetting link\n",
2749                                      reset == softreset ? "soft" : "hard");
2750
2751                /* mark that this EH session started with reset */
2752                ehc->last_reset = jiffies;
2753                if (reset == hardreset)
2754                        ehc->i.flags |= ATA_EHI_DID_HARDRESET;
2755                else
2756                        ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
2757
2758                rc = ata_do_reset(link, reset, classes, deadline, true);
2759                if (rc && rc != -EAGAIN) {
2760                        failed_link = link;
2761                        goto fail;
2762                }
2763
2764                /* hardreset slave link if existent */
2765                if (slave && reset == hardreset) {
2766                        int tmp;
2767
2768                        if (verbose)
2769                                ata_link_info(slave, "hard resetting link\n");
2770
2771                        ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
2772                        tmp = ata_do_reset(slave, reset, classes, deadline,
2773                                           false);
2774                        switch (tmp) {
2775                        case -EAGAIN:
2776                                rc = -EAGAIN;
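                                /* fall through */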
2777                        case 0:
2778                                break;
2779                        default:
2780                                failed_link = slave;
2781                                rc = tmp;
2782                                goto fail;
2783                        }
2784                }
2785
2786                /* perform follow-up SRST if necessary */
2787                if (reset == hardreset &&
2788                    ata_eh_followup_srst_needed(link, rc)) {
2789                        reset = softreset;
2790
2791                        if (!reset) {
2792                                ata_link_err(link,
2793             "follow-up softreset required but no softreset available\n");
2794                                failed_link = link;
2795                                rc = -EINVAL;
2796                                goto fail;
2797                        }
2798
2799                        ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2800                        rc = ata_do_reset(link, reset, classes, deadline, true);
2801                        if (rc) {
2802                                failed_link = link;
2803                                goto fail;
2804                        }
2805                }
2806        } else {
2807                if (verbose)
2808                        ata_link_info(link,
2809        "no reset method available, skipping reset\n");
2810                if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
2811                        lflags |= ATA_LFLAG_ASSUME_ATA;
2812        }
2813
2814        /*
2815         * Post-reset processing
2816         */
2817        ata_for_each_dev(dev, link, ALL) {
2818                /* After the reset, the device state is PIO 0 and the
2819                 * controller state is undefined.  Reset also wakes up
2820                 * drives from sleeping mode.
2821                 */
2822                dev->pio_mode = XFER_PIO_0;
2823                dev->flags &= ~ATA_DFLAG_SLEEPING;
2824
2825                if (ata_phys_link_offline(ata_dev_phys_link(dev)))
2826                        continue;
2827
2828                /* apply class override */
2829                if (lflags & ATA_LFLAG_ASSUME_ATA)
2830                        classes[dev->devno] = ATA_DEV_ATA;
2831                else if (lflags & ATA_LFLAG_ASSUME_SEMB)
2832                        classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
2833        }
2834
2835        /* record current link speed */
2836        if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
2837                link->sata_spd = (sstatus >> 4) & 0xf;
2838        if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
2839                slave->sata_spd = (sstatus >> 4) & 0xf;
2840
2841        /* thaw the port */
2842        if (ata_is_host_link(link))
2843                ata_eh_thaw_port(ap);
2844
2845        /* postreset() should clear hardware SError.  Although SError
2846         * is cleared during link resume, clearing SError here is
2847         * necessary as some PHYs raise hotplug events after SRST.
2848         * This introduces a race condition where hotplug can occur
2849         * between reset and here.  The race is mitigated by cross
2850         * checking link onlineness and the classification result later.
2851         */
2852        if (postreset) {
2853                postreset(link, classes);
2854                if (slave)
2855                        postreset(slave, classes);
2856        }
2857
2858        /*
2859         * Some controllers can't be frozen very well and may set spurious
2860         * error conditions during reset.  Clear accumulated error
2861         * information and re-thaw the port if frozen.  As reset is the
2862         * final recovery action and we cross check link onlineness against
2863         * device classification later, no hotplug event is lost by this.
2864         */
2865        spin_lock_irqsave(link->ap->lock, flags);
2866        memset(&link->eh_info, 0, sizeof(link->eh_info));
2867        if (slave)
2868                memset(&slave->eh_info, 0, sizeof(slave->eh_info));
2869        ap->pflags &= ~ATA_PFLAG_EH_PENDING;
2870        spin_unlock_irqrestore(link->ap->lock, flags);
2871
2872        if (ap->pflags & ATA_PFLAG_FROZEN)
2873                ata_eh_thaw_port(ap);
2874
2875        /*
2876         * Make sure onlineness and classification result correspond.
2877         * Hotplug could have happened during reset and some
2878         * controllers fail to wait while a drive is spinning up after
2879         * being hotplugged causing misdetection.  By cross checking
2880         * link on/offlineness and classification result, those
2881         * conditions can be reliably detected and retried.
2882         */
2883        nr_unknown = 0;
2884        ata_for_each_dev(dev, link, ALL) {
2885                if (ata_phys_link_online(ata_dev_phys_link(dev))) {
2886                        if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2887                                ata_dev_dbg(dev, "link online but device misclassified\n");
2888                                classes[dev->devno] = ATA_DEV_NONE;
2889                                nr_unknown++;
2890                        }
2891                } else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
2892                        if (ata_class_enabled(classes[dev->devno]))
2893                                ata_dev_dbg(dev,
2894                                            "link offline, clearing class %d to NONE\n",
2895                                            classes[dev->devno]);
2896                        classes[dev->devno] = ATA_DEV_NONE;
2897                } else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2898                        ata_dev_dbg(dev,
2899                                    "link status unknown, clearing UNKNOWN to NONE\n");
2900                        classes[dev->devno] = ATA_DEV_NONE;
2901                }
2902        }
2903
2904        if (classify && nr_unknown) {
2905                if (try < max_tries) {
2906                        ata_link_warn(link,
2907                                      "link online but %d devices misclassified, retrying\n",
2908                                      nr_unknown);
2909                        failed_link = link;
2910                        rc = -EAGAIN;
2911                        goto fail;
2912                }
2913                ata_link_warn(link,
2914                              "link online but %d devices misclassified, "
2915                              "device detection might fail\n", nr_unknown);
2916        }
2917
2918        /* reset successful, schedule revalidation */
2919        ata_eh_done(link, NULL, ATA_EH_RESET);
2920        if (slave)
2921                ata_eh_done(slave, NULL, ATA_EH_RESET);
2922        ehc->last_reset = jiffies;              /* update to completion time */
2923        ehc->i.action |= ATA_EH_REVALIDATE;
2924        link->lpm_policy = ATA_LPM_UNKNOWN;     /* reset LPM state */
2925
2926        rc = 0;
2927 out:
2928        /* clear hotplug flag */
2929        ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2930        if (slave)
2931                sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2932
2933        spin_lock_irqsave(ap->lock, flags);
2934        ap->pflags &= ~ATA_PFLAG_RESETTING;
2935        spin_unlock_irqrestore(ap->lock, flags);
2936
2937        return rc;
2938
2939 fail:
2940        /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
2941        if (!ata_is_host_link(link) &&
2942            sata_scr_read(link, SCR_STATUS, &sstatus))
2943                rc = -ERESTART;
2944
2945        if (try >= max_tries) {
2946                /*
2947                 * Thaw host port even if reset failed, so that the port
2948                 * can be retried on the next phy event.  This risks
2949                 * repeated EH runs but seems to be a better tradeoff than
2950                 * shutting down a port after a botched hotplug attempt.
2951                 */
2952                if (ata_is_host_link(link))
2953                        ata_eh_thaw_port(ap);
2954                goto out;
2955        }
2956
2957        now = jiffies;
2958        if (time_before(now, deadline)) {
2959                unsigned long delta = deadline - now;
2960
2961                ata_link_warn(failed_link,
2962                        "reset failed (errno=%d), retrying in %u secs\n",
2963                        rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
2964
2965                ata_eh_release(ap);
2966                while (delta)
2967                        delta = schedule_timeout_uninterruptible(delta);
2968                ata_eh_acquire(ap);
2969        }
2970
2971        /*
2972         * While disks spin up behind a PMP, some controllers fail to send
2973         * SRST.  They need to be reset - as well as the PMP - before retrying.
2974         */
2975        if (rc == -ERESTART) {
2976                if (ata_is_host_link(link))
2977                        ata_eh_thaw_port(ap);
2978                goto out;
2979        }
2980
2981        if (try == max_tries - 1) {
2982                sata_down_spd_limit(link, 0);
2983                if (slave)
2984                        sata_down_spd_limit(slave, 0);
2985        } else if (rc == -EPIPE)
2986                sata_down_spd_limit(failed_link, 0);
2987
2988        if (hardreset)
2989                reset = hardreset;
2990        goto retry;
2991}
2992
2993static inline void ata_eh_pull_park_action(struct ata_port *ap)
2994{
2995        struct ata_link *link;
2996        struct ata_device *dev;
2997        unsigned long flags;
2998
2999        /*
3000         * This function can be thought of as an extended version of
3001         * ata_eh_about_to_do() specially crafted to accommodate the
3002         * requirements of ATA_EH_PARK handling. Since the EH thread
3003         * does not leave the do {} while () loop in ata_eh_recover as
3004         * long as the timeout for a park request to *one* device on
3005         * the port has not expired, and since we still want to pick
3006         * up park requests to other devices on the same port or
3007         * timeout updates for the same device, we have to pull
3008         * ATA_EH_PARK actions from eh_info into eh_context.i
3009         * ourselves at the beginning of each pass over the loop.
3010         *
3011         * Additionally, all write accesses to &ap->park_req_pending
3012         * through INIT_COMPLETION() (see below) or complete_all()
3013         * (see ata_scsi_park_store()) are protected by the host lock.
3014         * As a result, park_req_pending.done is zero on
3015         * exit from this function, i.e. when ATA_EH_PARK actions for
3016         * *all* devices on port ap have been pulled into the
3017         * respective eh_context structs. If, and only if,
3018         * park_req_pending.done is non-zero by the time we reach
3019         * wait_for_completion_timeout(), another ATA_EH_PARK action
3020         * has been scheduled for at least one of the devices on port
3021         * ap and we have to cycle over the do {} while () loop in
3022         * ata_eh_recover() again.
3023         */
3024
3025        spin_lock_irqsave(ap->lock, flags);
3026        INIT_COMPLETION(ap->park_req_pending);
3027        ata_for_each_link(link, ap, EDGE) {
3028                ata_for_each_dev(dev, link, ALL) {
3029                        struct ata_eh_info *ehi = &link->eh_info;
3030
3031                        link->eh_context.i.dev_action[dev->devno] |=
3032                                ehi->dev_action[dev->devno] & ATA_EH_PARK;
3033                        ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
3034                }
3035        }
3036        spin_unlock_irqrestore(ap->lock, flags);
3037}
3038
3039static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
3040{
3041        struct ata_eh_context *ehc = &dev->link->eh_context;
3042        struct ata_taskfile tf;
3043        unsigned int err_mask;
3044
3045        ata_tf_init(dev, &tf);
3046        if (park) {
3047                ehc->unloaded_mask |= 1 << dev->devno;
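		/*
		 * IDLE IMMEDIATE with UNLOAD FEATURE (per ATA8-ACS):
		 * feature 0x44 selects head unload and the LBA registers
		 * must carry the 0x554e4c ("UNL") signature.  On success
		 * the device returns 0xc4 in LBA low, which is checked
		 * after command completion below.
		 */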
3048                tf.command = ATA_CMD_IDLEIMMEDIATE;
3049                tf.feature = 0x44;
3050                tf.lbal = 0x4c;
3051                tf.lbam = 0x4e;
3052                tf.lbah = 0x55;
3053        } else {
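		/*
		 * Receipt of any new command ends the unload state, so
		 * CHECK POWER MODE is issued as an innocuous NODATA
		 * command to get the heads reloaded.
		 */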
3054                ehc->unloaded_mask &= ~(1 << dev->devno);
3055                tf.command = ATA_CMD_CHK_POWER;
3056        }
3057
3058        tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
3059        tf.protocol = ATA_PROT_NODATA;
3060        err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
3061        if (park && (err_mask || tf.lbal != 0xc4)) {
3062                ata_dev_err(dev, "head unload failed!\n");
3063                ehc->unloaded_mask &= ~(1 << dev->devno);
3064        }
3065}
3066
3067static int ata_eh_revalidate_and_attach(struct ata_link *link,
3068                                        struct ata_device **r_failed_dev)
3069{
3070        struct ata_port *ap = link->ap;
3071        struct ata_eh_context *ehc = &link->eh_context;
3072        struct ata_device *dev;
3073        unsigned int new_mask = 0;
3074        unsigned long flags;
3075        int rc = 0;
3076
3077        DPRINTK("ENTER\n");
3078
3079        /* For PATA drive side cable detection to work, IDENTIFY must
3080         * be done backwards such that PDIAG- is released by the slave
3081         * device before the master device is identified.
3082         */
3083        ata_for_each_dev(dev, link, ALL_REVERSE) {
3084                unsigned int action = ata_eh_dev_action(dev);
3085                unsigned int readid_flags = 0;
3086
3087                if (ehc->i.flags & ATA_EHI_DID_RESET)
3088                        readid_flags |= ATA_READID_POSTRESET;
3089
3090                if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
3091                        WARN_ON(dev->class == ATA_DEV_PMP);
3092
3093                        if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
3094                                rc = -EIO;
3095                                goto err;
3096                        }
3097
3098                        ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
3099                        rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
3100                                                readid_flags);
3101                        if (rc)
3102                                goto err;
3103
3104                        ata_eh_done(link, dev, ATA_EH_REVALIDATE);
3105
3106                        /* Configuration may have changed, reconfigure
3107                         * transfer mode.
3108                         */
3109                        ehc->i.flags |= ATA_EHI_SETMODE;
3110
3111                        /* schedule the scsi_rescan_device() here */
3112                        schedule_work(&(ap->scsi_rescan_task));
3113                } else if (dev->class == ATA_DEV_UNKNOWN &&
3114                           ehc->tries[dev->devno] &&
3115                           ata_class_enabled(ehc->classes[dev->devno])) {
3116                        /* Temporarily set dev->class; it will be
3117                         * permanently set once all configurations are
3118                         * complete.  This is necessary because new
3119                         * device configuration is done in two
3120                         * separate loops.
3121                         */
3122                        dev->class = ehc->classes[dev->devno];
3123
3124                        if (dev->class == ATA_DEV_PMP)
3125                                rc = sata_pmp_attach(dev);
3126                        else
3127                                rc = ata_dev_read_id(dev, &dev->class,
3128                                                     readid_flags, dev->id);
3129
3130                        /* read_id might have changed class, store and reset */
3131                        ehc->classes[dev->devno] = dev->class;
3132                        dev->class = ATA_DEV_UNKNOWN;
3133
3134                        switch (rc) {
3135                        case 0:
3136                                /* clear error info accumulated during probe */
3137                                ata_ering_clear(&dev->ering);
3138                                new_mask |= 1 << dev->devno;
3139                                break;
3140                        case -ENOENT:
3141                                /* IDENTIFY was issued to a non-existent
3142                                 * device.  No need to reset.  Just
3143                                 * thaw and ignore the device.
3144                                 */
3145                                ata_eh_thaw_port(ap);
3146                                break;
3147                        default:
3148                                goto err;
3149                        }
3150                }
3151        }
3152
3153        /* PDIAG- should have been released, ask cable type if post-reset */
3154        if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
3155                if (ap->ops->cable_detect)
3156                        ap->cbl = ap->ops->cable_detect(ap);
3157                ata_force_cbl(ap);
3158        }
3159
3160        /* Configure new devices forward such that the user doesn't see
3161         * device detection messages backwards.
3162         */
3163        ata_for_each_dev(dev, link, ALL) {
3164                if (!(new_mask & (1 << dev->devno)))
3165                        continue;
3166
3167                dev->class = ehc->classes[dev->devno];
3168
3169                if (dev->class == ATA_DEV_PMP)
3170                        continue;
3171
3172                ehc->i.flags |= ATA_EHI_PRINTINFO;
3173                rc = ata_dev_configure(dev);
3174                ehc->i.flags &= ~ATA_EHI_PRINTINFO;
3175                if (rc) {
3176                        dev->class = ATA_DEV_UNKNOWN;
3177                        goto err;
3178                }
3179
3180                spin_lock_irqsave(ap->lock, flags);
3181                ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
3182                spin_unlock_irqrestore(ap->lock, flags);
3183
3184                /* new device discovered, configure xfermode */
3185                ehc->i.flags |= ATA_EHI_SETMODE;
3186        }
3187
3188        return 0;
3189
3190 err:
3191        *r_failed_dev = dev;
3192        DPRINTK("EXIT rc=%d\n", rc);
3193        return rc;
3194}
3195
3196/**
3197 *      ata_set_mode - Program timings and issue SET FEATURES - XFER
3198 *      @link: link on which timings will be programmed
3199 *      @r_failed_dev: out parameter for failed device
3200 *
3201 *      Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
3202 *      ata_set_mode() fails, pointer to the failing device is
3203 *      returned in @r_failed_dev.
3204 *
3205 *      LOCKING:
3206 *      PCI/etc. bus probe sem.
3207 *
3208 *      RETURNS:
3209 *      0 on success, negative errno otherwise
3210 */
3211int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3212{
3213        struct ata_port *ap = link->ap;
3214        struct ata_device *dev;
3215        int rc;
3216
3217        /* if data transfer is verified, clear DUBIOUS_XFER on ering top */
3218        ata_for_each_dev(dev, link, ENABLED) {
3219                if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
3220                        struct ata_ering_entry *ent;
3221
3222                        ent = ata_ering_top(&dev->ering);
3223                        if (ent)
3224                                ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
3225                }
3226        }
3227
3228        /* has private set_mode? */
3229        if (ap->ops->set_mode)
3230                rc = ap->ops->set_mode(link, r_failed_dev);
3231        else
3232                rc = ata_do_set_mode(link, r_failed_dev);
3233
3234        /* if transfer mode has changed, set DUBIOUS_XFER on device */
3235        ata_for_each_dev(dev, link, ENABLED) {
3236                struct ata_eh_context *ehc = &link->eh_context;
3237                u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
3238                u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
3239
3240                if (dev->xfer_mode != saved_xfer_mode ||
3241                    ata_ncq_enabled(dev) != saved_ncq)
3242                        dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
3243        }
3244
3245        return rc;
3246}
3247
3248/**
3249 *      atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
3250 *      @dev: ATAPI device to clear UA for
3251 *
3252 *      Resets and other operations can make an ATAPI device raise
3253 *      UNIT ATTENTION, which causes the next operation to fail.  This
3254 *      function clears UA.
3255 *
3256 *      LOCKING:
3257 *      EH context (may sleep).
3258 *
3259 *      RETURNS:
3260 *      0 on success, -errno on failure.
3261 */
3262static int atapi_eh_clear_ua(struct ata_device *dev)
3263{
3264        int i;
3265
3266        for (i = 0; i < ATA_EH_UA_TRIES; i++) {
3267                u8 *sense_buffer = dev->link->ap->sector_buf;
3268                u8 sense_key = 0;
3269                unsigned int err_mask;
3270
3271                err_mask = atapi_eh_tur(dev, &sense_key);
3272                if (err_mask != 0 && err_mask != AC_ERR_DEV) {
3273                        ata_dev_warn(dev,
3274                                     "TEST_UNIT_READY failed (err_mask=0x%x)\n",
3275                                     err_mask);
3276                        return -EIO;
3277                }
3278
3279                if (!err_mask || sense_key != UNIT_ATTENTION)
3280                        return 0;
3281
3282                err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
3283                if (err_mask) {
3284                        ata_dev_warn(dev, "failed to clear "
3285                                "UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
3286                        return -EIO;
3287                }
3288        }
3289
3290        ata_dev_warn(dev, "UNIT ATTENTION persists after %d tries\n",
3291                     ATA_EH_UA_TRIES);
3292
3293        return 0;
3294}
3295
3296/**
3297 *      ata_eh_maybe_retry_flush - Retry FLUSH if necessary
3298 *      @dev: ATA device which may need FLUSH retry
3299 *
3300 *      If @dev failed FLUSH, it needs to be reported to the upper layer
3301 *      immediately as it means that @dev failed to remap and has already
3302 *      lost at least a sector; further FLUSH retries won't make
3303 *      any difference to the lost sector.  However, if FLUSH failed
3304 *      for another reason, for example a transmission error, FLUSH
3305 *      needs to be retried.
3306 *
3307 *      This function determines whether FLUSH failure retry is
3308 *      necessary and performs it if so.
3309 *
3310 *      RETURNS:
3311 *      0 if EH can continue, -errno if EH needs to be repeated.
3312 */
3313static int ata_eh_maybe_retry_flush(struct ata_device *dev)
3314{
3315        struct ata_link *link = dev->link;
3316        struct ata_port *ap = link->ap;
3317        struct ata_queued_cmd *qc;
3318        struct ata_taskfile tf;
3319        unsigned int err_mask;
3320        int rc = 0;
3321
3322        /* did flush fail for this device? */
3323        if (!ata_tag_valid(link->active_tag))
3324                return 0;
3325
3326        qc = __ata_qc_from_tag(ap, link->active_tag);
3327        if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
3328                               qc->tf.command != ATA_CMD_FLUSH))
3329                return 0;
3330
3331        /* if the device failed it, it should be reported to upper layers */
3332        if (qc->err_mask & AC_ERR_DEV)
3333                return 0;
3334
3335        /* flush failed for some other reason, give it another shot */
3336        ata_tf_init(dev, &tf);
3337
3338        tf.command = qc->tf.command;
3339        tf.flags |= ATA_TFLAG_DEVICE;
3340        tf.protocol = ATA_PROT_NODATA;
3341
3342        ata_dev_warn(dev, "retrying FLUSH 0x%x Emask 0x%x\n",
3343                       tf.command, qc->err_mask);
3344
3345        err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
3346        if (!err_mask) {
3347                /*
3348                 * FLUSH is complete but there's no way to
3349                 * successfully complete a failed command from EH.
3350                 * Making sure retry is allowed at least once and
3351                 * retrying it should do the trick - whatever was in
3352                 * the cache is already on the platter and this won't
3353                 * cause an infinite loop.
3354                 */
3355                qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
3356        } else {
3357                ata_dev_warn(dev, "FLUSH failed Emask 0x%x\n",
3358                               err_mask);
3359                rc = -EIO;
3360
3361                /* if device failed it, report it to upper layers */
3362                if (err_mask & AC_ERR_DEV) {
3363                        qc->err_mask |= AC_ERR_DEV;
3364                        qc->result_tf = tf;
3365                        if (!(ap->pflags & ATA_PFLAG_FROZEN))
3366                                rc = 0;
3367                }
3368        }
3369        return rc;
3370}
3371
3372/**
3373 *      ata_eh_set_lpm - configure SATA interface power management
3374 *      @link: link to configure power management
3375 *      @policy: the link power management policy
3376 *      @r_failed_dev: out parameter for failed device
3377 *
3378 *      Enable SATA Interface power management.  This will enable
3379 *      Device Interface Power Management (DIPM) for min_power
3380 *      policy, and then call driver specific callbacks for
3381 *      enabling Host Initiated Power management.
3382 *
3383 *      LOCKING:
3384 *      EH context.
3385 *
3386 *      RETURNS:
3387 *      0 on success, -errno on failure.
3388 */
3389static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3390                          struct ata_device **r_failed_dev)
3391{
3392        struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
3393        struct ata_eh_context *ehc = &link->eh_context;
3394        struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
3395        enum ata_lpm_policy old_policy = link->lpm_policy;
3396        bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM;
3397        unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
3398        unsigned int err_mask;
3399        int rc;
3400
3401        /* if the link or host doesn't do LPM, noop */
3402        if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
3403                return 0;
3404
3405        /*
3406         * DIPM is enabled only for MIN_POWER as some devices
3407         * misbehave when the host NACKs transition to SLUMBER.  Order
3408         * device and link configurations such that the host always
3409         * allows DIPM requests.
3410         */
3411        ata_for_each_dev(dev, link, ENABLED) {
3412                bool hipm = ata_id_has_hipm(dev->id);
3413                bool dipm = ata_id_has_dipm(dev->id) && !no_dipm;
3414
3415                /* find the first enabled device and first LPM-capable device */
3416                if (!link_dev)
3417                        link_dev = dev;
3418
3419                if (!lpm_dev && (hipm || dipm))
3420                        lpm_dev = dev;
3421
3422                hints &= ~ATA_LPM_EMPTY;
3423                if (!hipm)
3424                        hints &= ~ATA_LPM_HIPM;
3425
3426                /* disable DIPM before changing link config */
3427                if (policy != ATA_LPM_MIN_POWER && dipm) {
3428                        err_mask = ata_dev_set_feature(dev,
3429                                        SETFEATURES_SATA_DISABLE, SATA_DIPM);
3430                        if (err_mask && err_mask != AC_ERR_DEV) {
3431                                ata_dev_warn(dev,
3432                                             "failed to disable DIPM, Emask 0x%x\n",
3433                                             err_mask);
3434                                rc = -EIO;
3435                                goto fail;
3436                        }
3437                }
3438        }
3439
3440        if (ap) {
3441                rc = ap->ops->set_lpm(link, policy, hints);
3442                if (!rc && ap->slave_link)
3443                        rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
3444        } else
3445                rc = sata_pmp_set_lpm(link, policy, hints);
3446
3447        /*
3448         * Attribute link config failure to the first (LPM) enabled
3449         * device on the link.
3450         */
3451        if (rc) {
3452                if (rc == -EOPNOTSUPP) {
3453                        link->flags |= ATA_LFLAG_NO_LPM;
3454                        return 0;
3455                }
3456                dev = lpm_dev ? lpm_dev : link_dev;
3457                goto fail;
3458        }
3459
3460        /*
3461         * Low level driver acked the transition.  Issue DIPM command
3462         * with the new policy set.
3463         */
3464        link->lpm_policy = policy;
3465        if (ap && ap->slave_link)
3466                ap->slave_link->lpm_policy = policy;
3467
3468        /* host config updated, enable DIPM if transitioning to MIN_POWER */
3469        ata_for_each_dev(dev, link, ENABLED) {
3470                if (policy == ATA_LPM_MIN_POWER && !no_dipm &&
3471                    ata_id_has_dipm(dev->id)) {
3472                        err_mask = ata_dev_set_feature(dev,
3473                                        SETFEATURES_SATA_ENABLE, SATA_DIPM);
3474                        if (err_mask && err_mask != AC_ERR_DEV) {
3475                                ata_dev_warn(dev,
3476                                        "failed to enable DIPM, Emask 0x%x\n",
3477                                        err_mask);
3478                                rc = -EIO;
3479                                goto fail;
3480                        }
3481                }
3482        }
3483
3484        return 0;
3485
3486fail:
3487        /* restore the old policy */
3488        link->lpm_policy = old_policy;
3489        if (ap && ap->slave_link)
3490                ap->slave_link->lpm_policy = old_policy;
3491
3492        /* if no device or only one more chance is left, disable LPM */
3493        if (!dev || ehc->tries[dev->devno] <= 2) {
3494                ata_link_warn(link, "disabling LPM on the link\n");
3495                link->flags |= ATA_LFLAG_NO_LPM;
3496        }
3497        if (r_failed_dev)
3498                *r_failed_dev = dev;
3499        return rc;
3500}
3501
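    /* number of enabled devices on @link */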
3502int ata_link_nr_enabled(struct ata_link *link)
3503{
3504        struct ata_device *dev;
3505        int cnt = 0;
3506
3507        ata_for_each_dev(dev, link, ENABLED)
3508                cnt++;
3509        return cnt;
3510}
3511
3512static int ata_link_nr_vacant(struct ata_link *link)
3513{
3514        struct ata_device *dev;
3515        int cnt = 0;
3516
3517        ata_for_each_dev(dev, link, ALL)
3518                if (dev->class == ATA_DEV_UNKNOWN)
3519                        cnt++;
3520        return cnt;
3521}
3522
3523static int ata_eh_skip_recovery(struct ata_link *link)
3524{
3525        struct ata_port *ap = link->ap;
3526        struct ata_eh_context *ehc = &link->eh_context;
3527        struct ata_device *dev;
3528
3529        /* skip disabled links */
3530        if (link->flags & ATA_LFLAG_DISABLED)
3531                return 1;
3532
3533        /* skip if explicitly requested */
3534        if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
3535                return 1;
3536
3537        /* thaw frozen port and recover failed devices */
3538        if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
3539                return 0;
3540
3541        /* reset at least once if reset is requested */
3542        if ((ehc->i.action & ATA_EH_RESET) &&
3543            !(ehc->i.flags & ATA_EHI_DID_RESET))
3544                return 0;
3545
3546        /* skip if class codes for all vacant slots are ATA_DEV_NONE */
3547        ata_for_each_dev(dev, link, ALL) {
3548                if (dev->class == ATA_DEV_UNKNOWN &&
3549                    ehc->classes[dev->devno] != ATA_DEV_NONE)
3550                        return 0;
3551        }
3552
3553        return 1;
3554}
3555
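    /*
     * ata_ering_map() callback: count probe trial entries recorded within
     * the last ATA_EH_PROBE_TRIAL_INTERVAL.  Returning -1 stops the walk
     * at the first entry which is logically cleared (ATA_EFLAG_OLD_ER) or
     * older than the interval.
     */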
3556static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
3557{
3558        u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
3559        u64 now = get_jiffies_64();
3560        int *trials = void_arg;
3561
3562        if ((ent->eflags & ATA_EFLAG_OLD_ER) ||
3563            (ent->timestamp < now - min(now, interval)))
3564                return -1;
3565
3566        (*trials)++;
3567        return 0;
3568}
3569
3570static int ata_eh_schedule_probe(struct ata_device *dev)
3571{
3572        struct ata_eh_context *ehc = &dev->link->eh_context;
3573        struct ata_link *link = ata_dev_phys_link(dev);
3574        int trials = 0;
3575
3576        if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
3577            (ehc->did_probe_mask & (1 << dev->devno)))
3578                return 0;
3579
3580        ata_eh_detach_dev(dev);
3581        ata_dev_init(dev);
3582        ehc->did_probe_mask |= (1 << dev->devno);
3583        ehc->i.action |= ATA_EH_RESET;
3584        ehc->saved_xfer_mode[dev->devno] = 0;
3585        ehc->saved_ncq_enabled &= ~(1 << dev->devno);
3586
3587        /* the link may be in deep sleep, wake it up */
3588        if (link->lpm_policy > ATA_LPM_MAX_POWER) {
3589                if (ata_is_host_link(link))
3590                        link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER,
3591                                               ATA_LPM_EMPTY);
3592                else
3593                        sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER,
3594                                         ATA_LPM_EMPTY);
3595        }
3596
3597        /* Record and count probe trials on the ering.  The specific
3598         * error mask used is irrelevant.  Because a successful device
3599         * detection clears the ering, this count accumulates only if
3600         * there are consecutive failed probes.
3601         *
3602         * If the count exceeds ATA_EH_PROBE_TRIALS within the last
3603         * ATA_EH_PROBE_TRIAL_INTERVAL, link speed is forced down
3604         * to 1.5Gbps.
3605         *
3606         * This is to work around cases where failed link speed
3607         * negotiation results in device misdetection leading to
3608         * infinite DEVXCHG or PHRDY CHG events.
3609         */
3610        ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
3611        ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);
3612
3613        if (trials > ATA_EH_PROBE_TRIALS)
3614                sata_down_spd_limit(link, 1);
3615
3616        return 1;
3617}
3618
3619static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
3620{
3621        struct ata_eh_context *ehc = &dev->link->eh_context;
3622
3623        /* -EAGAIN from EH routine indicates retry without prejudice.
3624         * The requester is responsible for ensuring forward progress.
3625         */
3626        if (err != -EAGAIN)
3627                ehc->tries[dev->devno]--;
3628
3629        switch (err) {
3630        case -ENODEV:
3631                /* device missing or wrong IDENTIFY data, schedule probing */
3632                ehc->i.probe_mask |= (1 << dev->devno);
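                /* fall through */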
3633        case -EINVAL:
3634                /* give it just one more chance */
3635                ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
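                /* fall through */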
3636        case -EIO:
3637                if (ehc->tries[dev->devno] == 1) {
3638                        /* This is the last chance, better to slow
3639                         * down than lose it.
3640                         */
3641                        sata_down_spd_limit(ata_dev_phys_link(dev), 0);
3642                        if (dev->pio_mode > XFER_PIO_0)
3643                                ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
3644                }
3645        }
3646
3647        if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
3648                /* disable device if it has used up all its chances */
3649                ata_dev_disable(dev);
3650
3651                /* detach if offline */
3652                if (ata_phys_link_offline(ata_dev_phys_link(dev)))
3653                        ata_eh_detach_dev(dev);
3654
3655                /* schedule probe if necessary */
3656                if (ata_eh_schedule_probe(dev)) {
3657                        ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3658                        memset(ehc->cmd_timeout_idx[dev->devno], 0,
3659                               sizeof(ehc->cmd_timeout_idx[dev->devno]));
3660                }
3661
3662                return 1;
3663        } else {
3664                ehc->i.action |= ATA_EH_RESET;
3665                return 0;
3666        }
3667}
3668
3669/**
3670 *      ata_eh_recover - recover host port after error
3671 *      @ap: host port to recover
3672 *      @prereset: prereset method (can be NULL)
3673 *      @softreset: softreset method (can be NULL)
3674 *      @hardreset: hardreset method (can be NULL)
3675 *      @postreset: postreset method (can be NULL)
3676 *      @r_failed_link: out parameter for failed link
3677 *
3678 *      This is the alpha and omega, yin and yang, heart and soul of
3679 *      libata exception handling.  On entry, actions required to
3680 *      recover each link and hotplug requests are recorded in the
3681 *      link's eh_context.  This function executes all the operations
3682 *      with appropriate retries and fallbacks to resurrect failed
3683 *      devices, detach goners and greet newcomers.
3684 *
3685 *      LOCKING:
3686 *      Kernel thread context (may sleep).
3687 *
3688 *      RETURNS:
3689 *      0 on success, -errno on failure.
3690 */
3691int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3692                   ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
3693                   ata_postreset_fn_t postreset,
3694                   struct ata_link **r_failed_link)
3695{
3696        struct ata_link *link;
3697        struct ata_device *dev;
3698        int rc, nr_fails;
3699        unsigned long flags, deadline;
3700
3701        DPRINTK("ENTER\n");
3702
3703        /* prep for recovery */
3704        ata_for_each_link(link, ap, EDGE) {
3705                struct ata_eh_context *ehc = &link->eh_context;
3706
3707                /* re-enable link? */
3708                if (ehc->i.action & ATA_EH_ENABLE_LINK) {
3709                        ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
3710                        spin_lock_irqsave(ap->lock, flags);
3711                        link->flags &= ~ATA_LFLAG_DISABLED;
3712                        spin_unlock_irqrestore(ap->lock, flags);
3713                        ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
3714                }
3715
3716                ata_for_each_dev(dev, link, ALL) {
3717                        if (link->flags & ATA_LFLAG_NO_RETRY)
3718                                ehc->tries[dev->devno] = 1;
3719                        else
3720                                ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3721
3722                        /* collect port action mask recorded in dev actions */
3723                        ehc->i.action |= ehc->i.dev_action[dev->devno] &
3724                                         ~ATA_EH_PERDEV_MASK;
3725                        ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;
3726
3727                        /* process hotplug request */
3728                        if (dev->flags & ATA_DFLAG_DETACH)
3729                                ata_eh_detach_dev(dev);
3730
3731                        /* schedule probe if necessary */
3732                        if (!ata_dev_enabled(dev))
3733                                ata_eh_schedule_probe(dev);
3734                }
3735        }
3736
3737 retry:
3738        rc = 0;
3739
3740        /* if UNLOADING, finish immediately */
3741        if (ap->pflags & ATA_PFLAG_UNLOADING)
3742                goto out;
3743
3744        /* prep for EH */
3745        ata_for_each_link(link, ap, EDGE) {
3746                struct ata_eh_context *ehc = &link->eh_context;
3747
3748                /* skip EH if possible. */
3749                if (ata_eh_skip_recovery(link))
3750                        ehc->i.action = 0;
3751
3752                ata_for_each_dev(dev, link, ALL)
3753                        ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
3754        }
3755
3756        /* reset */
3757        ata_for_each_link(link, ap, EDGE) {
3758                struct ata_eh_context *ehc = &link->eh_context;
3759
3760                if (!(ehc->i.action & ATA_EH_RESET))
3761                        continue;
3762
3763                rc = ata_eh_reset(link, ata_link_nr_vacant(link),
3764                                  prereset, softreset, hardreset, postreset);
3765                if (rc) {
3766                        ata_link_err(link, "reset failed, giving up\n");
3767                        goto out;
3768                }
3769        }
3770
3771        do {
3772                unsigned long now;
3773
3774                /*
3775                 * clears ATA_EH_PARK in eh_info and resets
3776                 * ap->park_req_pending
3777                 */
3778                ata_eh_pull_park_action(ap);
3779
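                /*
                 * Find the latest unpark deadline among devices with
                 * pending park requests and issue head unload to those
                 * that haven't been unloaded yet.
                 */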
3780                deadline = jiffies;
3781                ata_for_each_link(link, ap, EDGE) {
3782                        ata_for_each_dev(dev, link, ALL) {
3783                                struct ata_eh_context *ehc = &link->eh_context;
3784                                unsigned long tmp;
3785
3786                                if (dev->class != ATA_DEV_ATA)
3787                                        continue;
3788                                if (!(ehc->i.dev_action[dev->devno] &
3789                                      ATA_EH_PARK))
3790                                        continue;
3791                                tmp = dev->unpark_deadline;
3792                                if (time_before(deadline, tmp))
3793                                        deadline = tmp;
3794                                else if (time_before_eq(tmp, jiffies))
3795                                        continue;
3796                                if (ehc->unloaded_mask & (1 << dev->devno))
3797                                        continue;
3798
3799                                ata_eh_park_issue_cmd(dev, 1);
3800                        }
3801                }
3802
3803                now = jiffies;
3804                if (time_before_eq(deadline, now))
3805                        break;
3806
3807                ata_eh_release(ap);
3808                deadline = wait_for_completion_timeout(&ap->park_req_pending,
3809                                                       deadline - now);
3810                ata_eh_acquire(ap);
3811        } while (deadline);
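        /* park deadlines have passed; reload heads and complete park actions */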
3812        ata_for_each_link(link, ap, EDGE) {
3813                ata_for_each_dev(dev, link, ALL) {
3814                        if (!(link->eh_context.unloaded_mask &
3815                              (1 << dev->devno)))
3816                                continue;
3817
3818                        ata_eh_park_issue_cmd(dev, 0);
3819                        ata_eh_done(link, dev, ATA_EH_PARK);
3820                }
3821        }
3822
3823        /* the rest */
3824        nr_fails = 0;
3825        ata_for_each_link(link, ap, PMP_FIRST) {
3826                struct ata_eh_context *ehc = &link->eh_context;
3827
3828                if (sata_pmp_attached(ap) && ata_is_host_link(link))
3829                        goto config_lpm;
3830
3831                /* revalidate existing devices and attach new ones */
3832                rc = ata_eh_revalidate_and_attach(link, &dev);
3833                if (rc)
3834                        goto rest_fail;
3835
3836                /* if a PMP got attached, return; PMP EH will take care of it */
3837                if (link->device->class == ATA_DEV_PMP) {
3838                        ehc->i.action = 0;
3839                        return 0;
3840                }
3841
3842                /* configure transfer mode if necessary */
3843                if (ehc->i.flags & ATA_EHI_SETMODE) {
3844                        rc = ata_set_mode(link, &dev);
3845                        if (rc)
3846                                goto rest_fail;
3847                        ehc->i.flags &= ~ATA_EHI_SETMODE;
3848                }
3849
3850                /* If reset has been issued, clear UA to avoid
3851                 * disrupting the current users of the device.
3852                 */
3853                if (ehc->i.flags & ATA_EHI_DID_RESET) {
3854                        ata_for_each_dev(dev, link, ALL) {
3855                                if (dev->class != ATA_DEV_ATAPI)
3856                                        continue;
3857                                rc = atapi_eh_clear_ua(dev);
3858                                if (rc)
3859                                        goto rest_fail;
3860                                if (zpodd_dev_enabled(dev))
3861                                        zpodd_post_poweron(dev);
3862                        }
3863                }
3864
3865                /* retry flush if necessary */
3866                ata_for_each_dev(dev, link, ALL) {
3867                        if (dev->class != ATA_DEV_ATA)
3868                                continue;
3869                        rc = ata_eh_maybe_retry_flush(dev);
3870                        if (rc)
3871                                goto rest_fail;
3872                }
3873
3874        config_lpm:
3875                /* configure link power saving */
3876                if (link->lpm_policy != ap->target_lpm_policy) {
3877                        rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev);
3878                        if (rc)
3879                                goto rest_fail;
3880                }
3881
3882                /* this link is okay now */
3883                ehc->i.flags = 0;
3884                continue;
3885
3886        rest_fail:
3887                nr_fails++;
3888                if (dev)
3889                        ata_eh_handle_dev_fail(dev, rc);
3890
3891                if (ap->pflags & ATA_PFLAG_FROZEN) {
3892                        /* PMP reset requires working host port.
3893                         * Can't retry if it's frozen.
3894                         */
3895                        if (sata_pmp_attached(ap))
3896                                goto out;
3897                        break;
3898                }
3899        }
3900
3901        if (nr_fails)
3902                goto retry;
3903
3904 out:
3905        if (rc && r_failed_link)
3906                *r_failed_link = link;
3907
3908        DPRINTK("EXIT, rc=%d\n", rc);
3909        return rc;
3910}
3911
3912/**
3913 *      ata_eh_finish - finish up EH
3914 *      @ap: host port to finish EH for
3915 *
3916 *      Recovery is complete.  Clean up EH states and retry or finish
3917 *      failed qcs.
3918 *
3919 *      LOCKING:
3920 *      None.
3921 */
3922void ata_eh_finish(struct ata_port *ap)
3923{
3924        int tag;
3925
3926        /* retry or finish qcs */
3927        for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
3928                struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
3929
3930                if (!(qc->flags & ATA_QCFLAG_FAILED))
3931                        continue;
3932
3933                if (qc->err_mask) {
3934                        /* FIXME: Once EH migration is complete,
3935                         * generate sense data in this function,
3936                         * considering both err_mask and tf.
3937                         */
3938                        if (qc->flags & ATA_QCFLAG_RETRY)
3939                                ata_eh_qc_retry(qc);
3940                        else
3941                                ata_eh_qc_complete(qc);
3942                } else {
3943                        if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
3944                                ata_eh_qc_complete(qc);
3945                        } else {
3946                                /* feed zero TF to sense generation */
3947                                memset(&qc->result_tf, 0, sizeof(qc->result_tf));
3948                                ata_eh_qc_retry(qc);
3949                        }
3950                }
3951        }
3952
3953        /* make sure nr_active_links is zero after EH */
3954        WARN_ON(ap->nr_active_links);
3955        ap->nr_active_links = 0;
3956}
3957
3958/**
3959 *      ata_do_eh - do standard error handling
3960 *      @ap: host port to handle error for
3962 *      @prereset: prereset method (can be NULL)
3963 *      @softreset: softreset method (can be NULL)
3964 *      @hardreset: hardreset method (can be NULL)
3965 *      @postreset: postreset method (can be NULL)
3966 *
3967 *      Perform standard error handling sequence.
3968 *
3969 *      LOCKING:
3970 *      Kernel thread context (may sleep).
3971 */
3972void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
3973               ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
3974               ata_postreset_fn_t postreset)
3975{
3976        struct ata_device *dev;
3977        int rc;
3978
3979        ata_eh_autopsy(ap);
3980        ata_eh_report(ap);
3981
3982        rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
3983                            NULL);
3984        if (rc) {
3985                ata_for_each_dev(dev, &ap->link, ALL)
3986                        ata_dev_disable(dev);
3987        }
3988
3989        ata_eh_finish(ap);
3990}
3991
3992/**
3993 *      ata_std_error_handler - standard error handler
3994 *      @ap: host port to handle error for
3995 *
3996 *      Standard error handler
3997 *
3998 *      LOCKING:
3999 *      Kernel thread context (may sleep).
4000 */
4001void ata_std_error_handler(struct ata_port *ap)
4002{
4003        struct ata_port_operations *ops = ap->ops;
4004        ata_reset_fn_t hardreset = ops->hardreset;
4005
4006        /* ignore built-in hardreset if SCR access is not available */
4007        if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
4008                hardreset = NULL;
4009
4010        ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
4011}
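
    /*
     * Most LLDs don't invoke ata_std_error_handler() directly; they inherit
     * ->error_handler from ata_base_port_ops.  As a rough, hypothetical
     * sketch, a driver overriding only the hardreset method would do:
     *
     *	static struct ata_port_operations foo_port_ops = {
     *		.inherits	= &ata_base_port_ops,
     *		.hardreset	= foo_hardreset,
     *	};
     */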
4012
4013#ifdef CONFIG_PM
4014/**
4015 *      ata_eh_handle_port_suspend - perform port suspend operation
4016 *      @ap: port to suspend
4017 *
4018 *      Suspend @ap.
4019 *
4020 *      LOCKING:
4021 *      Kernel thread context (may sleep).
4022 */
4023static void ata_eh_handle_port_suspend(struct ata_port *ap)
4024{
4025        unsigned long flags;
4026        int rc = 0;
4027        struct ata_device *dev;
4028
4029        /* are we suspending? */
4030        spin_lock_irqsave(ap->lock, flags);
4031        if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
4032            ap->pm_mesg.event & PM_EVENT_RESUME) {
4033                spin_unlock_irqrestore(ap->lock, flags);
4034                return;
4035        }
4036        spin_unlock_irqrestore(ap->lock, flags);
4037
4038        WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
4039
4040        /*
4041         * If we have a ZPODD attached, check its zero
4042         * power ready status before the port is frozen.
4043         * Only needed for runtime suspend.
4044         */
4045        if (PMSG_IS_AUTO(ap->pm_mesg)) {
4046                ata_for_each_dev(dev, &ap->link, ENABLED) {
4047                        if (zpodd_dev_enabled(dev))
4048                                zpodd_on_suspend(dev);
4049                }
4050        }
4051
4052        /* tell ACPI we're suspending */
4053        rc = ata_acpi_on_suspend(ap);
4054        if (rc)
4055                goto out;
4056
4057        /* suspend */
4058        ata_eh_freeze_port(ap);
4059
4060        if (ap->ops->port_suspend)
4061                rc = ap->ops->port_suspend(ap, ap->pm_mesg);
4062
4063        ata_acpi_set_state(ap, ap->pm_mesg);
4064 out:
4065        /* report result */
4066        spin_lock_irqsave(ap->lock, flags);
4067
4068        ap->pflags &= ~ATA_PFLAG_PM_PENDING;
4069        if (rc == 0)
4070                ap->pflags |= ATA_PFLAG_SUSPENDED;
4071        else if (ap->pflags & ATA_PFLAG_FROZEN)
4072                ata_port_schedule_eh(ap);
4073
4074        if (ap->pm_result) {
4075                *ap->pm_result = rc;
4076                ap->pm_result = NULL;
4077        }
4078
4079        spin_unlock_irqrestore(ap->lock, flags);
4080
4081        return;
4082}
4083
4084/**
4085 *      ata_eh_handle_port_resume - perform port resume operation
4086 *      @ap: port to resume
4087 *
4088 *      Resume @ap.
4089 *
4090 *      LOCKING:
4091 *      Kernel thread context (may sleep).
4092 */
4093static void ata_eh_handle_port_resume(struct ata_port *ap)
4094{
4095        struct ata_link *link;
4096        struct ata_device *dev;
4097        unsigned long flags;
4098        int rc = 0;
4099
4100        /* are we resuming? */
4101        spin_lock_irqsave(ap->lock, flags);
4102        if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
4103            !(ap->pm_mesg.event & PM_EVENT_RESUME)) {
4104                spin_unlock_irqrestore(ap->lock, flags);
4105                return;
4106        }
4107        spin_unlock_irqrestore(ap->lock, flags);
4108
4109        WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));
4110
4111        /*
4112         * Error timestamps are in jiffies, which don't advance while
4113         * suspended, and PHY events during resume aren't too uncommon.
4114         * When the two are combined, it can lead to unnecessary speed
4115         * downs if the machine is suspended and resumed repeatedly.
4116         * Clear error history.
4117         */
4118        ata_for_each_link(link, ap, HOST_FIRST)
4119                ata_for_each_dev(dev, link, ALL)
4120                        ata_ering_clear(&dev->ering);
4121
4122        ata_acpi_set_state(ap, ap->pm_mesg);
4123
4124        if (ap->ops->port_resume)
4125                rc = ap->ops->port_resume(ap);
4126
4127        /* tell ACPI that we're resuming */
4128        ata_acpi_on_resume(ap);
4129
4130        /* report result */
4131        spin_lock_irqsave(ap->lock, flags);
4132        ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
4133        if (ap->pm_result) {
4134                *ap->pm_result = rc;
4135                ap->pm_result = NULL;
4136        }
4137        spin_unlock_irqrestore(ap->lock, flags);
4138}
4139#endif /* CONFIG_PM */
4140