/* uboot/drivers/block/sata_dwc.c */
/*
 * sata_dwc.c
 *
 * Synopsys DesignWare Cores (DWC) SATA host driver
 *
 * Author: Mark Miesfeld <mmiesfeld@amcc.com>
 *
 * Ported from 2.6.19.2 to 2.6.25/26 by Stefan Roese <sr@denx.de>
 * Copyright 2008 DENX Software Engineering
 *
 * Based on versions provided by AMCC and Synopsys which are:
 *          Copyright 2006 Applied Micro Circuits Corporation
 *          COPYRIGHT (C) 2005  SYNOPSYS, INC.  ALL RIGHTS RESERVED
 *
 * SPDX-License-Identifier:     GPL-2.0+
 */
/*
 * SATA support based on the chip canyonlands.
 *
 * 04-17-2009
 *              The local version of this driver for the canyonlands board
 *              does not use interrupts but polls the chip instead.
 */
  24
  25#include <common.h>
  26#include <command.h>
  27#include <pci.h>
  28#include <asm/processor.h>
  29#include <linux/errno.h>
  30#include <asm/io.h>
  31#include <malloc.h>
  32#include <ata.h>
  33#include <sata.h>
  34#include <linux/ctype.h>
  35
  36#include "sata_dwc.h"
  37
/* Number of AHB DMA channels this driver uses (single shared channel). */
#define DMA_NUM_CHANS			1
/* Number of per-channel register slots laid out in the DMAC register map. */
#define DMA_NUM_CHAN_REGS		8

/* Default AHB DMA burst length -- presumably in transfer beats; confirm
 * against the DW AHB DMAC databook before changing.
 */
#define AHB_DMA_BRST_DFLT		16
  42
/*
 * One 64-bit DW AHB DMAC register, accessed as two 32-bit halves
 * (the controller's register map is 64-bit aligned).
 */
struct dmareg {
	u32 low;	/* low 32 bits */
	u32 high;	/* high 32 bits */
};
  47
/* Per-channel register set of the DW AHB DMA controller. */
struct dma_chan_regs {
	struct dmareg sar;	/* source address */
	struct dmareg dar;	/* destination address */
	struct dmareg llp;	/* linked list pointer */
	struct dmareg ctl;	/* control */
	struct dmareg sstat;	/* source status */
	struct dmareg dstat;	/* destination status */
	struct dmareg sstatar;	/* source status address */
	struct dmareg dstatar;	/* destination status address */
	struct dmareg cfg;	/* configuration */
	struct dmareg sgr;	/* source gather */
	struct dmareg dsr;	/* destination scatter */
};
  61
/*
 * One bank of DMA interrupt registers.  The same layout is reused for
 * the raw, status, mask and clear banks in struct ahb_dma_regs.
 */
struct dma_interrupt_regs {
	struct dmareg tfr;	/* DMA transfer complete */
	struct dmareg block;	/* block transfer complete */
	struct dmareg srctran;	/* source transaction complete */
	struct dmareg dsttran;	/* destination transaction complete */
	struct dmareg error;	/* DMA error */
};
  69
/*
 * Full register map of the DW AHB DMA controller.  The layout mirrors
 * the hardware exactly -- do not reorder, resize or remove fields.
 */
struct ahb_dma_regs {
	struct dma_chan_regs	chan_regs[DMA_NUM_CHAN_REGS];
	struct dma_interrupt_regs	interrupt_raw;		/* raw status */
	struct dma_interrupt_regs	interrupt_status;	/* masked status */
	struct dma_interrupt_regs	interrupt_mask;		/* mask enables */
	struct dma_interrupt_regs	interrupt_clear;	/* write to clear */
	struct dmareg			statusInt;	/* combined interrupt status */
	struct dmareg			rq_srcreg;	/* sw handshake: src request */
	struct dmareg			rq_dstreg;	/* sw handshake: dst request */
	struct dmareg			rq_sgl_srcreg;	/* single src request */
	struct dmareg			rq_sgl_dstreg;	/* single dst request */
	struct dmareg			rq_lst_srcreg;	/* last src request */
	struct dmareg			rq_lst_dstreg;	/* last dst request */
	struct dmareg			dma_cfg;	/* global DMAC enable */
	struct dmareg			dma_chan_en;	/* per-channel enable */
	struct dmareg			dma_id;		/* DMAC ID */
	struct dmareg			dma_test;	/* test register */
	struct dmareg			res1;		/* reserved */
	struct dmareg			res2;		/* reserved */
	/* DMA Comp Params
	 * Param 6 = dma_param[0], Param 5 = dma_param[1],
	 * Param 4 = dma_param[2] ...
	 */
	struct dmareg			dma_params[6];
};
  95
/* Global DMA controller enable/disable values (written to dma_cfg.low). */
#define DMA_EN			0x00000001
#define DMA_DI			0x00000000
/* Bit selecting DMA channel @ch. */
#define DMA_CHANNEL(ch)		(0x00000001 << (ch))
/*
 * Channel-enable style registers use a write-enable scheme: the value
 * bit lives in the low byte, its write-enable bit 8 positions higher.
 * A "disable" therefore sets only the write-enable bit with a 0 value.
 */
#define DMA_ENABLE_CHAN(ch)	((0x00000001 << (ch)) |	\
				((0x000000001 << (ch)) << 8))
#define DMA_DISABLE_CHAN(ch)	(0x00000000 |	\
				((0x000000001 << (ch)) << 8))

/* This driver supports exactly one SATA port. */
#define SATA_DWC_MAX_PORTS	1
/* Offsets from the controller base of the SCR block and DWC registers. */
#define SATA_DWC_SCR_OFFSET	0x24
#define SATA_DWC_REG_OFFSET	0x64
 107
/*
 * DWC SATA controller register block, mapped at base + SATA_DWC_REG_OFFSET.
 * Layout must match the hardware exactly -- do not reorder or resize.
 */
struct sata_dwc_regs {
	u32 fptagr;	/* 1st party DMA tag */
	u32 fpbor;	/* 1st party DMA buffer offset */
	u32 fptcr;	/* 1st party DMA transfer count */
	u32 dmacr;	/* DMA control */
	u32 dbtsr;	/* DMA burst transaction size */
	u32 intpr;	/* interrupt pending */
	u32 intmr;	/* interrupt mask */
	u32 errmr;	/* error mask */
	u32 llcr;	/* link layer control */
	u32 phycr;	/* PHY control */
	u32 physr;	/* PHY status */
	u32 rxbistpd;	/* recvd BIST pattern def register */
	u32 rxbistpd1;	/* recvd BIST data dword1 */
	u32 rxbistpd2;	/* recvd BIST pattern def regr dword2 */
	u32 txbistpd;	/* trans BIST pattern def register */
	u32 txbistpd1;	/* trans BIST data dword1 */
	u32 txbistpd2;	/* trans BIST data dword2 */
	u32 bistcr;	/* BIST control register */
	u32 bistfctr;	/* BIST FIS count register */
	u32 bistsr;	/* BIST status register */
	u32 bistdecr;	/* BIST dword error count register */
	u32 res[15];	/* reserved */
	u32 testr;	/* test register */
	u32 versionr;	/* version register */
	u32 idr;	/* ID register */
	u32 unimpl[192];	/* unimplemented */
	u32 dmadr[256];	/* FIFO locations in DMA data register space */
};
 137
/* TX/RX FIFO depth masks, in DWORDs. */
#define SATA_DWC_TXFIFO_DEPTH		0x01FF
#define SATA_DWC_RXFIFO_DEPTH		0x01FF

/* Encode DMA burst transaction sizes (bytes -> DWORDs) for the dbtsr
 * register: write size in the low half, read size in the high half.
 */
#define SATA_DWC_DBTSR_MWR(size)	((size / 4) & SATA_DWC_TXFIFO_DEPTH)
#define SATA_DWC_DBTSR_MRD(size)	(((size / 4) &	\
					SATA_DWC_RXFIFO_DEPTH) << 16)
/* Interrupt pending register (intpr) bits. */
#define SATA_DWC_INTPR_DMAT		0x00000001
#define SATA_DWC_INTPR_NEWFP		0x00000002
#define SATA_DWC_INTPR_PMABRT		0x00000004
#define SATA_DWC_INTPR_ERR		0x00000008
#define SATA_DWC_INTPR_NEWBIST		0x00000010
#define SATA_DWC_INTPR_IPF		0x10000000
/* Interrupt mask register (intmr) bits, matching the intpr bits above. */
#define SATA_DWC_INTMR_DMATM		0x00000001
#define SATA_DWC_INTMR_NEWFPM		0x00000002
#define SATA_DWC_INTMR_PMABRTM		0x00000004
#define SATA_DWC_INTMR_ERRM		0x00000008
#define SATA_DWC_INTMR_NEWBISTM		0x00000010

/* dmacr bits: TX channel enable in transmit mode; writing the same value
 * is also used to clear both DMA channels.
 */
#define SATA_DWC_DMACR_TMOD_TXCHEN	0x00000004
#define SATA_DWC_DMACR_TXRXCH_CLEAR	SATA_DWC_DMACR_TMOD_TXCHEN

/* Maximum number of queued commands tracked per port (NCQ depth). */
#define SATA_DWC_QCMD_MAX	32

/* SError bits treated as real errors when programmed into errmr. */
#define SATA_DWC_SERROR_ERR_BITS	0x0FFF0F03

/* Fetch this driver's per-port private data from a struct ata_port. */
#define HSDEVP_FROM_AP(ap)	(struct sata_dwc_device_port*)	\
				(ap)->private_data
 165
/* Driver state for one DWC SATA controller instance. */
struct sata_dwc_device {
	struct device		*dev;		/* generic device struct */
	struct ata_probe_ent	*pe;		/* probe entry */
	struct ata_host		*host;		/* owning libata host */
	u8			*reg_base;	/* controller register base */
	struct sata_dwc_regs	*sata_dwc_regs;	/* DWC-specific registers */
	int			irq_dma;	/* DMA irq (unused: polled) */
};
 174
/* Per-port driver state; all arrays are indexed by command tag. */
struct sata_dwc_device_port {
	struct sata_dwc_device	*hsdev;		/* owning controller */
	int			cmd_issued[SATA_DWC_QCMD_MAX];
	u32			dma_chan[SATA_DWC_QCMD_MAX];
	int			dma_pending[SATA_DWC_QCMD_MAX];
};
 181
/*
 * Per-tag bookkeeping values: command issue states (cmd_issued[]) and
 * DMA direction states (dma_pending[]).
 */
enum {
	SATA_DWC_CMD_ISSUED_NOT		= 0,
	SATA_DWC_CMD_ISSUED_PEND	= 1,
	SATA_DWC_CMD_ISSUED_EXEC	= 2,
	SATA_DWC_CMD_ISSUED_NODATA	= 3,

	SATA_DWC_DMA_PENDING_NONE	= 0,
	SATA_DWC_DMA_PENDING_TX		= 1,
	SATA_DWC_DMA_PENDING_RX		= 2,
};
 192
 193#define msleep(a)       udelay(a * 1000)
 194#define ssleep(a)       msleep(a * 1000)
 195
 196static int ata_probe_timeout = (ATA_TMOUT_INTERNAL / 100);
 197
/* Coarse device-presence state shared between init_sata() and scan_sata(). */
enum sata_dev_state {
	SATA_INIT = 0,		/* not yet probed */
	SATA_READY = 1,		/* device detected and usable */
	SATA_NODEVICE = 2,	/* no device found / probe timed out */
	SATA_ERROR = 3,		/* device in error state */
};
/* NOTE(review): intentionally non-static -- presumably read by code
 * outside this file; confirm before giving it internal linkage.
 */
enum sata_dev_state dev_state = SATA_INIT;
 205
/* AHB DMA controller register block; mapped in init_sata(). */
static struct ahb_dma_regs		*sata_dma_regs = 0;
/* Host/port/device state for the single supported port. */
static struct ata_host			*phost;
static struct ata_port			ap;
static struct ata_port			*pap = &ap;
static struct ata_device		ata_device;
static struct sata_dwc_device_port	dwc_devp;

/* SCR base address (sstatus); SError lives at this address + 4. */
static void	*scr_addr_sstatus;
/* Sector count for the next internal command -- set before each issue. */
static u32	temp_n_block = 0;
 215
/*
 * Forward declarations.  This file carries a local, trimmed-down copy of
 * the libata helpers it needs; everything runs in polled mode, so there
 * are no interrupt handlers.
 */
static unsigned ata_exec_internal(struct ata_device *dev,
			struct ata_taskfile *tf, const u8 *cdb,
			int dma_dir, unsigned int buflen,
			unsigned long timeout);
static unsigned int ata_dev_set_feature(struct ata_device *dev,
			u8 enable,u8 feature);
static unsigned int ata_dev_init_params(struct ata_device *dev,
			u16 heads, u16 sectors);
static u8 ata_irq_on(struct ata_port *ap);
static struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap,
			unsigned int tag);
static int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
			u8 status, int in_wq);
static void ata_tf_to_host(struct ata_port *ap,
			const struct ata_taskfile *tf);
static void ata_exec_command(struct ata_port *ap,
			const struct ata_taskfile *tf);
static unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc);
static u8 ata_check_altstatus(struct ata_port *ap);
static u8 ata_check_status(struct ata_port *ap);
static void ata_dev_select(struct ata_port *ap, unsigned int device,
			unsigned int wait, unsigned int can_sleep);
static void ata_qc_issue(struct ata_queued_cmd *qc);
static void ata_tf_load(struct ata_port *ap,
			const struct ata_taskfile *tf);
static int ata_dev_read_sectors(unsigned char* pdata,
			unsigned long datalen, u32 block, u32 n_block);
static int ata_dev_write_sectors(unsigned char* pdata,
			unsigned long datalen , u32 block, u32 n_block);
static void ata_std_dev_select(struct ata_port *ap, unsigned int device);
static void ata_qc_complete(struct ata_queued_cmd *qc);
static void __ata_qc_complete(struct ata_queued_cmd *qc);
static void fill_result_tf(struct ata_queued_cmd *qc);
static void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
static void ata_mmio_data_xfer(struct ata_device *dev,
			unsigned char *buf,
			unsigned int buflen,int do_write);
static void ata_pio_task(struct ata_port *arg_ap);
static void __ata_port_freeze(struct ata_port *ap);
static int ata_port_freeze(struct ata_port *ap);
static void ata_qc_free(struct ata_queued_cmd *qc);
static void ata_pio_sectors(struct ata_queued_cmd *qc);
static void ata_pio_sector(struct ata_queued_cmd *qc);
static void ata_pio_queue_task(struct ata_port *ap,
			void *data,unsigned long delay);
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq);
static int sata_dwc_softreset(struct ata_port *ap);
static int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		unsigned int flags, u16 *id);
static int check_sata_dev_state(void);
 266
/* Capabilities advertised for the single DWC SATA port. */
static const struct ata_port_info sata_dwc_port_info[] = {
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING |
				ATA_FLAG_SRST | ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* PIO modes 0-4 */
		.mwdma_mask	= 0x07,	/* MWDMA modes 0-2 */
		.udma_mask	= 0x7f,	/* UDMA modes 0-6 */
	},
};
 277
 278int init_sata(int dev)
 279{
 280        struct sata_dwc_device hsdev;
 281        struct ata_host host;
 282        struct ata_port_info pi = sata_dwc_port_info[0];
 283        struct ata_link *link;
 284        struct sata_dwc_device_port hsdevp = dwc_devp;
 285        u8 *base = 0;
 286        u8 *sata_dma_regs_addr = 0;
 287        u8 status;
 288        unsigned long base_addr = 0;
 289        int chan = 0;
 290        int rc;
 291        int i;
 292
 293        phost = &host;
 294
 295        base = (u8*)SATA_BASE_ADDR;
 296
 297        hsdev.sata_dwc_regs = (void *__iomem)(base + SATA_DWC_REG_OFFSET);
 298
 299        host.n_ports = SATA_DWC_MAX_PORTS;
 300
 301        for (i = 0; i < SATA_DWC_MAX_PORTS; i++) {
 302                ap.pflags |= ATA_PFLAG_INITIALIZING;
 303                ap.flags = ATA_FLAG_DISABLED;
 304                ap.print_id = -1;
 305                ap.ctl = ATA_DEVCTL_OBS;
 306                ap.host = &host;
 307                ap.last_ctl = 0xFF;
 308
 309                link = &ap.link;
 310                link->ap = &ap;
 311                link->pmp = 0;
 312                link->active_tag = ATA_TAG_POISON;
 313                link->hw_sata_spd_limit = 0;
 314
 315                ap.port_no = i;
 316                host.ports[i] = &ap;
 317        }
 318
 319        ap.pio_mask = pi.pio_mask;
 320        ap.mwdma_mask = pi.mwdma_mask;
 321        ap.udma_mask = pi.udma_mask;
 322        ap.flags |= pi.flags;
 323        ap.link.flags |= pi.link_flags;
 324
 325        host.ports[0]->ioaddr.cmd_addr = base;
 326        host.ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET;
 327        scr_addr_sstatus = base + SATA_DWC_SCR_OFFSET;
 328
 329        base_addr = (unsigned long)base;
 330
 331        host.ports[0]->ioaddr.cmd_addr = (void *)base_addr + 0x00;
 332        host.ports[0]->ioaddr.data_addr = (void *)base_addr + 0x00;
 333
 334        host.ports[0]->ioaddr.error_addr = (void *)base_addr + 0x04;
 335        host.ports[0]->ioaddr.feature_addr = (void *)base_addr + 0x04;
 336
 337        host.ports[0]->ioaddr.nsect_addr = (void *)base_addr + 0x08;
 338
 339        host.ports[0]->ioaddr.lbal_addr = (void *)base_addr + 0x0c;
 340        host.ports[0]->ioaddr.lbam_addr = (void *)base_addr + 0x10;
 341        host.ports[0]->ioaddr.lbah_addr = (void *)base_addr + 0x14;
 342
 343        host.ports[0]->ioaddr.device_addr = (void *)base_addr + 0x18;
 344        host.ports[0]->ioaddr.command_addr = (void *)base_addr + 0x1c;
 345        host.ports[0]->ioaddr.status_addr = (void *)base_addr + 0x1c;
 346
 347        host.ports[0]->ioaddr.altstatus_addr = (void *)base_addr + 0x20;
 348        host.ports[0]->ioaddr.ctl_addr = (void *)base_addr + 0x20;
 349
 350        sata_dma_regs_addr = (u8*)SATA_DMA_REG_ADDR;
 351        sata_dma_regs = (void *__iomem)sata_dma_regs_addr;
 352
 353        status = ata_check_altstatus(&ap);
 354
 355        if (status == 0x7f) {
 356                printf("Hard Disk not found.\n");
 357                dev_state = SATA_NODEVICE;
 358                rc = false;
 359                return rc;
 360        }
 361
 362        printf("Waiting for device...");
 363        i = 0;
 364        while (1) {
 365                udelay(10000);
 366
 367                status = ata_check_altstatus(&ap);
 368
 369                if ((status & ATA_BUSY) == 0) {
 370                        printf("\n");
 371                        break;
 372                }
 373
 374                i++;
 375                if (i > (ATA_RESET_TIME * 100)) {
 376                        printf("** TimeOUT **\n");
 377
 378                        dev_state = SATA_NODEVICE;
 379                        rc = false;
 380                        return rc;
 381                }
 382                if ((i >= 100) && ((i % 100) == 0))
 383                        printf(".");
 384        }
 385
 386        rc = sata_dwc_softreset(&ap);
 387
 388        if (rc) {
 389                printf("sata_dwc : error. soft reset failed\n");
 390                return rc;
 391        }
 392
 393        for (chan = 0; chan < DMA_NUM_CHANS; chan++) {
 394                out_le32(&(sata_dma_regs->interrupt_mask.error.low),
 395                                DMA_DISABLE_CHAN(chan));
 396
 397                out_le32(&(sata_dma_regs->interrupt_mask.tfr.low),
 398                                DMA_DISABLE_CHAN(chan));
 399        }
 400
 401        out_le32(&(sata_dma_regs->dma_cfg.low), DMA_DI);
 402
 403        out_le32(&hsdev.sata_dwc_regs->intmr,
 404                SATA_DWC_INTMR_ERRM |
 405                SATA_DWC_INTMR_PMABRTM);
 406
 407        /* Unmask the error bits that should trigger
 408         * an error interrupt by setting the error mask register.
 409         */
 410        out_le32(&hsdev.sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS);
 411
 412        hsdev.host = ap.host;
 413        memset(&hsdevp, 0, sizeof(hsdevp));
 414        hsdevp.hsdev = &hsdev;
 415
 416        for (i = 0; i < SATA_DWC_QCMD_MAX; i++)
 417                hsdevp.cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT;
 418
 419        out_le32((void __iomem *)scr_addr_sstatus + 4,
 420                in_le32((void __iomem *)scr_addr_sstatus + 4));
 421
 422        rc = 0;
 423        return rc;
 424}
 425
 426int reset_sata(int dev)
 427{
 428        return 0;
 429}
 430
 431static u8 ata_check_altstatus(struct ata_port *ap)
 432{
 433        u8 val = 0;
 434        val = readb(ap->ioaddr.altstatus_addr);
 435        return val;
 436}
 437
/*
 * sata_dwc_softreset() - detect a device and issue an ATA soft reset.
 *
 * Presence is probed by writing scratch patterns to the nsect/lbal
 * taskfile registers and reading them back; only a real device latches
 * the values.  On success SRST is pulsed in the device control register
 * and the routine polls until BSY clears.
 *
 * Returns 0 on success, false (0) after setting dev_state = SATA_NODEVICE
 * when no device answers.  NOTE(review): both paths return 0; callers
 * distinguish them via dev_state -- confirm before changing.
 *
 * The write/delay sequence below is order-sensitive; do not reorder.
 */
static int sata_dwc_softreset(struct ata_port *ap)
{
	u8 nsect,lbal = 0;
	u8 tmp = 0;
	struct ata_ioports *ioaddr = &ap->ioaddr;

	/* Read (and discard) SError to clear latched link errors. */
	in_le32((void *)ap->ioaddr.scr_addr + (SCR_ERROR * 4));

	/* Scratch-register presence test: a device echoes what we wrote. */
	writeb(0x55, ioaddr->nsect_addr);
	writeb(0xaa, ioaddr->lbal_addr);
	writeb(0xaa, ioaddr->nsect_addr);
	writeb(0x55, ioaddr->lbal_addr);
	writeb(0x55, ioaddr->nsect_addr);
	writeb(0xaa, ioaddr->lbal_addr);

	nsect = readb(ioaddr->nsect_addr);
	lbal = readb(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa)) {
		printf("Device found\n");
	} else {
		printf("No device found\n");
		dev_state = SATA_NODEVICE;
		return false;
	}

	/* Select device 0 and restore the normal control value. */
	tmp = ATA_DEVICE_OBS;
	writeb(tmp, ioaddr->device_addr);
	writeb(ap->ctl, ioaddr->ctl_addr);

	udelay(200);

	/* Pulse SRST: assert for 200 us, then deassert. */
	writeb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);

	udelay(200);
	writeb(ap->ctl, ioaddr->ctl_addr);

	/* Settle time before the status register is meaningful. */
	msleep(150);
	ata_check_status(ap);

	msleep(50);
	ata_check_status(ap);

	/* NOTE(review): no upper bound -- a hung device loops forever. */
	while (1) {
		u8 status = ata_check_status(ap);

		if (!(status & ATA_BUSY))
			break;

		printf("Hard Disk status is BUSY.\n");
		msleep(50);
	}

	tmp = ATA_DEVICE_OBS;
	writeb(tmp, ioaddr->device_addr);

	/* Read back the reset signature registers (values unused here). */
	nsect = readb(ioaddr->nsect_addr);
	lbal = readb(ioaddr->lbal_addr);

	return 0;
}
 499
 500static u8 ata_check_status(struct ata_port *ap)
 501{
 502        u8 val = 0;
 503        val = readb(ap->ioaddr.status_addr);
 504        return val;
 505}
 506
 507static int ata_id_has_hipm(const u16 *id)
 508{
 509        u16 val = id[76];
 510
 511        if (val == 0 || val == 0xffff)
 512                return -1;
 513
 514        return val & (1 << 9);
 515}
 516
 517static int ata_id_has_dipm(const u16 *id)
 518{
 519        u16 val = id[78];
 520
 521        if (val == 0 || val == 0xffff)
 522                return -1;
 523
 524        return val & (1 << 3);
 525}
 526
/*
 * scan_sata() - IDENTIFY the attached device and fill in sata_dev_desc[dev].
 * @dev: index into the U-Boot sata_dev_desc[] table
 *
 * Waits for BSY to clear, reads the IDENTIFY data, then decodes the
 * transfer-mode masks, geometry/LBA capabilities and ID strings into the
 * global device descriptor.  Returns 0 on success, 1 on timeout/failure.
 */
int scan_sata(int dev)
{
	int i;
	int rc;
	u8 status;
	const u16 *id;
	struct ata_device *ata_dev = &ata_device;
	unsigned long pio_mask, mwdma_mask;
	char revbuf[7];
	u16 iobuf[ATA_SECTOR_WORDS];

	memset(iobuf, 0, sizeof(iobuf));

	/* init_sata() already decided there is nothing attached. */
	if (dev_state == SATA_NODEVICE)
		return 1;

	/* Poll (10 ms steps) for BSY to clear, bounded by ATA_RESET_TIME. */
	printf("Waiting for device...");
	i = 0;
	while (1) {
		udelay(10000);

		status = ata_check_altstatus(&ap);

		if ((status & ATA_BUSY) == 0) {
			printf("\n");
			break;
		}

		i++;
		if (i > (ATA_RESET_TIME * 100)) {
			printf("** TimeOUT **\n");

			dev_state = SATA_NODEVICE;
			return 1;
		}
		if ((i >= 100) && ((i % 100) == 0))
			printf(".");
	}

	udelay(1000);

	rc = ata_dev_read_id(ata_dev, &ata_dev->class,
			ATA_READID_POSTRESET,ata_dev->id);
	if (rc) {
		printf("sata_dwc : error. failed sata scan\n");
		return 1;
	}

	/* SATA drives indicate we have a bridge. We don't know which
	 * end of the link the bridge is which is a problem
	 */
	if (ata_id_is_sata(ata_dev->id))
		ap.cbl = ATA_CBL_SATA;

	id = ata_dev->id;

	/* Reset everything derived from a previous IDENTIFY. */
	ata_dev->flags &= ~ATA_DFLAG_CFG_MASK;
	ata_dev->max_sectors = 0;
	ata_dev->cdb_len = 0;
	ata_dev->n_sectors = 0;
	ata_dev->cylinders = 0;
	ata_dev->heads = 0;
	ata_dev->sectors = 0;

	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5) {
			pio_mask = (2 << mode) - 1;
		} else {
			pio_mask = 1;
		}
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	/* CompactFlash advertises extra PIO/MWDMA modes in word 163. */
	if (ata_id_is_cfa(id)) {
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	if (ata_dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1)
				printf("supports DRM functions and may "
					"not be fully accessable.\n");
			strcpy(revbuf, "CFA");
		} else {
			if (ata_id_has_tpm(id))
				printf("supports DRM functions and may "
						"not be fully accessable.\n");
		}

		ata_dev->n_sectors = ata_id_n_sectors((u16*)id);

		/* Word 59: multi-sector (READ/WRITE MULTIPLE) setting. */
		if (ata_dev->id[59] & 0x100)
			ata_dev->multi_count = ata_dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			char ncq_desc[20];

			ata_dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				ata_dev->flags |= ATA_DFLAG_LBA48;

				if (ata_dev->n_sectors >= (1UL << 28) &&
					ata_id_has_flush_ext(id))
					ata_dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}
			/* ncq_desc is only built, never printed here. */
			if (!ata_id_has_ncq(ata_dev->id))
				ncq_desc[0] = '\0';

			if (ata_dev->horkage & ATA_HORKAGE_NONCQ)
				strcpy(ncq_desc, "NCQ (not used)");

			if (ap.flags & ATA_FLAG_NCQ)
				ata_dev->flags |= ATA_DFLAG_NCQ;
		}
		ata_dev->cdb_len = 16;
	}
	ata_dev->max_sectors = ATA_MAX_SECTORS;
	if (ata_dev->flags & ATA_DFLAG_LBA48)
		ata_dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (!(ata_dev->horkage & ATA_HORKAGE_IPM)) {
		if (ata_id_has_hipm(ata_dev->id))
			ata_dev->flags |= ATA_DFLAG_HIPM;
		if (ata_id_has_dipm(ata_dev->id))
			ata_dev->flags |= ATA_DFLAG_DIPM;
	}

	/* PATA drive behind a SATA bridge: limit UDMA and sector count. */
	if ((ap.cbl == ATA_CBL_SATA) && (!ata_id_is_sata(ata_dev->id))) {
		ata_dev->udma_mask &= ATA_UDMA5;
		ata_dev->max_sectors = ATA_MAX_SECTORS;
	}

	if (ata_dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		printf("Drive reports diagnostics failure."
				"This may indicate a drive\n");
		printf("fault or invalid emulation."
				"Contact drive vendor for information.\n");
	}

	rc = check_sata_dev_state();

	/* Copy the IDENTIFY strings into the U-Boot device descriptor.
	 * NOTE(review): field pairing (vendor <- PROD, product <- SERNO)
	 * follows this driver's historical convention -- confirm before
	 * "fixing".
	 */
	ata_id_c_string(ata_dev->id,
			(unsigned char *)sata_dev_desc[dev].revision,
			 ATA_ID_FW_REV, sizeof(sata_dev_desc[dev].revision));
	ata_id_c_string(ata_dev->id,
			(unsigned char *)sata_dev_desc[dev].vendor,
			 ATA_ID_PROD, sizeof(sata_dev_desc[dev].vendor));
	ata_id_c_string(ata_dev->id,
			(unsigned char *)sata_dev_desc[dev].product,
			 ATA_ID_SERNO, sizeof(sata_dev_desc[dev].product));

	sata_dev_desc[dev].lba = (u32) ata_dev->n_sectors;

#ifdef CONFIG_LBA48
	/* Word 83 bit 10: 48-bit address feature set supported. */
	if (ata_dev->id[83] & (1 << 10)) {
		sata_dev_desc[dev].lba48 = 1;
	} else {
		sata_dev_desc[dev].lba48 = 0;
	}
#endif

	return 0;
}
 710
 711static u8 ata_busy_wait(struct ata_port *ap,
 712                unsigned int bits,unsigned int max)
 713{
 714        u8 status;
 715
 716        do {
 717                udelay(10);
 718                status = ata_check_status(ap);
 719                max--;
 720        } while (status != 0xff && (status & bits) && (max > 0));
 721
 722        return status;
 723}
 724
/*
 * ata_dev_read_id() - read the IDENTIFY DEVICE data for @dev.
 * @dev:     target device
 * @p_class: in: expected class (ATA/ATAPI); out: class actually found
 * @flags:   ATA_READID_* flags (POSTRESET triggers the pre-ATA4 sequence)
 * @id:      buffer for ATA_ID_WORDS little-endian words, byteswapped
 *           in place to CPU order on success
 *
 * Retries with the alternate IDENTIFY flavour when the device aborts the
 * command, handles the SET_FEATURES spin-up handshake for drives that
 * power up in standby, and reissues IDENTIFY after INITIALIZE DEVICE
 * PARAMETERS on old CHS-only drives.
 *
 * Returns 0 on success, false (0) on the initial BSY timeout (callers
 * treat nonzero as failure -- NOTE(review): this path is ambiguous),
 * or a negative errno on failure.
 */
static int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		unsigned int flags, u16 *id)
{
	struct ata_port *ap = pap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int may_fallback = 1, tried_spinup = 0;
	u8 status;
	int rc;

	/* The device must be idle before we can talk to it. */
	status = ata_busy_wait(ap, ATA_BUSY, 30000);
	if (status & ATA_BUSY) {
		printf("BSY = 0 check. timeout.\n");
		rc = false;
		return rc;
	}

	ata_dev_select(ap, dev->devno, 1, 1);

retry:
	memset(&tf, 0, sizeof(tf));
	ap->print_id = 1;
	ap->flags &= ~ATA_FLAG_DISABLED;
	tf.ctl = ap->ctl;
	tf.device = ATA_DEVICE_OBS;
	tf.command = ATA_CMD_ID_ATA;
	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	/* IDENTIFY transfers exactly one sector of data. */
	temp_n_block = 1;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
					sizeof(id[0]) * ATA_ID_WORDS, 0);

	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			printf("NODEV after polling detection\n");
			return -ENOENT;
		}

		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			/* Device or controller might have reported
			 * the wrong device class.  Give a shot at the
			 * other IDENTIFY if the current one is
			 * aborted by the device.
			 */
			if (may_fallback) {
				may_fallback = 0;

				if (class == ATA_DEV_ATA) {
					class = ATA_DEV_ATAPI;
				} else {
					class = ATA_DEV_ATA;
				}
				goto retry;
			}
			/* Control reaches here iff the device aborted
			 * both flavors of IDENTIFYs which happens
			 * sometimes with phantom devices.
			 */
			printf("both IDENTIFYs aborted, assuming NODEV\n");
			return -ENOENT;
		}
		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	unsigned int id_cnt;

	/* IDENTIFY data is little-endian on the wire; fix up in place. */
	for (id_cnt = 0; id_cnt < ATA_ID_WORDS; id_cnt++)
		id[id_cnt] = le16_to_cpu(id[id_cnt]);


	rc = -EINVAL;
	reason = "device reports invalid type";

	/* Sanity-check that the ID signature matches the expected class. */
	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}
	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * shoud never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;
	return 0;

err_out:
	printf("failed to READ ID (%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}
 881
 882static u8 ata_wait_idle(struct ata_port *ap)
 883{
 884        u8 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
 885        return status;
 886}
 887
/*
 * Select a device on the port, optionally waiting for the channel to go
 * idle before and after the selection.  can_sleep is accepted for API
 * compatibility but not used by this polled driver.
 */
static void ata_dev_select(struct ata_port *ap, unsigned int device,
		unsigned int wait, unsigned int can_sleep)
{
	if (wait)
		ata_wait_idle(ap);

	ata_std_dev_select(ap, device);

	if (wait)
		ata_wait_idle(ap);
}
 899
 900static void ata_std_dev_select(struct ata_port *ap, unsigned int device)
 901{
 902        u8 tmp;
 903
 904        if (device == 0) {
 905                tmp = ATA_DEVICE_OBS;
 906        } else {
 907                tmp = ATA_DEVICE_OBS | ATA_DEV1;
 908        }
 909
 910        writeb(tmp, ap->ioaddr.device_addr);
 911
 912        readb(ap->ioaddr.altstatus_addr);
 913
 914        udelay(1);
 915}
 916
 917static int waiting_for_reg_state(volatile u8 *offset,
 918                                int timeout_msec,
 919                                u32 sign)
 920{
 921        int i;
 922        u32 status;
 923
 924        for (i = 0; i < timeout_msec; i++) {
 925                status = readl(offset);
 926                if ((status & sign) != 0)
 927                        break;
 928                msleep(1);
 929        }
 930
 931        return (i < timeout_msec) ? 0 : -1;
 932}
 933
 934static void ata_qc_reinit(struct ata_queued_cmd *qc)
 935{
 936        qc->dma_dir = DMA_NONE;
 937        qc->flags = 0;
 938        qc->nbytes = qc->extrabytes = qc->curbytes = 0;
 939        qc->n_elem = 0;
 940        qc->err_mask = 0;
 941        qc->sect_size = ATA_SECT_SIZE;
 942        qc->nbytes = ATA_SECT_SIZE * temp_n_block;
 943
 944        memset(&qc->tf, 0, sizeof(qc->tf));
 945        qc->tf.ctl = 0;
 946        qc->tf.device = ATA_DEVICE_OBS;
 947
 948        qc->result_tf.command = ATA_DRDY;
 949        qc->result_tf.feature = 0;
 950}
 951
 952struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap,
 953                                        unsigned int tag)
 954{
 955        if (tag < ATA_MAX_QUEUE)
 956                return &ap->qcmd[tag];
 957        return NULL;
 958}
 959
/* Mark the port frozen; ata_exec_internal() rejects commands while set. */
static void __ata_port_freeze(struct ata_port *ap)
{
	printf("set port freeze.\n");
	ap->pflags |= ATA_PFLAG_FROZEN;
}
 965
/* Freeze the port.  Always returns 0 (no aborted-command count here). */
static int ata_port_freeze(struct ata_port *ap)
{
	__ata_port_freeze(ap);
	return 0;
}
 971
/*
 * Synchronously execute an internal ATA command without interrupts.
 *
 * The active link/port queue state is saved, the command is issued via
 * ata_qc_issue(), BSY/DRQ are polled, and the PIO state machine is run
 * to completion by ata_pio_task().  On return *tf holds the result
 * taskfile and the accumulated error mask is returned.
 *
 * NOTE(review): the two early `rc = false; return rc;` paths return 0,
 * which callers read as "no error" despite the timeout, and the second
 * one leaks the internal tag bit in ap->qc_allocated.  The later
 * early-return paths (after tag allocation) also skip ata_qc_free() and
 * do not restore the preempted state -- confirm whether callers recover.
 */
unsigned ata_exec_internal(struct ata_device *dev,
			struct ata_taskfile *tf, const u8 *cdb,
			int dma_dir, unsigned int buflen,
			unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = pap;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	unsigned int err_mask;
	int rc = 0;
	u8 status;

	/* The channel must drop BSY before a new command can be queued. */
	status = ata_busy_wait(ap, ATA_BUSY, 300000);
	if (status & ATA_BUSY) {
		printf("BSY = 0 check. timeout.\n");
		rc = false;
		return rc;
	}

	/* A frozen port accepts no commands. */
	if (ap->pflags & ATA_PFLAG_FROZEN)
		return AC_ERR_SYSTEM;

	tag = ATA_TAG_INTERNAL;

	/* Reserve the internal-command tag; fail if already in use. */
	if (test_and_set_bit(tag, &ap->qc_allocated)) {
		rc = false;
		return rc;
	}

	qc = __ata_qc_from_tag(ap, tag);
	qc->tag = tag;
	qc->ap = ap;
	qc->dev = dev;

	ata_qc_reinit(qc);

	/* Preempt any in-flight queue state; restored before returning. */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* Program the command into the queued-command slot. */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	qc->private_data = 0;

	ata_qc_issue(qc);

	if (!timeout)
		timeout = ata_probe_timeout * 1000 / HZ;

	/* The device must release BSY after accepting the command. */
	status = ata_busy_wait(ap, ATA_BUSY, 30000);
	if (status & ATA_BUSY) {
		printf("BSY = 0 check. timeout.\n");
		printf("altstatus = 0x%x.\n", status);
		qc->err_mask |= AC_ERR_OTHER;
		return qc->err_mask;
	}

	/* Wait for DRQ (bit 3); on timeout inspect the error register. */
	if (waiting_for_reg_state(ap->ioaddr.altstatus_addr, 1000, 0x8)) {
		u8 status = 0;
		u8 errorStatus = 0;

		status = readb(ap->ioaddr.altstatus_addr);
		if ((status & 0x01) != 0) {
			errorStatus = readb(ap->ioaddr.feature_addr);
			/* ABRT on PIO READ EXT => drive lacks LBA48. */
			if (errorStatus == 0x04 &&
				qc->tf.command == ATA_CMD_PIO_READ_EXT){
				printf("Hard Disk doesn't support LBA48\n");
				dev_state = SATA_ERROR;
				qc->err_mask |= AC_ERR_OTHER;
				return qc->err_mask;
			}
		}
		qc->err_mask |= AC_ERR_OTHER;
		return qc->err_mask;
	}

	status = ata_busy_wait(ap, ATA_BUSY, 10);
	if (status & ATA_BUSY) {
		printf("BSY = 0 check. timeout.\n");
		qc->err_mask |= AC_ERR_OTHER;
		return qc->err_mask;
	}

	/* Run the polled PIO state machine to move the data. */
	ata_pio_task(ap);

	/* rc is always 0 here: a still-active command means it timed out. */
	if (!rc) {
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;
			ata_port_freeze(ap);
		}
	}

	/* Refine the error mask for failed commands. */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		/* Drop the generic bit once a specific cause is known. */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* Hand back the result and restore the preempted queue state. */
	*tf = qc->result_tf;
	err_mask = qc->err_mask;
	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ap->flags &= ~ATA_FLAG_DISABLED;
	}

	return err_mask;
}
1101
1102static void ata_qc_issue(struct ata_queued_cmd *qc)
1103{
1104        struct ata_port *ap = qc->ap;
1105        struct ata_link *link = qc->dev->link;
1106        u8 prot = qc->tf.protocol;
1107
1108        if (ata_is_ncq(prot)) {
1109                if (!link->sactive)
1110                        ap->nr_active_links++;
1111                link->sactive |= 1 << qc->tag;
1112        } else {
1113                ap->nr_active_links++;
1114                link->active_tag = qc->tag;
1115        }
1116
1117        qc->flags |= ATA_QCFLAG_ACTIVE;
1118        ap->qc_active |= 1 << qc->tag;
1119
1120        if (qc->dev->flags & ATA_DFLAG_SLEEPING) {
1121                msleep(1);
1122                return;
1123        }
1124
1125        qc->err_mask |= ata_qc_issue_prot(qc);
1126        if (qc->err_mask)
1127                goto err;
1128
1129        return;
1130err:
1131        ata_qc_complete(qc);
1132}
1133
1134static unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
1135{
1136        struct ata_port *ap = qc->ap;
1137
1138        if (ap->flags & ATA_FLAG_PIO_POLLING) {
1139                switch (qc->tf.protocol) {
1140                case ATA_PROT_PIO:
1141                case ATA_PROT_NODATA:
1142                case ATAPI_PROT_PIO:
1143                case ATAPI_PROT_NODATA:
1144                        qc->tf.flags |= ATA_TFLAG_POLLING;
1145                        break;
1146                default:
1147                        break;
1148                }
1149        }
1150
1151        ata_dev_select(ap, qc->dev->devno, 1, 0);
1152
1153        switch (qc->tf.protocol) {
1154        case ATA_PROT_PIO:
1155                if (qc->tf.flags & ATA_TFLAG_POLLING)
1156                        qc->tf.ctl |= ATA_NIEN;
1157
1158                ata_tf_to_host(ap, &qc->tf);
1159
1160                ap->hsm_task_state = HSM_ST;
1161
1162                if (qc->tf.flags & ATA_TFLAG_POLLING)
1163                        ata_pio_queue_task(ap, qc, 0);
1164
1165                break;
1166
1167        default:
1168                return AC_ERR_SYSTEM;
1169        }
1170
1171        return 0;
1172}
1173
/*
 * Send a taskfile to the device: load the shadow registers first, then
 * write the command register, which starts execution.
 */
static void ata_tf_to_host(struct ata_port *ap,
			const struct ata_taskfile *tf)
{
	ata_tf_load(ap, tf);
	ata_exec_command(ap, tf);
}
1180
/*
 * Load a taskfile into the device's shadow registers.
 *
 * The device-control register is written only when ctl changed; for
 * LBA48 commands the HOB (high-order) bytes are written first so they
 * end up in the registers' "previous content", then the current bytes,
 * then the device register.  The channel is waited idle after a control
 * change and again after the full load.
 */
static void ata_tf_load(struct ata_port *ap,
			const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		if (ioaddr->ctl_addr)
			writeb(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	/* High-order bytes must precede the low-order bytes. */
	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		writeb(tf->hob_feature, ioaddr->feature_addr);
		writeb(tf->hob_nsect, ioaddr->nsect_addr);
		writeb(tf->hob_lbal, ioaddr->lbal_addr);
		writeb(tf->hob_lbam, ioaddr->lbam_addr);
		writeb(tf->hob_lbah, ioaddr->lbah_addr);
	}

	if (is_addr) {
		writeb(tf->feature, ioaddr->feature_addr);
		writeb(tf->nsect, ioaddr->nsect_addr);
		writeb(tf->lbal, ioaddr->lbal_addr);
		writeb(tf->lbam, ioaddr->lbam_addr);
		writeb(tf->lbah, ioaddr->lbah_addr);
	}

	if (tf->flags & ATA_TFLAG_DEVICE)
		writeb(tf->device, ioaddr->device_addr);

	ata_wait_idle(ap);
}
1215
/*
 * Write the command register, starting execution.  The altstatus read
 * flushes the posted write; the delay gives the device time to set BSY.
 */
static void ata_exec_command(struct ata_port *ap,
			const struct ata_taskfile *tf)
{
	writeb(tf->command, ap->ioaddr.command_addr);

	readb(ap->ioaddr.altstatus_addr);

	udelay(1);
}
1225
/*
 * Stash the command for later processing by ata_pio_task().  This polled
 * driver has no workqueue, so the delay argument is accepted but ignored.
 */
static void ata_pio_queue_task(struct ata_port *ap,
			void *data,unsigned long delay)
{
	ap->port_task_data = data;
}
1231
1232static unsigned int ac_err_mask(u8 status)
1233{
1234        if (status & (ATA_BUSY | ATA_DRQ))
1235                return AC_ERR_HSM;
1236        if (status & (ATA_ERR | ATA_DF))
1237                return AC_ERR_DEV;
1238        return 0;
1239}
1240
1241static unsigned int __ac_err_mask(u8 status)
1242{
1243        unsigned int mask = ac_err_mask(status);
1244        if (mask == 0)
1245                return AC_ERR_OTHER;
1246        return mask;
1247}
1248
/*
 * Polled PIO worker: wait for BSY to clear, then drive the host state
 * machine (ata_hsm_move) until it asks us to stop polling.
 */
static void ata_pio_task(struct ata_port *arg_ap)
{
	struct ata_port *ap = arg_ap;
	struct ata_queued_cmd *qc = ap->port_task_data;
	u8 status;
	int poll_next;

fsm_start:
	/*
	 * This is purely heuristic.  This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two.  If not, the drive is probably seeking
	 * or something.  Snooze for a couple msecs, then
	 * chk-status again.  If still busy, queue delayed work.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		msleep(2);
		status = ata_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			/* Still busy: re-queue and give up for now. */
			ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE);
			return;
		}
	}

	poll_next = ata_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
}
1282
/*
 * Advance the PIO host state machine one step.
 *
 * @ap:     port being driven
 * @qc:     command in flight
 * @status: most recent status-register value (BSY assumed clear)
 * @in_wq:  non-zero when called from the polled worker path
 *
 * Returns non-zero when the caller should keep polling, 0 when the
 * command has completed (HSM_ST_IDLE) or errored out.
 */
static int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
			u8 status, int in_wq)
{
	int poll_next;

fsm_start:
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Keep polling only for commands issued in polling mode. */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* DRQ must be set before the first data block can move. */
		if ((status & ATA_DRQ) == 0) {
			if (status & (ATA_ERR | ATA_DF)) {
				qc->err_mask |= AC_ERR_DEV;
			} else {
				qc->err_mask |= AC_ERR_HSM;
			}
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (status & (ATA_ERR | ATA_DF)) {
			if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
				printf("DRQ=1 with device error, "
					"dev_stat 0x%X\n", status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}
		}

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */
			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
		} else {
			printf("protocol is not ATA_PROT_PIO \n");
		}
		break;

	case HSM_ST:
		/* Mid-transfer: DRQ clear here is a protocol violation. */
		if ((status & ATA_DRQ) == 0) {
			if (status & (ATA_ERR | ATA_DF)) {
				qc->err_mask |= AC_ERR_DEV;
			} else {
				/* HSM violation. Let EH handle this.
				 * Phantom devices also trigger this
				 * condition.  Mark hint.
				 */
				qc->err_mask |= AC_ERR_HSM | AC_ERR_NODEV_HINT;
			}

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}
		/* For PIO reads, some devices may ask for
		 * data transfer (DRQ=1) alone with ERR=1.
		 * We respect DRQ here and transfer one
		 * block of junk data before changing the
		 * hsm_task_state to HSM_ST_ERR.
		 *
		 * For PIO writes, ERR=1 DRQ=1 doesn't make
		 * sense since the data block has been
		 * transferred to the device.
		 */
		if (status & (ATA_ERR | ATA_DF)) {
			qc->err_mask |= AC_ERR_DEV;

			if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
				ata_pio_sectors(qc);
				status = ata_wait_idle(ap);
			}

			if (status & (ATA_BUSY | ATA_DRQ))
				qc->err_mask |= AC_ERR_HSM;

			/* ata_pio_sectors() might change the
			 * state to HSM_ST_LAST. so, the state
			 * is changed after ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		ata_pio_sectors(qc);
		if (ap->hsm_task_state == HSM_ST_LAST &&
			(!(qc->tf.flags & ATA_TFLAG_WRITE))) {
			status = ata_wait_idle(ap);
			goto fsm_start;
		}

		poll_next = 1;
		break;

	case HSM_ST_LAST:
		/* All data moved; status must now be clean. */
		if (!ata_ok(status)) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		ap->hsm_task_state = HSM_ST_IDLE;

		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		/* make sure qc->err_mask is available to
		 * know what's wrong and recover
		 */
		ap->hsm_task_state = HSM_ST_IDLE;

		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
	}

	return poll_next;
}
1418
1419static void ata_pio_sectors(struct ata_queued_cmd *qc)
1420{
1421        struct ata_port *ap;
1422        ap = pap;
1423        qc->pdata = ap->pdata;
1424
1425        ata_pio_sector(qc);
1426
1427        readb(qc->ap->ioaddr.altstatus_addr);
1428        udelay(1);
1429}
1430
1431static void ata_pio_sector(struct ata_queued_cmd *qc)
1432{
1433        int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
1434        struct ata_port *ap = qc->ap;
1435        unsigned int offset;
1436        unsigned char *buf;
1437        char temp_data_buf[512];
1438
1439        if (qc->curbytes == qc->nbytes - qc->sect_size)
1440                ap->hsm_task_state = HSM_ST_LAST;
1441
1442        offset = qc->curbytes;
1443
1444        switch (qc->tf.command) {
1445        case ATA_CMD_ID_ATA:
1446                buf = (unsigned char *)&ata_device.id[0];
1447                break;
1448        case ATA_CMD_PIO_READ_EXT:
1449        case ATA_CMD_PIO_READ:
1450        case ATA_CMD_PIO_WRITE_EXT:
1451        case ATA_CMD_PIO_WRITE:
1452                buf = qc->pdata + offset;
1453                break;
1454        default:
1455                buf = (unsigned char *)&temp_data_buf[0];
1456        }
1457
1458        ata_mmio_data_xfer(qc->dev, buf, qc->sect_size, do_write);
1459
1460        qc->curbytes += qc->sect_size;
1461
1462}
1463
/*
 * Move buflen bytes between buf and the port's 16-bit data register.
 * A trailing odd byte is handled through a 2-byte bounce buffer so only
 * whole words ever hit the register.
 *
 * NOTE(review): the read path applies cpu_to_le16() to the register
 * value while the write path uses le16_to_cpu() -- verify the intended
 * byte order on big-endian targets.
 */
static void ata_mmio_data_xfer(struct ata_device *dev, unsigned char *buf,
				unsigned int buflen, int do_write)
{
	struct ata_port *ap = pap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned int words = buflen >> 1;
	u16 *buf16 = (u16 *)buf;
	unsigned int i = 0;

	/* Settle delay before touching the data register. */
	udelay(100);
	if (do_write) {
		for (i = 0; i < words; i++)
			writew(le16_to_cpu(buf16[i]), data_addr);
	} else {
		for (i = 0; i < words; i++)
			buf16[i] = cpu_to_le16(readw(data_addr));
	}

	/* Odd byte count: transfer the final byte via a bounce word. */
	if (buflen & 0x01) {
		__le16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		if (do_write) {
			memcpy(align_buf, trailing_buf, 1);
			writew(le16_to_cpu(align_buf[0]), data_addr);
		} else {
			align_buf[0] = cpu_to_le16(readw(data_addr));
			memcpy(trailing_buf, align_buf, 1);
		}
	}
}
1495
1496static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
1497{
1498        struct ata_port *ap = qc->ap;
1499
1500        if (in_wq) {
1501                /* EH might have kicked in while host lock is
1502                 * released.
1503                 */
1504                qc = &ap->qcmd[qc->tag];
1505                if (qc) {
1506                        if (!(qc->err_mask & AC_ERR_HSM)) {
1507                                ata_irq_on(ap);
1508                                ata_qc_complete(qc);
1509                        } else {
1510                                ata_port_freeze(ap);
1511                        }
1512                }
1513        } else {
1514                if (!(qc->err_mask & AC_ERR_HSM)) {
1515                        ata_qc_complete(qc);
1516                } else {
1517                        ata_port_freeze(ap);
1518                }
1519        }
1520}
1521
/*
 * Re-enable device interrupts (clear nIEN in the control register) and
 * wait for the channel to go idle.  Returns the last status read.
 */
static u8 ata_irq_on(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 tmp;

	ap->ctl &= ~ATA_NIEN;
	ap->last_ctl = ap->ctl;

	if (ioaddr->ctl_addr)
		writeb(ap->ctl, ioaddr->ctl_addr);

	tmp = ata_wait_idle(ap);

	return tmp;
}
1537
1538static unsigned int ata_tag_internal(unsigned int tag)
1539{
1540        return tag == ATA_MAX_QUEUE - 1;
1541}
1542
/*
 * Complete a command: record the result taskfile where required, apply
 * command-specific post-processing, then release the queue slot state.
 * Failed non-internal commands only capture the result TF and return,
 * leaving the slot for error handling.
 */
static void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_device *dev = qc->dev;
	if (qc->err_mask)
		qc->flags |= ATA_QCFLAG_FAILED;

	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (!ata_tag_internal(qc->tag)) {
			fill_result_tf(qc);
			return;
		}
	}
	if (qc->flags & ATA_QCFLAG_RESULT_TF)
		fill_result_tf(qc);

	/* Some commands need post-processing after successful
	 * completion.
	 */
	switch (qc->tf.command) {
	case ATA_CMD_SET_FEATURES:
		if (qc->tf.feature != SETFEATURES_WC_ON &&
				qc->tf.feature != SETFEATURES_WC_OFF)
			break;
		/* fallthrough - WC toggles share the no-op handling below */
	case ATA_CMD_INIT_DEV_PARAMS:
	case ATA_CMD_SET_MULTI:
		break;

	case ATA_CMD_SLEEP:
		/* Device is asleep; it must be woken before the next cmd. */
		dev->flags |= ATA_DFLAG_SLEEPING;
		break;
	}

	__ata_qc_complete(qc);
}
1577
1578static void fill_result_tf(struct ata_queued_cmd *qc)
1579{
1580        struct ata_port *ap = qc->ap;
1581
1582        qc->result_tf.flags = qc->tf.flags;
1583        ata_tf_read(ap, &qc->result_tf);
1584}
1585
1586static void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
1587{
1588        struct ata_ioports *ioaddr = &ap->ioaddr;
1589
1590        tf->command = ata_check_status(ap);
1591        tf->feature = readb(ioaddr->error_addr);
1592        tf->nsect = readb(ioaddr->nsect_addr);
1593        tf->lbal = readb(ioaddr->lbal_addr);
1594        tf->lbam = readb(ioaddr->lbam_addr);
1595        tf->lbah = readb(ioaddr->lbah_addr);
1596        tf->device = readb(ioaddr->device_addr);
1597
1598        if (tf->flags & ATA_TFLAG_LBA48) {
1599                if (ioaddr->ctl_addr) {
1600                        writeb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
1601
1602                        tf->hob_feature = readb(ioaddr->error_addr);
1603                        tf->hob_nsect = readb(ioaddr->nsect_addr);
1604                        tf->hob_lbal = readb(ioaddr->lbal_addr);
1605                        tf->hob_lbam = readb(ioaddr->lbam_addr);
1606                        tf->hob_lbah = readb(ioaddr->lbah_addr);
1607
1608                        writeb(tf->ctl, ioaddr->ctl_addr);
1609                        ap->last_ctl = tf->ctl;
1610                } else {
1611                        printf("sata_dwc warnning register read.\n");
1612                }
1613        }
1614}
1615
1616static void __ata_qc_complete(struct ata_queued_cmd *qc)
1617{
1618        struct ata_port *ap = qc->ap;
1619        struct ata_link *link = qc->dev->link;
1620
1621        link->active_tag = ATA_TAG_POISON;
1622        ap->nr_active_links--;
1623
1624        if (qc->flags & ATA_QCFLAG_CLEAR_EXCL && ap->excl_link == link)
1625                ap->excl_link = NULL;
1626
1627        qc->flags &= ~ATA_QCFLAG_ACTIVE;
1628        ap->qc_active &= ~(1 << qc->tag);
1629}
1630
1631static void ata_qc_free(struct ata_queued_cmd *qc)
1632{
1633        struct ata_port *ap = qc->ap;
1634        unsigned int tag;
1635        qc->flags = 0;
1636        tag = qc->tag;
1637        if (tag < ATA_MAX_QUEUE) {
1638                qc->tag = ATA_TAG_POISON;
1639                clear_bit(tag, &ap->qc_allocated);
1640        }
1641}
1642
1643static int check_sata_dev_state(void)
1644{
1645        unsigned long datalen;
1646        unsigned char *pdata;
1647        int ret = 0;
1648        int i = 0;
1649        char temp_data_buf[512];
1650
1651        while (1) {
1652                udelay(10000);
1653
1654                pdata = (unsigned char*)&temp_data_buf[0];
1655                datalen = 512;
1656
1657                ret = ata_dev_read_sectors(pdata, datalen, 0, 1);
1658
1659                if (ret == true)
1660                        break;
1661
1662                i++;
1663                if (i > (ATA_RESET_TIME * 100)) {
1664                        printf("** TimeOUT **\n");
1665                        dev_state = SATA_NODEVICE;
1666                        return false;
1667                }
1668
1669                if ((i >= 100) && ((i % 100) == 0))
1670                        printf(".");
1671        }
1672
1673        dev_state = SATA_READY;
1674
1675        return true;
1676}
1677
1678static unsigned int ata_dev_set_feature(struct ata_device *dev,
1679                                u8 enable, u8 feature)
1680{
1681        struct ata_taskfile tf;
1682        struct ata_port *ap;
1683        ap = pap;
1684        unsigned int err_mask;
1685
1686        memset(&tf, 0, sizeof(tf));
1687        tf.ctl = ap->ctl;
1688
1689        tf.device = ATA_DEVICE_OBS;
1690        tf.command = ATA_CMD_SET_FEATURES;
1691        tf.feature = enable;
1692        tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1693        tf.protocol = ATA_PROT_NODATA;
1694        tf.nsect = feature;
1695
1696        err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, 0, 0);
1697
1698        return err_mask;
1699}
1700
1701static unsigned int ata_dev_init_params(struct ata_device *dev,
1702                                u16 heads, u16 sectors)
1703{
1704        struct ata_taskfile tf;
1705        struct ata_port *ap;
1706        ap = pap;
1707        unsigned int err_mask;
1708
1709        if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
1710                return AC_ERR_INVALID;
1711
1712        memset(&tf, 0, sizeof(tf));
1713        tf.ctl = ap->ctl;
1714        tf.device = ATA_DEVICE_OBS;
1715        tf.command = ATA_CMD_INIT_DEV_PARAMS;
1716        tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1717        tf.protocol = ATA_PROT_NODATA;
1718        tf.nsect = sectors;
1719        tf.device |= (heads - 1) & 0x0f;
1720
1721        err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, 0, 0);
1722
1723        if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1724                err_mask = 0;
1725
1726        return err_mask;
1727}
1728
/*
 * Maximum sectors per read command: the sector-count field is 8 bits
 * wide for 28-bit commands and 16 bits wide with LBA48.
 */
#if defined(CONFIG_SATA_DWC) && !defined(CONFIG_LBA48)
#define SATA_MAX_READ_BLK 0xFF
#else
#define SATA_MAX_READ_BLK 0xFFFF
#endif
1734
1735ulong sata_read(int device, ulong blknr, lbaint_t blkcnt, void *buffer)
1736{
1737        ulong start,blks, buf_addr;
1738        unsigned short smallblks;
1739        unsigned long datalen;
1740        unsigned char *pdata;
1741        device &= 0xff;
1742
1743        u32 block = 0;
1744        u32 n_block = 0;
1745
1746        if (dev_state != SATA_READY)
1747                return 0;
1748
1749        buf_addr = (unsigned long)buffer;
1750        start = blknr;
1751        blks = blkcnt;
1752        do {
1753                pdata = (unsigned char *)buf_addr;
1754                if (blks > SATA_MAX_READ_BLK) {
1755                        datalen = sata_dev_desc[device].blksz * SATA_MAX_READ_BLK;
1756                        smallblks = SATA_MAX_READ_BLK;
1757
1758                        block = (u32)start;
1759                        n_block = (u32)smallblks;
1760
1761                        start += SATA_MAX_READ_BLK;
1762                        blks -= SATA_MAX_READ_BLK;
1763                } else {
1764                        datalen = sata_dev_desc[device].blksz * SATA_MAX_READ_BLK;
1765                        datalen = sata_dev_desc[device].blksz * blks;
1766                        smallblks = (unsigned short)blks;
1767
1768                        block = (u32)start;
1769                        n_block = (u32)smallblks;
1770
1771                        start += blks;
1772                        blks = 0;
1773                }
1774
1775                if (ata_dev_read_sectors(pdata, datalen, block, n_block) != true) {
1776                        printf("sata_dwc : Hard disk read error.\n");
1777                        blkcnt -= blks;
1778                        break;
1779                }
1780                buf_addr += datalen;
1781        } while (blks != 0);
1782
1783        return (blkcnt);
1784}
1785
1786static int ata_dev_read_sectors(unsigned char *pdata, unsigned long datalen,
1787                                                u32 block, u32 n_block)
1788{
1789        struct ata_port *ap = pap;
1790        struct ata_device *dev = &ata_device;
1791        struct ata_taskfile tf;
1792        unsigned int class = ATA_DEV_ATA;
1793        unsigned int err_mask = 0;
1794        const char *reason;
1795        int may_fallback = 1;
1796
1797        if (dev_state == SATA_ERROR)
1798                return false;
1799
1800        ata_dev_select(ap, dev->devno, 1, 1);
1801
1802retry:
1803        memset(&tf, 0, sizeof(tf));
1804        tf.ctl = ap->ctl;
1805        ap->print_id = 1;
1806        ap->flags &= ~ATA_FLAG_DISABLED;
1807
1808        ap->pdata = pdata;
1809
1810        tf.device = ATA_DEVICE_OBS;
1811
1812        temp_n_block = n_block;
1813
1814#ifdef CONFIG_LBA48
1815        tf.command = ATA_CMD_PIO_READ_EXT;
1816        tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
1817
1818        tf.hob_feature = 31;
1819        tf.feature = 31;
1820        tf.hob_nsect = (n_block >> 8) & 0xff;
1821        tf.nsect = n_block & 0xff;
1822
1823        tf.hob_lbah = 0x0;
1824        tf.hob_lbam = 0x0;
1825        tf.hob_lbal = (block >> 24) & 0xff;
1826        tf.lbah = (block >> 16) & 0xff;
1827        tf.lbam = (block >> 8) & 0xff;
1828        tf.lbal = block & 0xff;
1829
1830        tf.device = 1 << 6;
1831        if (tf.flags & ATA_TFLAG_FUA)
1832                tf.device |= 1 << 7;
1833#else
1834        tf.command = ATA_CMD_PIO_READ;
1835        tf.flags |= ATA_TFLAG_LBA ;
1836
1837        tf.feature = 31;
1838        tf.nsect = n_block & 0xff;
1839
1840        tf.lbah = (block >> 16) & 0xff;
1841        tf.lbam = (block >> 8) & 0xff;
1842        tf.lbal = block & 0xff;
1843
1844        tf.device = (block >> 24) & 0xf;
1845
1846        tf.device |= 1 << 6;
1847        if (tf.flags & ATA_TFLAG_FUA)
1848                tf.device |= 1 << 7;
1849
1850#endif
1851
1852        tf.protocol = ATA_PROT_PIO;
1853
1854        /* Some devices choke if TF registers contain garbage.  Make
1855         * sure those are properly initialized.
1856         */
1857        tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1858        tf.flags |= ATA_TFLAG_POLLING;
1859
1860        err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, 0, 0);
1861
1862        if (err_mask) {
1863                if (err_mask & AC_ERR_NODEV_HINT) {
1864                        printf("READ_SECTORS NODEV after polling detection\n");
1865                        return -ENOENT;
1866                }
1867
1868                if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1869                        /* Device or controller might have reported
1870                         * the wrong device class.  Give a shot at the
1871                         * other IDENTIFY if the current one is
1872                         * aborted by the device.
1873                         */
1874                        if (may_fallback) {
1875                                may_fallback = 0;
1876
1877                                if (class == ATA_DEV_ATA) {
1878                                        class = ATA_DEV_ATAPI;
1879                                } else {
1880                                        class = ATA_DEV_ATA;
1881                                }
1882                                goto retry;
1883                        }
1884                        /* Control reaches here iff the device aborted
1885                         * both flavors of IDENTIFYs which happens
1886                         * sometimes with phantom devices.
1887                         */
1888                        printf("both IDENTIFYs aborted, assuming NODEV\n");
1889                        return -ENOENT;
1890                }
1891
1892                reason = "I/O error";
1893                goto err_out;
1894        }
1895
1896        return true;
1897
1898err_out:
1899        printf("failed to READ SECTORS (%s, err_mask=0x%x)\n", reason, err_mask);
1900        return false;
1901}
1902
1903#if defined(CONFIG_SATA_DWC) && !defined(CONFIG_LBA48)
1904#define SATA_MAX_WRITE_BLK 0xFF
1905#else
1906#define SATA_MAX_WRITE_BLK 0xFFFF
1907#endif
1908
1909ulong sata_write(int device, ulong blknr, lbaint_t blkcnt, const void *buffer)
1910{
1911        ulong start,blks, buf_addr;
1912        unsigned short smallblks;
1913        unsigned long datalen;
1914        unsigned char *pdata;
1915        device &= 0xff;
1916
1917
1918        u32 block = 0;
1919        u32 n_block = 0;
1920
1921        if (dev_state != SATA_READY)
1922                return 0;
1923
1924        buf_addr = (unsigned long)buffer;
1925        start = blknr;
1926        blks = blkcnt;
1927        do {
1928                pdata = (unsigned char *)buf_addr;
1929                if (blks > SATA_MAX_WRITE_BLK) {
1930                        datalen = sata_dev_desc[device].blksz * SATA_MAX_WRITE_BLK;
1931                        smallblks = SATA_MAX_WRITE_BLK;
1932
1933                        block = (u32)start;
1934                        n_block = (u32)smallblks;
1935
1936                        start += SATA_MAX_WRITE_BLK;
1937                        blks -= SATA_MAX_WRITE_BLK;
1938                } else {
1939                        datalen = sata_dev_desc[device].blksz * blks;
1940                        smallblks = (unsigned short)blks;
1941
1942                        block = (u32)start;
1943                        n_block = (u32)smallblks;
1944
1945                        start += blks;
1946                        blks = 0;
1947                }
1948
1949                if (ata_dev_write_sectors(pdata, datalen, block, n_block) != true) {
1950                        printf("sata_dwc : Hard disk read error.\n");
1951                        blkcnt -= blks;
1952                        break;
1953                }
1954                buf_addr += datalen;
1955        } while (blks != 0);
1956
1957        return (blkcnt);
1958}
1959
1960static int ata_dev_write_sectors(unsigned char* pdata, unsigned long datalen,
1961                                                u32 block, u32 n_block)
1962{
1963        struct ata_port *ap = pap;
1964        struct ata_device *dev = &ata_device;
1965        struct ata_taskfile tf;
1966        unsigned int class = ATA_DEV_ATA;
1967        unsigned int err_mask = 0;
1968        const char *reason;
1969        int may_fallback = 1;
1970
1971        if (dev_state == SATA_ERROR)
1972                return false;
1973
1974        ata_dev_select(ap, dev->devno, 1, 1);
1975
1976retry:
1977        memset(&tf, 0, sizeof(tf));
1978        tf.ctl = ap->ctl;
1979        ap->print_id = 1;
1980        ap->flags &= ~ATA_FLAG_DISABLED;
1981
1982        ap->pdata = pdata;
1983
1984        tf.device = ATA_DEVICE_OBS;
1985
1986        temp_n_block = n_block;
1987
1988
1989#ifdef CONFIG_LBA48
1990        tf.command = ATA_CMD_PIO_WRITE_EXT;
1991        tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48 | ATA_TFLAG_WRITE;
1992
1993        tf.hob_feature = 31;
1994        tf.feature = 31;
1995        tf.hob_nsect = (n_block >> 8) & 0xff;
1996        tf.nsect = n_block & 0xff;
1997
1998        tf.hob_lbah = 0x0;
1999        tf.hob_lbam = 0x0;
2000        tf.hob_lbal = (block >> 24) & 0xff;
2001        tf.lbah = (block >> 16) & 0xff;
2002        tf.lbam = (block >> 8) & 0xff;
2003        tf.lbal = block & 0xff;
2004
2005        tf.device = 1 << 6;
2006        if (tf.flags & ATA_TFLAG_FUA)
2007                tf.device |= 1 << 7;
2008#else
2009        tf.command = ATA_CMD_PIO_WRITE;
2010        tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_WRITE;
2011
2012        tf.feature = 31;
2013        tf.nsect = n_block & 0xff;
2014
2015        tf.lbah = (block >> 16) & 0xff;
2016        tf.lbam = (block >> 8) & 0xff;
2017        tf.lbal = block & 0xff;
2018
2019        tf.device = (block >> 24) & 0xf;
2020
2021        tf.device |= 1 << 6;
2022        if (tf.flags & ATA_TFLAG_FUA)
2023                tf.device |= 1 << 7;
2024
2025#endif
2026
2027        tf.protocol = ATA_PROT_PIO;
2028
2029        /* Some devices choke if TF registers contain garbage.  Make
2030         * sure those are properly initialized.
2031         */
2032        tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2033        tf.flags |= ATA_TFLAG_POLLING;
2034
2035        err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, 0, 0);
2036
2037        if (err_mask) {
2038                if (err_mask & AC_ERR_NODEV_HINT) {
2039                        printf("READ_SECTORS NODEV after polling detection\n");
2040                        return -ENOENT;
2041                }
2042
2043                if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
2044                        /* Device or controller might have reported
2045                         * the wrong device class.  Give a shot at the
2046                         * other IDENTIFY if the current one is
2047                         * aborted by the device.
2048                         */
2049                        if (may_fallback) {
2050                                may_fallback = 0;
2051
2052                                if (class == ATA_DEV_ATA) {
2053                                        class = ATA_DEV_ATAPI;
2054                                } else {
2055                                        class = ATA_DEV_ATA;
2056                                }
2057                                goto retry;
2058                        }
2059                        /* Control reaches here iff the device aborted
2060                         * both flavors of IDENTIFYs which happens
2061                         * sometimes with phantom devices.
2062                         */
2063                        printf("both IDENTIFYs aborted, assuming NODEV\n");
2064                        return -ENOENT;
2065                }
2066
2067                reason = "I/O error";
2068                goto err_out;
2069        }
2070
2071        return true;
2072
2073err_out:
2074        printf("failed to WRITE SECTORS (%s, err_mask=0x%x)\n", reason, err_mask);
2075        return false;
2076}
2077