/*
 * sata_dwc.c
 *
 * U-Boot driver for the Synopsys DesignWare Cores (DWC) AHB SATA
 * controller.  The device is operated in polled PIO mode using the
 * cut-down libata-style helpers below; the on-chip AHB DMA engine is
 * configured only far enough to keep its interrupts masked.
 */
30#include <common.h>
31#include <command.h>
32#include <pci.h>
33#include <asm/processor.h>
34#include <asm/errno.h>
35#include <asm/io.h>
36#include <malloc.h>
37#include <ata.h>
38#include <linux/ctype.h>
39
40#include "sata_dwc.h"
41
42#define DMA_NUM_CHANS 1
43#define DMA_NUM_CHAN_REGS 8
44
45#define AHB_DMA_BRST_DFLT 16
46
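/*
 * One 64-bit register of the DesignWare AHB DMA controller, accessed
 * as two little-endian 32-bit halves.
 */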
47struct dmareg {
48 u32 low;
49 u32 high;
50};
51
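/* Per-channel register block of the AHB DMA controller */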
52struct dma_chan_regs {
53 struct dmareg sar;
54 struct dmareg dar;
55 struct dmareg llp;
56 struct dmareg ctl;
57 struct dmareg sstat;
58 struct dmareg dstat;
59 struct dmareg sstatar;
60 struct dmareg dstatar;
61 struct dmareg cfg;
62 struct dmareg sgr;
63 struct dmareg dsr;
64};
65
66struct dma_interrupt_regs {
67 struct dmareg tfr;
68 struct dmareg block;
69 struct dmareg srctran;
70 struct dmareg dsttran;
71 struct dmareg error;
72};
73
74struct ahb_dma_regs {
75 struct dma_chan_regs chan_regs[DMA_NUM_CHAN_REGS];
76 struct dma_interrupt_regs interrupt_raw;
77 struct dma_interrupt_regs interrupt_status;
78 struct dma_interrupt_regs interrupt_mask;
79 struct dma_interrupt_regs interrupt_clear;
80 struct dmareg statusInt;
81 struct dmareg rq_srcreg;
82 struct dmareg rq_dstreg;
83 struct dmareg rq_sgl_srcreg;
84 struct dmareg rq_sgl_dstreg;
85 struct dmareg rq_lst_srcreg;
86 struct dmareg rq_lst_dstreg;
87 struct dmareg dma_cfg;
88 struct dmareg dma_chan_en;
89 struct dmareg dma_id;
90 struct dmareg dma_test;
91 struct dmareg res1;
92 struct dmareg res2;
	/*
	 * DMA Comp Params; the hardware lays them out highest-numbered
	 * first, i.e. dma_params[0] holds Param 6 and dma_params[5]
	 * holds Param 1.
	 */
97 struct dmareg dma_params[6];
98};
99
100#define DMA_EN 0x00000001
101#define DMA_DI 0x00000000
102#define DMA_CHANNEL(ch) (0x00000001 << (ch))
#define DMA_ENABLE_CHAN(ch)	((0x00000001 << (ch)) |			\
				 ((0x00000001 << (ch)) << 8))
#define DMA_DISABLE_CHAN(ch)	(0x00000000 |				\
				 ((0x00000001 << (ch)) << 8))
107
108#define SATA_DWC_MAX_PORTS 1
109#define SATA_DWC_SCR_OFFSET 0x24
110#define SATA_DWC_REG_OFFSET 0x64
111
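/*
 * SATA DWC core registers, mapped SATA_DWC_REG_OFFSET bytes past the
 * port base address.
 */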
112struct sata_dwc_regs {
113 u32 fptagr;
114 u32 fpbor;
115 u32 fptcr;
116 u32 dmacr;
117 u32 dbtsr;
118 u32 intpr;
119 u32 intmr;
120 u32 errmr;
121 u32 llcr;
122 u32 phycr;
123 u32 physr;
124 u32 rxbistpd;
125 u32 rxbistpd1;
126 u32 rxbistpd2;
127 u32 txbistpd;
128 u32 txbistpd1;
129 u32 txbistpd2;
130 u32 bistcr;
131 u32 bistfctr;
132 u32 bistsr;
133 u32 bistdecr;
134 u32 res[15];
135 u32 testr;
136 u32 versionr;
137 u32 idr;
138 u32 unimpl[192];
139 u32 dmadr[256];
140};
141
142#define SATA_DWC_TXFIFO_DEPTH 0x01FF
143#define SATA_DWC_RXFIFO_DEPTH 0x01FF
144
145#define SATA_DWC_DBTSR_MWR(size) ((size / 4) & SATA_DWC_TXFIFO_DEPTH)
146#define SATA_DWC_DBTSR_MRD(size) (((size / 4) & \
147 SATA_DWC_RXFIFO_DEPTH) << 16)
148#define SATA_DWC_INTPR_DMAT 0x00000001
149#define SATA_DWC_INTPR_NEWFP 0x00000002
150#define SATA_DWC_INTPR_PMABRT 0x00000004
151#define SATA_DWC_INTPR_ERR 0x00000008
152#define SATA_DWC_INTPR_NEWBIST 0x00000010
153#define SATA_DWC_INTPR_IPF 0x10000000
154#define SATA_DWC_INTMR_DMATM 0x00000001
155#define SATA_DWC_INTMR_NEWFPM 0x00000002
156#define SATA_DWC_INTMR_PMABRTM 0x00000004
157#define SATA_DWC_INTMR_ERRM 0x00000008
158#define SATA_DWC_INTMR_NEWBISTM 0x00000010
159
160#define SATA_DWC_DMACR_TMOD_TXCHEN 0x00000004
161#define SATA_DWC_DMACR_TXRXCH_CLEAR SATA_DWC_DMACR_TMOD_TXCHEN
162
163#define SATA_DWC_QCMD_MAX 32
164
165#define SATA_DWC_SERROR_ERR_BITS 0x0FFF0F03
166
167#define HSDEVP_FROM_AP(ap) (struct sata_dwc_device_port*) \
168 (ap)->private_data
169
170struct sata_dwc_device {
171 struct device *dev;
172 struct ata_probe_ent *pe;
173 struct ata_host *host;
174 u8 *reg_base;
175 struct sata_dwc_regs *sata_dwc_regs;
176 int irq_dma;
177};
178
179struct sata_dwc_device_port {
180 struct sata_dwc_device *hsdev;
181 int cmd_issued[SATA_DWC_QCMD_MAX];
182 u32 dma_chan[SATA_DWC_QCMD_MAX];
183 int dma_pending[SATA_DWC_QCMD_MAX];
184};
185
186enum {
187 SATA_DWC_CMD_ISSUED_NOT = 0,
188 SATA_DWC_CMD_ISSUED_PEND = 1,
189 SATA_DWC_CMD_ISSUED_EXEC = 2,
190 SATA_DWC_CMD_ISSUED_NODATA = 3,
191
192 SATA_DWC_DMA_PENDING_NONE = 0,
193 SATA_DWC_DMA_PENDING_TX = 1,
194 SATA_DWC_DMA_PENDING_RX = 2,
195};
196
#define msleep(a)	udelay((a) * 1000)
#define ssleep(a)	msleep((a) * 1000)
199
200static int ata_probe_timeout = (ATA_TMOUT_INTERNAL / 100);
201
202enum sata_dev_state {
203 SATA_INIT = 0,
204 SATA_READY = 1,
205 SATA_NODEVICE = 2,
206 SATA_ERROR = 3,
207};
208enum sata_dev_state dev_state = SATA_INIT;
209
210static struct ahb_dma_regs *sata_dma_regs = 0;
211static struct ata_host *phost;
212static struct ata_port ap;
static struct ata_port *pap = &ap;
214static struct ata_device ata_device;
215static struct sata_dwc_device_port dwc_devp;
216
217static void *scr_addr_sstatus;
218static u32 temp_n_block = 0;
219
220static unsigned ata_exec_internal(struct ata_device *dev,
221 struct ata_taskfile *tf, const u8 *cdb,
222 int dma_dir, unsigned int buflen,
223 unsigned long timeout);
224static unsigned int ata_dev_set_feature(struct ata_device *dev,
225 u8 enable,u8 feature);
226static unsigned int ata_dev_init_params(struct ata_device *dev,
227 u16 heads, u16 sectors);
228static u8 ata_irq_on(struct ata_port *ap);
229static struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap,
230 unsigned int tag);
231static int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
232 u8 status, int in_wq);
233static void ata_tf_to_host(struct ata_port *ap,
234 const struct ata_taskfile *tf);
235static void ata_exec_command(struct ata_port *ap,
236 const struct ata_taskfile *tf);
237static unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc);
238static u8 ata_check_altstatus(struct ata_port *ap);
239static u8 ata_check_status(struct ata_port *ap);
240static void ata_dev_select(struct ata_port *ap, unsigned int device,
241 unsigned int wait, unsigned int can_sleep);
242static void ata_qc_issue(struct ata_queued_cmd *qc);
243static void ata_tf_load(struct ata_port *ap,
244 const struct ata_taskfile *tf);
245static int ata_dev_read_sectors(unsigned char* pdata,
246 unsigned long datalen, u32 block, u32 n_block);
247static int ata_dev_write_sectors(unsigned char* pdata,
248 unsigned long datalen , u32 block, u32 n_block);
249static void ata_std_dev_select(struct ata_port *ap, unsigned int device);
250static void ata_qc_complete(struct ata_queued_cmd *qc);
251static void __ata_qc_complete(struct ata_queued_cmd *qc);
252static void fill_result_tf(struct ata_queued_cmd *qc);
253static void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
254static void ata_mmio_data_xfer(struct ata_device *dev,
255 unsigned char *buf,
256 unsigned int buflen,int do_write);
257static void ata_pio_task(struct ata_port *arg_ap);
258static void __ata_port_freeze(struct ata_port *ap);
259static int ata_port_freeze(struct ata_port *ap);
260static void ata_qc_free(struct ata_queued_cmd *qc);
261static void ata_pio_sectors(struct ata_queued_cmd *qc);
262static void ata_pio_sector(struct ata_queued_cmd *qc);
263static void ata_pio_queue_task(struct ata_port *ap,
264 void *data,unsigned long delay);
265static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq);
266static int sata_dwc_softreset(struct ata_port *ap);
267static int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
268 unsigned int flags, u16 *id);
269static int check_sata_dev_state(void);
270
271extern block_dev_desc_t sata_dev_desc[CONFIG_SYS_SATA_MAX_DEVICE];
272
273static const struct ata_port_info sata_dwc_port_info[] = {
274 {
275 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
276 ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING |
277 ATA_FLAG_SRST | ATA_FLAG_NCQ,
278 .pio_mask = 0x1f,
279 .mwdma_mask = 0x07,
280 .udma_mask = 0x7f,
281 },
282};
283
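/*
 * init_sata() - bring up the DWC SATA port for U-Boot.
 *
 * Maps the taskfile/SCR registers into a static ata_port, waits for the
 * attached device to drop BSY, issues a soft reset, masks the AHB DMA
 * interrupts (everything below runs in polled PIO mode) and clears any
 * stale SError bits.
 */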
284int init_sata(int dev)
285{
286 struct sata_dwc_device hsdev;
287 struct ata_host host;
288 struct ata_port_info pi = sata_dwc_port_info[0];
289 struct ata_link *link;
290 struct sata_dwc_device_port hsdevp = dwc_devp;
291 u8 *base = 0;
292 u8 *sata_dma_regs_addr = 0;
293 u8 status;
294 unsigned long base_addr = 0;
295 int chan = 0;
296 int rc;
297 int i;
298
299 phost = &host;
300
301 base = (u8*)SATA_BASE_ADDR;
302
303 hsdev.sata_dwc_regs = (void *__iomem)(base + SATA_DWC_REG_OFFSET);
304
305 host.n_ports = SATA_DWC_MAX_PORTS;
306
307 for (i = 0; i < SATA_DWC_MAX_PORTS; i++) {
308 ap.pflags |= ATA_PFLAG_INITIALIZING;
309 ap.flags = ATA_FLAG_DISABLED;
310 ap.print_id = -1;
311 ap.ctl = ATA_DEVCTL_OBS;
312 ap.host = &host;
313 ap.last_ctl = 0xFF;
314
315 link = &ap.link;
		link->ap = &ap;
317 link->pmp = 0;
318 link->active_tag = ATA_TAG_POISON;
319 link->hw_sata_spd_limit = 0;
320
321 ap.port_no = i;
		host.ports[i] = &ap;
323 }
324
325 ap.pio_mask = pi.pio_mask;
326 ap.mwdma_mask = pi.mwdma_mask;
327 ap.udma_mask = pi.udma_mask;
328 ap.flags |= pi.flags;
329 ap.link.flags |= pi.link_flags;
330
331 host.ports[0]->ioaddr.cmd_addr = base;
332 host.ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET;
333 scr_addr_sstatus = base + SATA_DWC_SCR_OFFSET;
334
335 base_addr = (unsigned long)base;
336
337 host.ports[0]->ioaddr.cmd_addr = (void *)base_addr + 0x00;
338 host.ports[0]->ioaddr.data_addr = (void *)base_addr + 0x00;
339
340 host.ports[0]->ioaddr.error_addr = (void *)base_addr + 0x04;
341 host.ports[0]->ioaddr.feature_addr = (void *)base_addr + 0x04;
342
343 host.ports[0]->ioaddr.nsect_addr = (void *)base_addr + 0x08;
344
345 host.ports[0]->ioaddr.lbal_addr = (void *)base_addr + 0x0c;
346 host.ports[0]->ioaddr.lbam_addr = (void *)base_addr + 0x10;
347 host.ports[0]->ioaddr.lbah_addr = (void *)base_addr + 0x14;
348
349 host.ports[0]->ioaddr.device_addr = (void *)base_addr + 0x18;
350 host.ports[0]->ioaddr.command_addr = (void *)base_addr + 0x1c;
351 host.ports[0]->ioaddr.status_addr = (void *)base_addr + 0x1c;
352
353 host.ports[0]->ioaddr.altstatus_addr = (void *)base_addr + 0x20;
354 host.ports[0]->ioaddr.ctl_addr = (void *)base_addr + 0x20;
355
356 sata_dma_regs_addr = (u8*)SATA_DMA_REG_ADDR;
357 sata_dma_regs = (void *__iomem)sata_dma_regs_addr;
358
359 status = ata_check_altstatus(&ap);
360
361 if (status == 0x7f) {
362 printf("Hard Disk not found.\n");
363 dev_state = SATA_NODEVICE;
364 rc = FALSE;
365 return rc;
366 }
367
368 printf("Waiting for device...");
369 i = 0;
370 while (1) {
371 udelay(10000);
372
373 status = ata_check_altstatus(&ap);
374
375 if ((status & ATA_BUSY) == 0) {
376 printf("\n");
377 break;
378 }
379
380 i++;
381 if (i > (ATA_RESET_TIME * 100)) {
382 printf("** TimeOUT **\n");
383
384 dev_state = SATA_NODEVICE;
385 rc = FALSE;
386 return rc;
387 }
388 if ((i >= 100) && ((i % 100) == 0))
389 printf(".");
390 }
391
392 rc = sata_dwc_softreset(&ap);
393
394 if (rc) {
395 printf("sata_dwc : error. soft reset failed\n");
396 return rc;
397 }
398
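	/*
	 * Mask the per-channel transfer-complete and error interrupts
	 * and disable the AHB DMA controller; data is moved by polled
	 * PIO instead.
	 */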
399 for (chan = 0; chan < DMA_NUM_CHANS; chan++) {
400 out_le32(&(sata_dma_regs->interrupt_mask.error.low),
401 DMA_DISABLE_CHAN(chan));
402
403 out_le32(&(sata_dma_regs->interrupt_mask.tfr.low),
404 DMA_DISABLE_CHAN(chan));
405 }
406
407 out_le32(&(sata_dma_regs->dma_cfg.low), DMA_DI);
408
409 out_le32(&hsdev.sata_dwc_regs->intmr,
410 SATA_DWC_INTMR_ERRM |
411 SATA_DWC_INTMR_PMABRTM);
412
	/*
	 * Unmask the SError bits that should raise an error interrupt
	 * in the SATA core.
	 */
416 out_le32(&hsdev.sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS);
417
418 hsdev.host = ap.host;
419 memset(&hsdevp, 0, sizeof(hsdevp));
420 hsdevp.hsdev = &hsdev;
421
422 for (i = 0; i < SATA_DWC_QCMD_MAX; i++)
423 hsdevp.cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT;
424
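	/* Clear any pending SError bits (SCR offset 4) by writing them back */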
425 out_le32((void __iomem *)scr_addr_sstatus + 4,
426 in_le32((void __iomem *)scr_addr_sstatus + 4));
427
428 rc = 0;
429 return rc;
430}
431
432static u8 ata_check_altstatus(struct ata_port *ap)
433{
434 u8 val = 0;
435 val = readb(ap->ioaddr.altstatus_addr);
436 return val;
437}
438
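/*
 * sata_dwc_softreset() - detect the device and reset it.
 *
 * Uses the classic nsect/lbal signature write/read-back to check that a
 * device is present, then pulses SRST through the device control
 * register and waits for BSY to clear.
 */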
439static int sata_dwc_softreset(struct ata_port *ap)
440{
441 u8 nsect,lbal = 0;
442 u8 tmp = 0;
443 u32 serror = 0;
444 u8 status = 0;
445 struct ata_ioports *ioaddr = &ap->ioaddr;
446
447 serror = in_le32((void *)ap->ioaddr.scr_addr + (SCR_ERROR * 4));
448
449 writeb(0x55, ioaddr->nsect_addr);
450 writeb(0xaa, ioaddr->lbal_addr);
451 writeb(0xaa, ioaddr->nsect_addr);
452 writeb(0x55, ioaddr->lbal_addr);
453 writeb(0x55, ioaddr->nsect_addr);
454 writeb(0xaa, ioaddr->lbal_addr);
455
456 nsect = readb(ioaddr->nsect_addr);
457 lbal = readb(ioaddr->lbal_addr);
458
459 if ((nsect == 0x55) && (lbal == 0xaa)) {
460 printf("Device found\n");
461 } else {
462 printf("No device found\n");
463 dev_state = SATA_NODEVICE;
464 return FALSE;
465 }
466
467 tmp = ATA_DEVICE_OBS;
468 writeb(tmp, ioaddr->device_addr);
469 writeb(ap->ctl, ioaddr->ctl_addr);
470
471 udelay(200);
472
473 writeb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
474
475 udelay(200);
476 writeb(ap->ctl, ioaddr->ctl_addr);
477
478 msleep(150);
479 status = ata_check_status(ap);
480
481 msleep(50);
482 ata_check_status(ap);
483
484 while (1) {
485 u8 status = ata_check_status(ap);
486
487 if (!(status & ATA_BUSY))
488 break;
489
490 printf("Hard Disk status is BUSY.\n");
491 msleep(50);
492 }
493
494 tmp = ATA_DEVICE_OBS;
495 writeb(tmp, ioaddr->device_addr);
496
497 nsect = readb(ioaddr->nsect_addr);
498 lbal = readb(ioaddr->lbal_addr);
499
500 return 0;
501}
502
503static u8 ata_check_status(struct ata_port *ap)
504{
505 u8 val = 0;
506 val = readb(ap->ioaddr.status_addr);
507 return val;
508}
509
510static int ata_id_has_hipm(const u16 *id)
511{
512 u16 val = id[76];
513
514 if (val == 0 || val == 0xffff)
515 return -1;
516
517 return val & (1 << 9);
518}
519
520static int ata_id_has_dipm(const u16 *id)
521{
522 u16 val = id[78];
523
524 if (val == 0 || val == 0xffff)
525 return -1;
526
527 return val & (1 << 3);
528}
529
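/*
 * scan_sata() - IDENTIFY the attached device and fill in sata_dev_desc[]
 * (capacity, LBA/LBA48 capability and the identification strings).
 */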
530int scan_sata(int dev)
531{
532 int i;
533 int rc;
534 u8 status;
535 const u16 *id;
536 struct ata_device *ata_dev = &ata_device;
537 unsigned long pio_mask, mwdma_mask, udma_mask;
538 unsigned long xfer_mask;
539 char revbuf[7];
540 u16 iobuf[ATA_SECTOR_WORDS];
541
542 memset(iobuf, 0, sizeof(iobuf));
543
544 if (dev_state == SATA_NODEVICE)
545 return 1;
546
547 printf("Waiting for device...");
548 i = 0;
549 while (1) {
550 udelay(10000);
551
552 status = ata_check_altstatus(&ap);
553
554 if ((status & ATA_BUSY) == 0) {
555 printf("\n");
556 break;
557 }
558
559 i++;
560 if (i > (ATA_RESET_TIME * 100)) {
561 printf("** TimeOUT **\n");
562
563 dev_state = SATA_NODEVICE;
564 return 1;
565 }
566 if ((i >= 100) && ((i % 100) == 0))
567 printf(".");
568 }
569
570 udelay(1000);
571
572 rc = ata_dev_read_id(ata_dev, &ata_dev->class,
573 ATA_READID_POSTRESET,ata_dev->id);
574 if (rc) {
575 printf("sata_dwc : error. failed sata scan\n");
576 return 1;
577 }
578
579
580
581
582 if (ata_id_is_sata(ata_dev->id))
583 ap.cbl = ATA_CBL_SATA;
584
585 id = ata_dev->id;
586
587 ata_dev->flags &= ~ATA_DFLAG_CFG_MASK;
588 ata_dev->max_sectors = 0;
589 ata_dev->cdb_len = 0;
590 ata_dev->n_sectors = 0;
591 ata_dev->cylinders = 0;
592 ata_dev->heads = 0;
593 ata_dev->sectors = 0;
594
595 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
596 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
597 pio_mask <<= 3;
598 pio_mask |= 0x7;
599 } else {
		/*
		 * Word 64 (advanced PIO modes) is not valid, so the high
		 * byte of word 51 holds the maximum old-style PIO mode
		 * number; turn it into a mask.
		 */
604 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
605 if (mode < 5) {
606 pio_mask = (2 << mode) - 1;
607 } else {
608 pio_mask = 1;
609 }
610 }
611
612 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
613
614 if (ata_id_is_cfa(id)) {
615 int pio = id[163] & 0x7;
616 int dma = (id[163] >> 3) & 7;
617
618 if (pio)
619 pio_mask |= (1 << 5);
620 if (pio > 1)
621 pio_mask |= (1 << 6);
622 if (dma)
623 mwdma_mask |= (1 << 3);
624 if (dma > 1)
625 mwdma_mask |= (1 << 4);
626 }
627
628 udma_mask = 0;
629 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
630 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
631
632 xfer_mask = ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
633 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
634 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
635
636 if (ata_dev->class == ATA_DEV_ATA) {
637 if (ata_id_is_cfa(id)) {
638 if (id[162] & 1)
639 printf("supports DRM functions and may "
					"not be fully accessible.\n");
641 sprintf(revbuf, "%s", "CFA");
642 } else {
643 if (ata_id_has_tpm(id))
644 printf("supports DRM functions and may "
					"not be fully accessible.\n");
646 }
647
648 ata_dev->n_sectors = ata_id_n_sectors((u16*)id);
649
650 if (ata_dev->id[59] & 0x100)
651 ata_dev->multi_count = ata_dev->id[59] & 0xff;
652
653 if (ata_id_has_lba(id)) {
654 const char *lba_desc;
655 char ncq_desc[20];
656
657 lba_desc = "LBA";
658 ata_dev->flags |= ATA_DFLAG_LBA;
659 if (ata_id_has_lba48(id)) {
660 ata_dev->flags |= ATA_DFLAG_LBA48;
661 lba_desc = "LBA48";
662
663 if (ata_dev->n_sectors >= (1UL << 28) &&
664 ata_id_has_flush_ext(id))
665 ata_dev->flags |= ATA_DFLAG_FLUSH_EXT;
666 }
667 if (!ata_id_has_ncq(ata_dev->id))
668 ncq_desc[0] = '\0';
669
670 if (ata_dev->horkage & ATA_HORKAGE_NONCQ)
671 sprintf(ncq_desc, "%s", "NCQ (not used)");
672
673 if (ap.flags & ATA_FLAG_NCQ)
674 ata_dev->flags |= ATA_DFLAG_NCQ;
675 }
676 ata_dev->cdb_len = 16;
677 }
678 ata_dev->max_sectors = ATA_MAX_SECTORS;
679 if (ata_dev->flags & ATA_DFLAG_LBA48)
680 ata_dev->max_sectors = ATA_MAX_SECTORS_LBA48;
681
682 if (!(ata_dev->horkage & ATA_HORKAGE_IPM)) {
683 if (ata_id_has_hipm(ata_dev->id))
684 ata_dev->flags |= ATA_DFLAG_HIPM;
685 if (ata_id_has_dipm(ata_dev->id))
686 ata_dev->flags |= ATA_DFLAG_DIPM;
687 }
688
689 if ((ap.cbl == ATA_CBL_SATA) && (!ata_id_is_sata(ata_dev->id))) {
690 ata_dev->udma_mask &= ATA_UDMA5;
691 ata_dev->max_sectors = ATA_MAX_SECTORS;
692 }
693
694 if (ata_dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		printf("Drive reports diagnostics failure. "
				"This may indicate a drive\n");
		printf("fault or invalid emulation. "
				"Contact drive vendor for information.\n");
699 }
700
701 rc = check_sata_dev_state();
702
703 ata_id_c_string(ata_dev->id,
704 (unsigned char *)sata_dev_desc[dev].revision,
705 ATA_ID_FW_REV, sizeof(sata_dev_desc[dev].revision));
706 ata_id_c_string(ata_dev->id,
707 (unsigned char *)sata_dev_desc[dev].vendor,
708 ATA_ID_PROD, sizeof(sata_dev_desc[dev].vendor));
709 ata_id_c_string(ata_dev->id,
710 (unsigned char *)sata_dev_desc[dev].product,
711 ATA_ID_SERNO, sizeof(sata_dev_desc[dev].product));
712
713 sata_dev_desc[dev].lba = (u32) ata_dev->n_sectors;
714
715#ifdef CONFIG_LBA48
716 if (ata_dev->id[83] & (1 << 10)) {
717 sata_dev_desc[dev].lba48 = 1;
718 } else {
719 sata_dev_desc[dev].lba48 = 0;
720 }
721#endif
722
723 return 0;
724}
725
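/*
 * Poll the status register every 10 us until the given bits clear, the
 * register reads back 0xff (no device) or 'max' polls have elapsed.
 */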
726static u8 ata_busy_wait(struct ata_port *ap,
727 unsigned int bits,unsigned int max)
728{
729 u8 status;
730
731 do {
732 udelay(10);
733 status = ata_check_status(ap);
734 max--;
735 } while (status != 0xff && (status & bits) && (max > 0));
736
737 return status;
738}
739
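/*
 * ata_dev_read_id() - cut-down libata IDENTIFY helper: issue
 * ATA_CMD_ID_ATA by polled PIO, byte-swap the result into *id, and
 * handle the spin-up and INIT_DEV_PARAMS quirks of older drives.
 */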
740static int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
741 unsigned int flags, u16 *id)
742{
743 struct ata_port *ap = pap;
744 unsigned int class = *p_class;
745 struct ata_taskfile tf;
746 unsigned int err_mask = 0;
747 const char *reason;
748 int may_fallback = 1, tried_spinup = 0;
749 u8 status;
750 int rc;
751
752 status = ata_busy_wait(ap, ATA_BUSY, 30000);
753 if (status & ATA_BUSY) {
754 printf("BSY = 0 check. timeout.\n");
755 rc = FALSE;
756 return rc;
757 }
758
759 ata_dev_select(ap, dev->devno, 1, 1);
760
761retry:
762 memset(&tf, 0, sizeof(tf));
763 ap->print_id = 1;
764 ap->flags &= ~ATA_FLAG_DISABLED;
765 tf.ctl = ap->ctl;
766 tf.device = ATA_DEVICE_OBS;
767 tf.command = ATA_CMD_ID_ATA;
768 tf.protocol = ATA_PROT_PIO;
769
	/*
	 * Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
773 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
774
	/*
	 * Device presence detection is unreliable on some SATA
	 * controllers.  Use polling instead.
	 */
778 tf.flags |= ATA_TFLAG_POLLING;
779
780 temp_n_block = 1;
781
782 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
783 sizeof(id[0]) * ATA_ID_WORDS, 0);
784
785 if (err_mask) {
786 if (err_mask & AC_ERR_NODEV_HINT) {
787 printf("NODEV after polling detection\n");
788 return -ENOENT;
789 }
790
791 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			/*
			 * The device may have reported the wrong class.
			 * Flip the class and retry the IDENTIFY once
			 * before giving up.
			 */
797 if (may_fallback) {
798 may_fallback = 0;
799
800 if (class == ATA_DEV_ATA) {
801 class = ATA_DEV_ATAPI;
802 } else {
803 class = ATA_DEV_ATA;
804 }
805 goto retry;
806 }
807
808
809
810
811 printf("both IDENTIFYs aborted, assuming NODEV\n");
812 return -ENOENT;
813 }
814 rc = -EIO;
815 reason = "I/O error";
816 goto err_out;
817 }
818
	/*
	 * Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
822 may_fallback = 0;
823
824 unsigned int id_cnt;
825
826 for (id_cnt = 0; id_cnt < ATA_ID_WORDS; id_cnt++)
827 id[id_cnt] = le16_to_cpu(id[id_cnt]);
828
829
830 rc = -EINVAL;
831 reason = "device reports invalid type";
832
833 if (class == ATA_DEV_ATA) {
834 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
835 goto err_out;
836 } else {
837 if (ata_id_is_ata(id))
838 goto err_out;
839 }
840 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
841 tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a
		 * specific SET_FEATURES spin-up subcommand.
		 */
847 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
848 if (err_mask && id[2] != 0x738c) {
849 rc = -EIO;
850 reason = "SPINUP failed";
851 goto err_out;
852 }
853
		/*
		 * If the drive initially returned incomplete IDENTIFY
		 * info, leave standby mode and retry IDENTIFY.
		 */
857 if (id[2] == 0x37c8)
858 goto retry;
859 }
860
861 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives
		 * is:
		 *	SRST RESET
		 *	IDENTIFY (optional in early ATA)
		 *	INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 *	anything else..
		 * Some drives were very specific about that exact
		 * sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second
		 * check should never trigger.
		 */
873 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
874 err_mask = ata_dev_init_params(dev, id[3], id[6]);
875 if (err_mask) {
876 rc = -EIO;
877 reason = "INIT_DEV_PARAMS failed";
878 goto err_out;
879 }
880
			/*
			 * Current CHS translation info (id[53-58]) might
			 * be changed; reread the IDENTIFY data.
			 */
884 flags &= ~ATA_READID_POSTRESET;
885 goto retry;
886 }
887 }
888
889 *p_class = class;
890 return 0;
891
892err_out:
893 return rc;
894}
895
896static u8 ata_wait_idle(struct ata_port *ap)
897{
898 u8 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
899 return status;
900}
901
902static void ata_dev_select(struct ata_port *ap, unsigned int device,
903 unsigned int wait, unsigned int can_sleep)
904{
905 if (wait)
906 ata_wait_idle(ap);
907
908 ata_std_dev_select(ap, device);
909
910 if (wait)
911 ata_wait_idle(ap);
912}
913
914static void ata_std_dev_select(struct ata_port *ap, unsigned int device)
915{
916 u8 tmp;
917
918 if (device == 0) {
919 tmp = ATA_DEVICE_OBS;
920 } else {
921 tmp = ATA_DEVICE_OBS | ATA_DEV1;
922 }
923
924 writeb(tmp, ap->ioaddr.device_addr);
925
926 readb(ap->ioaddr.altstatus_addr);
927
928 udelay(1);
929}
930
931static int waiting_for_reg_state(volatile u8 *offset,
932 int timeout_msec,
933 u32 sign)
934{
935 int i;
936 u32 status;
937
938 for (i = 0; i < timeout_msec; i++) {
939 status = readl(offset);
940 if ((status & sign) != 0)
941 break;
942 msleep(1);
943 }
944
945 return (i < timeout_msec) ? 0 : -1;
946}
947
948static void ata_qc_reinit(struct ata_queued_cmd *qc)
949{
950 qc->dma_dir = DMA_NONE;
951 qc->flags = 0;
952 qc->nbytes = qc->extrabytes = qc->curbytes = 0;
953 qc->n_elem = 0;
954 qc->err_mask = 0;
955 qc->sect_size = ATA_SECT_SIZE;
956 qc->nbytes = ATA_SECT_SIZE * temp_n_block;
957
958 memset(&qc->tf, 0, sizeof(qc->tf));
959 qc->tf.ctl = 0;
960 qc->tf.device = ATA_DEVICE_OBS;
961
962 qc->result_tf.command = ATA_DRDY;
963 qc->result_tf.feature = 0;
964}
965
966struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap,
967 unsigned int tag)
968{
969 if (tag < ATA_MAX_QUEUE)
970 return &ap->qcmd[tag];
971 return NULL;
972}
973
974static void __ata_port_freeze(struct ata_port *ap)
975{
976 printf("set port freeze.\n");
977 ap->pflags |= ATA_PFLAG_FROZEN;
978}
979
980static int ata_port_freeze(struct ata_port *ap)
981{
982 __ata_port_freeze(ap);
983 return 0;
984}
985
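/*
 * ata_exec_internal() - polled replacement for libata's helper of the
 * same name: allocate the internal tag, issue the taskfile and drive
 * the PIO state machine by hand, returning the resulting error mask.
 */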
986unsigned ata_exec_internal(struct ata_device *dev,
987 struct ata_taskfile *tf, const u8 *cdb,
988 int dma_dir, unsigned int buflen,
989 unsigned long timeout)
990{
991 struct ata_link *link = dev->link;
992 struct ata_port *ap = pap;
993 struct ata_queued_cmd *qc;
994 unsigned int tag, preempted_tag;
995 u32 preempted_sactive, preempted_qc_active;
996 int preempted_nr_active_links;
997 unsigned int err_mask;
998 int rc = 0;
999 u8 status;
1000
1001 status = ata_busy_wait(ap, ATA_BUSY, 300000);
1002 if (status & ATA_BUSY) {
1003 printf("BSY = 0 check. timeout.\n");
1004 rc = FALSE;
1005 return rc;
1006 }
1007
1008 if (ap->pflags & ATA_PFLAG_FROZEN)
1009 return AC_ERR_SYSTEM;
1010
1011 tag = ATA_TAG_INTERNAL;
1012
1013 if (test_and_set_bit(tag, &ap->qc_allocated)) {
1014 rc = FALSE;
1015 return rc;
1016 }
1017
1018 qc = __ata_qc_from_tag(ap, tag);
1019 qc->tag = tag;
1020 qc->ap = ap;
1021 qc->dev = dev;
1022
1023 ata_qc_reinit(qc);
1024
1025 preempted_tag = link->active_tag;
1026 preempted_sactive = link->sactive;
1027 preempted_qc_active = ap->qc_active;
1028 preempted_nr_active_links = ap->nr_active_links;
1029 link->active_tag = ATA_TAG_POISON;
1030 link->sactive = 0;
1031 ap->qc_active = 0;
1032 ap->nr_active_links = 0;
1033
1034 qc->tf = *tf;
1035 if (cdb)
1036 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1037 qc->flags |= ATA_QCFLAG_RESULT_TF;
1038 qc->dma_dir = dma_dir;
1039 qc->private_data = 0;
1040
1041 ata_qc_issue(qc);
1042
1043 if (!timeout)
1044 timeout = ata_probe_timeout * 1000 / HZ;
1045
1046 status = ata_busy_wait(ap, ATA_BUSY, 30000);
1047 if (status & ATA_BUSY) {
1048 printf("BSY = 0 check. timeout.\n");
1049 printf("altstatus = 0x%x.\n", status);
1050 qc->err_mask |= AC_ERR_OTHER;
1051 return qc->err_mask;
1052 }
1053
1054 if (waiting_for_reg_state(ap->ioaddr.altstatus_addr, 1000, 0x8)) {
1055 u8 status = 0;
1056 u8 errorStatus = 0;
1057
1058 status = readb(ap->ioaddr.altstatus_addr);
1059 if ((status & 0x01) != 0) {
1060 errorStatus = readb(ap->ioaddr.feature_addr);
1061 if (errorStatus == 0x04 &&
1062 qc->tf.command == ATA_CMD_PIO_READ_EXT){
1063 printf("Hard Disk doesn't support LBA48\n");
1064 dev_state = SATA_ERROR;
1065 qc->err_mask |= AC_ERR_OTHER;
1066 return qc->err_mask;
1067 }
1068 }
1069 qc->err_mask |= AC_ERR_OTHER;
1070 return qc->err_mask;
1071 }
1072
1073 status = ata_busy_wait(ap, ATA_BUSY, 10);
1074 if (status & ATA_BUSY) {
1075 printf("BSY = 0 check. timeout.\n");
1076 qc->err_mask |= AC_ERR_OTHER;
1077 return qc->err_mask;
1078 }
1079
1080 ata_pio_task(ap);
1081
1082 if (!rc) {
1083 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1084 qc->err_mask |= AC_ERR_TIMEOUT;
1085 ata_port_freeze(ap);
1086 }
1087 }
1088
1089 if (qc->flags & ATA_QCFLAG_FAILED) {
1090 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1091 qc->err_mask |= AC_ERR_DEV;
1092
1093 if (!qc->err_mask)
1094 qc->err_mask |= AC_ERR_OTHER;
1095
1096 if (qc->err_mask & ~AC_ERR_OTHER)
1097 qc->err_mask &= ~AC_ERR_OTHER;
1098 }
1099
1100 *tf = qc->result_tf;
1101 err_mask = qc->err_mask;
1102 ata_qc_free(qc);
1103 link->active_tag = preempted_tag;
1104 link->sactive = preempted_sactive;
1105 ap->qc_active = preempted_qc_active;
1106 ap->nr_active_links = preempted_nr_active_links;
1107
1108 if (ap->flags & ATA_FLAG_DISABLED) {
1109 err_mask |= AC_ERR_SYSTEM;
1110 ap->flags &= ~ATA_FLAG_DISABLED;
1111 }
1112
1113 return err_mask;
1114}
1115
1116static void ata_qc_issue(struct ata_queued_cmd *qc)
1117{
1118 struct ata_port *ap = qc->ap;
1119 struct ata_link *link = qc->dev->link;
1120 u8 prot = qc->tf.protocol;
1121
1122 if (ata_is_ncq(prot)) {
1123 if (!link->sactive)
1124 ap->nr_active_links++;
1125 link->sactive |= 1 << qc->tag;
1126 } else {
1127 ap->nr_active_links++;
1128 link->active_tag = qc->tag;
1129 }
1130
1131 qc->flags |= ATA_QCFLAG_ACTIVE;
1132 ap->qc_active |= 1 << qc->tag;
1133
1134 if (qc->dev->flags & ATA_DFLAG_SLEEPING) {
1135 msleep(1);
1136 return;
1137 }
1138
1139 qc->err_mask |= ata_qc_issue_prot(qc);
1140 if (qc->err_mask)
1141 goto err;
1142
1143 return;
1144err:
1145 ata_qc_complete(qc);
1146}
1147
1148static unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
1149{
1150 struct ata_port *ap = qc->ap;
1151
1152 if (ap->flags & ATA_FLAG_PIO_POLLING) {
1153 switch (qc->tf.protocol) {
1154 case ATA_PROT_PIO:
1155 case ATA_PROT_NODATA:
1156 case ATAPI_PROT_PIO:
1157 case ATAPI_PROT_NODATA:
1158 qc->tf.flags |= ATA_TFLAG_POLLING;
1159 break;
1160 default:
1161 break;
1162 }
1163 }
1164
1165 ata_dev_select(ap, qc->dev->devno, 1, 0);
1166
1167 switch (qc->tf.protocol) {
1168 case ATA_PROT_PIO:
1169 if (qc->tf.flags & ATA_TFLAG_POLLING)
1170 qc->tf.ctl |= ATA_NIEN;
1171
1172 ata_tf_to_host(ap, &qc->tf);
1173
1174 ap->hsm_task_state = HSM_ST;
1175
1176 if (qc->tf.flags & ATA_TFLAG_POLLING)
1177 ata_pio_queue_task(ap, qc, 0);
1178
1179 break;
1180
1181 default:
1182 return AC_ERR_SYSTEM;
1183 }
1184
1185 return 0;
1186}
1187
1188static void ata_tf_to_host(struct ata_port *ap,
1189 const struct ata_taskfile *tf)
1190{
1191 ata_tf_load(ap, tf);
1192 ata_exec_command(ap, tf);
1193}
1194
1195static void ata_tf_load(struct ata_port *ap,
1196 const struct ata_taskfile *tf)
1197{
1198 struct ata_ioports *ioaddr = &ap->ioaddr;
1199 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
1200
1201 if (tf->ctl != ap->last_ctl) {
1202 if (ioaddr->ctl_addr)
1203 writeb(tf->ctl, ioaddr->ctl_addr);
1204 ap->last_ctl = tf->ctl;
1205 ata_wait_idle(ap);
1206 }
1207
1208 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
1209 writeb(tf->hob_feature, ioaddr->feature_addr);
1210 writeb(tf->hob_nsect, ioaddr->nsect_addr);
1211 writeb(tf->hob_lbal, ioaddr->lbal_addr);
1212 writeb(tf->hob_lbam, ioaddr->lbam_addr);
1213 writeb(tf->hob_lbah, ioaddr->lbah_addr);
1214 }
1215
1216 if (is_addr) {
1217 writeb(tf->feature, ioaddr->feature_addr);
1218 writeb(tf->nsect, ioaddr->nsect_addr);
1219 writeb(tf->lbal, ioaddr->lbal_addr);
1220 writeb(tf->lbam, ioaddr->lbam_addr);
1221 writeb(tf->lbah, ioaddr->lbah_addr);
1222 }
1223
1224 if (tf->flags & ATA_TFLAG_DEVICE)
1225 writeb(tf->device, ioaddr->device_addr);
1226
1227 ata_wait_idle(ap);
1228}
1229
1230static void ata_exec_command(struct ata_port *ap,
1231 const struct ata_taskfile *tf)
1232{
1233 writeb(tf->command, ap->ioaddr.command_addr);
1234
1235 readb(ap->ioaddr.altstatus_addr);
1236
1237 udelay(1);
1238}
1239
1240static void ata_pio_queue_task(struct ata_port *ap,
1241 void *data,unsigned long delay)
1242{
1243 ap->port_task_data = data;
1244}
1245
1246static unsigned int ac_err_mask(u8 status)
1247{
1248 if (status & (ATA_BUSY | ATA_DRQ))
1249 return AC_ERR_HSM;
1250 if (status & (ATA_ERR | ATA_DF))
1251 return AC_ERR_DEV;
1252 return 0;
1253}
1254
1255static unsigned int __ac_err_mask(u8 status)
1256{
1257 unsigned int mask = ac_err_mask(status);
1258 if (mask == 0)
1259 return AC_ERR_OTHER;
1260 return mask;
1261}
1262
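/*
 * ata_pio_task() - poll BSY with short back-offs and keep feeding the
 * host state machine until it no longer asks to be polled.
 */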
1263static void ata_pio_task(struct ata_port *arg_ap)
1264{
1265 struct ata_port *ap = arg_ap;
1266 struct ata_queued_cmd *qc = ap->port_task_data;
1267 u8 status;
1268 int poll_next;
1269
1270fsm_start:
	/*
	 * This is purely heuristic.  This is a fast path.  Sometimes
	 * when we enter, BSY will be cleared in a chk-status or two.
	 * If not, the drive is probably seeking or something.  Snooze
	 * for a couple msecs, then chk-status again.  If still busy,
	 * queue delayed work.
	 */
1278 status = ata_busy_wait(ap, ATA_BUSY, 5);
1279 if (status & ATA_BUSY) {
1280 msleep(2);
1281 status = ata_busy_wait(ap, ATA_BUSY, 10);
1282 if (status & ATA_BUSY) {
1283 ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE);
1284 return;
1285 }
1286 }
1287
1288 poll_next = ata_hsm_move(ap, qc, status, 1);
1289
1290
1291
1292
1293 if (poll_next)
1294 goto fsm_start;
1295}
1296
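/*
 * ata_hsm_move() - minimal PIO host state machine (HSM_ST_FIRST ->
 * HSM_ST -> HSM_ST_LAST/HSM_ST_ERR), ported from libata and trimmed to
 * the polling case.  Returns nonzero if the caller should poll again.
 */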
1297static int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
1298 u8 status, int in_wq)
1299{
1300 int poll_next;
1301
1302fsm_start:
1303 switch (ap->hsm_task_state) {
1304 case HSM_ST_FIRST:
1305 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
1306
1307 if ((status & ATA_DRQ) == 0) {
1308 if (status & (ATA_ERR | ATA_DF)) {
1309 qc->err_mask |= AC_ERR_DEV;
1310 } else {
1311 qc->err_mask |= AC_ERR_HSM;
1312 }
1313 ap->hsm_task_state = HSM_ST_ERR;
1314 goto fsm_start;
1315 }
1316
		/*
		 * Device should not ask for data transfer (DRQ=1) when
		 * it finds something wrong with the ATA command, i.e.
		 * with the ERR bit set.  With the TF data being
		 * transferred, the device aborts the command with the
		 * ERR bit set.
		 */
1323 if (status & (ATA_ERR | ATA_DF)) {
1324 if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
1325 printf("DRQ=1 with device error, "
1326 "dev_stat 0x%X\n", status);
1327 qc->err_mask |= AC_ERR_HSM;
1328 ap->hsm_task_state = HSM_ST_ERR;
1329 goto fsm_start;
1330 }
1331 }
1332
1333 if (qc->tf.protocol == ATA_PROT_PIO) {
			/*
			 * ata_pio_sectors() might change the state to
			 * HSM_ST_LAST, so the state is changed here
			 * before calling it.
			 */
1341 ap->hsm_task_state = HSM_ST;
1342 ata_pio_sectors(qc);
1343 } else {
			printf("protocol is not ATA_PROT_PIO\n");
1345 }
1346 break;
1347
1348 case HSM_ST:
1349 if ((status & ATA_DRQ) == 0) {
1350 if (status & (ATA_ERR | ATA_DF)) {
1351 qc->err_mask |= AC_ERR_DEV;
1352 } else {
				/*
				 * HSM violation.  Let the caller handle
				 * it; phantom devices also trigger this
				 * condition, hence the NODEV hint.
				 */
1357 qc->err_mask |= AC_ERR_HSM | AC_ERR_NODEV_HINT;
1358 }
1359
1360 ap->hsm_task_state = HSM_ST_ERR;
1361 goto fsm_start;
1362 }
1363
		/*
		 * For PIO reads, some devices may ask for data transfer
		 * (DRQ=1) alone with ERR=1.  We respect DRQ here and
		 * transfer one block of junk data before changing the
		 * HSM_ST_ERR state.
		 *
		 * For PIO writes, ERR=1 DRQ=1 doesn't make sense since
		 * the data block has been transferred before the error
		 * state is set.
		 */
1373 if (status & (ATA_ERR | ATA_DF)) {
1374 qc->err_mask |= AC_ERR_DEV;
1375
1376 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
1377 ata_pio_sectors(qc);
1378 status = ata_wait_idle(ap);
1379 }
1380
1381 if (status & (ATA_BUSY | ATA_DRQ))
1382 qc->err_mask |= AC_ERR_HSM;
1383
			/*
			 * ata_pio_sectors() might change the state to
			 * HSM_ST_LAST, so the error state is set only
			 * after it has run.
			 */
1388 ap->hsm_task_state = HSM_ST_ERR;
1389 goto fsm_start;
1390 }
1391
1392 ata_pio_sectors(qc);
1393 if (ap->hsm_task_state == HSM_ST_LAST &&
1394 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
1395 status = ata_wait_idle(ap);
1396 goto fsm_start;
1397 }
1398
1399 poll_next = 1;
1400 break;
1401
1402 case HSM_ST_LAST:
1403 if (!ata_ok(status)) {
1404 qc->err_mask |= __ac_err_mask(status);
1405 ap->hsm_task_state = HSM_ST_ERR;
1406 goto fsm_start;
1407 }
1408
1409 ap->hsm_task_state = HSM_ST_IDLE;
1410
1411 ata_hsm_qc_complete(qc, in_wq);
1412
1413 poll_next = 0;
1414 break;
1415
1416 case HSM_ST_ERR:
		/*
		 * Make sure qc->err_mask is available so the caller
		 * knows what went wrong.
		 */
1420 ap->hsm_task_state = HSM_ST_IDLE;
1421
1422 ata_hsm_qc_complete(qc, in_wq);
1423
1424 poll_next = 0;
1425 break;
1426 default:
1427 poll_next = 0;
1428 }
1429
1430 return poll_next;
1431}
1432
1433static void ata_pio_sectors(struct ata_queued_cmd *qc)
1434{
1435 struct ata_port *ap;
1436 ap = pap;
1437 qc->pdata = ap->pdata;
1438
1439 ata_pio_sector(qc);
1440
1441 readb(qc->ap->ioaddr.altstatus_addr);
1442 udelay(1);
1443}
1444
1445static void ata_pio_sector(struct ata_queued_cmd *qc)
1446{
1447 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
1448 struct ata_port *ap = qc->ap;
1449 unsigned int offset;
1450 unsigned char *buf;
1451 char temp_data_buf[512];
1452
1453 if (qc->curbytes == qc->nbytes - qc->sect_size)
1454 ap->hsm_task_state = HSM_ST_LAST;
1455
1456 offset = qc->curbytes;
1457
1458 switch (qc->tf.command) {
1459 case ATA_CMD_ID_ATA:
1460 buf = (unsigned char *)&ata_device.id[0];
1461 break;
1462 case ATA_CMD_PIO_READ_EXT:
1463 case ATA_CMD_PIO_READ:
1464 case ATA_CMD_PIO_WRITE_EXT:
1465 case ATA_CMD_PIO_WRITE:
1466 buf = qc->pdata + offset;
1467 break;
1468 default:
1469 buf = (unsigned char *)&temp_data_buf[0];
1470 }
1471
1472 ata_mmio_data_xfer(qc->dev, buf, qc->sect_size, do_write);
1473
1474 qc->curbytes += qc->sect_size;
1475
1476}
1477
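/*
 * ata_mmio_data_xfer() - 16-bit PIO transfer to/from the data register,
 * with a one-byte tail fix-up for odd-length buffers.
 */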
1478static void ata_mmio_data_xfer(struct ata_device *dev, unsigned char *buf,
1479 unsigned int buflen, int do_write)
1480{
1481 struct ata_port *ap = pap;
1482 void __iomem *data_addr = ap->ioaddr.data_addr;
1483 unsigned int words = buflen >> 1;
1484 u16 *buf16 = (u16 *)buf;
1485 unsigned int i = 0;
1486
1487 udelay(100);
1488 if (do_write) {
1489 for (i = 0; i < words; i++)
1490 writew(le16_to_cpu(buf16[i]), data_addr);
1491 } else {
1492 for (i = 0; i < words; i++)
1493 buf16[i] = cpu_to_le16(readw(data_addr));
1494 }
1495
1496 if (buflen & 0x01) {
1497 __le16 align_buf[1] = { 0 };
1498 unsigned char *trailing_buf = buf + buflen - 1;
1499
1500 if (do_write) {
1501 memcpy(align_buf, trailing_buf, 1);
1502 writew(le16_to_cpu(align_buf[0]), data_addr);
1503 } else {
1504 align_buf[0] = cpu_to_le16(readw(data_addr));
1505 memcpy(trailing_buf, align_buf, 1);
1506 }
1507 }
1508}
1509
1510static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
1511{
1512 struct ata_port *ap = qc->ap;
1513
1514 if (in_wq) {
1515
1516
1517
1518 qc = &ap->qcmd[qc->tag];
1519 if (qc) {
1520 if (!(qc->err_mask & AC_ERR_HSM)) {
1521 ata_irq_on(ap);
1522 ata_qc_complete(qc);
1523 } else {
1524 ata_port_freeze(ap);
1525 }
1526 }
1527 } else {
1528 if (!(qc->err_mask & AC_ERR_HSM)) {
1529 ata_qc_complete(qc);
1530 } else {
1531 ata_port_freeze(ap);
1532 }
1533 }
1534}
1535
1536static u8 ata_irq_on(struct ata_port *ap)
1537{
1538 struct ata_ioports *ioaddr = &ap->ioaddr;
1539 u8 tmp;
1540
1541 ap->ctl &= ~ATA_NIEN;
1542 ap->last_ctl = ap->ctl;
1543
1544 if (ioaddr->ctl_addr)
1545 writeb(ap->ctl, ioaddr->ctl_addr);
1546
1547 tmp = ata_wait_idle(ap);
1548
1549 return tmp;
1550}
1551
1552static unsigned int ata_tag_internal(unsigned int tag)
1553{
1554 return tag == ATA_MAX_QUEUE - 1;
1555}
1556
1557static void ata_qc_complete(struct ata_queued_cmd *qc)
1558{
1559 struct ata_device *dev = qc->dev;
1560 if (qc->err_mask)
1561 qc->flags |= ATA_QCFLAG_FAILED;
1562
1563 if (qc->flags & ATA_QCFLAG_FAILED) {
1564 if (!ata_tag_internal(qc->tag)) {
1565 fill_result_tf(qc);
1566 return;
1567 }
1568 }
1569 if (qc->flags & ATA_QCFLAG_RESULT_TF)
1570 fill_result_tf(qc);
1571
	/*
	 * Some commands need post-processing after successful
	 * completion.
	 */
1575 switch (qc->tf.command) {
1576 case ATA_CMD_SET_FEATURES:
1577 if (qc->tf.feature != SETFEATURES_WC_ON &&
1578 qc->tf.feature != SETFEATURES_WC_OFF)
1579 break;
1580 case ATA_CMD_INIT_DEV_PARAMS:
1581 case ATA_CMD_SET_MULTI:
1582 break;
1583
1584 case ATA_CMD_SLEEP:
1585 dev->flags |= ATA_DFLAG_SLEEPING;
1586 break;
1587 }
1588
1589 __ata_qc_complete(qc);
1590}
1591
1592static void fill_result_tf(struct ata_queued_cmd *qc)
1593{
1594 struct ata_port *ap = qc->ap;
1595
1596 qc->result_tf.flags = qc->tf.flags;
1597 ata_tf_read(ap, &qc->result_tf);
1598}
1599
1600static void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
1601{
1602 struct ata_ioports *ioaddr = &ap->ioaddr;
1603
1604 tf->command = ata_check_status(ap);
1605 tf->feature = readb(ioaddr->error_addr);
1606 tf->nsect = readb(ioaddr->nsect_addr);
1607 tf->lbal = readb(ioaddr->lbal_addr);
1608 tf->lbam = readb(ioaddr->lbam_addr);
1609 tf->lbah = readb(ioaddr->lbah_addr);
1610 tf->device = readb(ioaddr->device_addr);
1611
1612 if (tf->flags & ATA_TFLAG_LBA48) {
1613 if (ioaddr->ctl_addr) {
1614 writeb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
1615
1616 tf->hob_feature = readb(ioaddr->error_addr);
1617 tf->hob_nsect = readb(ioaddr->nsect_addr);
1618 tf->hob_lbal = readb(ioaddr->lbal_addr);
1619 tf->hob_lbam = readb(ioaddr->lbam_addr);
1620 tf->hob_lbah = readb(ioaddr->lbah_addr);
1621
1622 writeb(tf->ctl, ioaddr->ctl_addr);
1623 ap->last_ctl = tf->ctl;
1624 } else {
			printf("sata_dwc: warning: cannot read LBA48 HOB registers without ctl_addr\n");
1626 }
1627 }
1628}
1629
1630static void __ata_qc_complete(struct ata_queued_cmd *qc)
1631{
1632 struct ata_port *ap = qc->ap;
1633 struct ata_link *link = qc->dev->link;
1634
1635 link->active_tag = ATA_TAG_POISON;
1636 ap->nr_active_links--;
1637
1638 if (qc->flags & ATA_QCFLAG_CLEAR_EXCL && ap->excl_link == link)
1639 ap->excl_link = NULL;
1640
1641 qc->flags &= ~ATA_QCFLAG_ACTIVE;
1642 ap->qc_active &= ~(1 << qc->tag);
1643}
1644
1645static void ata_qc_free(struct ata_queued_cmd *qc)
1646{
1647 struct ata_port *ap = qc->ap;
1648 unsigned int tag;
1649 qc->flags = 0;
1650 tag = qc->tag;
1651 if (tag < ATA_MAX_QUEUE) {
1652 qc->tag = ATA_TAG_POISON;
1653 clear_bit(tag, &ap->qc_allocated);
1654 }
1655}
1656
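/*
 * check_sata_dev_state() - read sector 0 repeatedly until the drive
 * answers, marking the device SATA_READY (or SATA_NODEVICE on timeout).
 */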
1657static int check_sata_dev_state(void)
1658{
1659 unsigned long datalen;
1660 unsigned char *pdata;
1661 int ret = 0;
1662 int i = 0;
1663 char temp_data_buf[512];
1664
1665 while (1) {
1666 udelay(10000);
1667
1668 pdata = (unsigned char*)&temp_data_buf[0];
1669 datalen = 512;
1670
1671 ret = ata_dev_read_sectors(pdata, datalen, 0, 1);
1672
1673 if (ret == TRUE)
1674 break;
1675
1676 i++;
1677 if (i > (ATA_RESET_TIME * 100)) {
1678 printf("** TimeOUT **\n");
1679 dev_state = SATA_NODEVICE;
1680 return FALSE;
1681 }
1682
1683 if ((i >= 100) && ((i % 100) == 0))
1684 printf(".");
1685 }
1686
1687 dev_state = SATA_READY;
1688
1689 return TRUE;
1690}
1691
1692static unsigned int ata_dev_set_feature(struct ata_device *dev,
1693 u8 enable, u8 feature)
1694{
1695 struct ata_taskfile tf;
1696 struct ata_port *ap;
1697 ap = pap;
1698 unsigned int err_mask;
1699
1700 memset(&tf, 0, sizeof(tf));
1701 tf.ctl = ap->ctl;
1702
1703 tf.device = ATA_DEVICE_OBS;
1704 tf.command = ATA_CMD_SET_FEATURES;
1705 tf.feature = enable;
1706 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1707 tf.protocol = ATA_PROT_NODATA;
1708 tf.nsect = feature;
1709
1710 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, 0, 0);
1711
1712 return err_mask;
1713}
1714
1715static unsigned int ata_dev_init_params(struct ata_device *dev,
1716 u16 heads, u16 sectors)
1717{
1718 struct ata_taskfile tf;
1719 struct ata_port *ap;
1720 ap = pap;
1721 unsigned int err_mask;
1722
1723 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
1724 return AC_ERR_INVALID;
1725
1726 memset(&tf, 0, sizeof(tf));
1727 tf.ctl = ap->ctl;
1728 tf.device = ATA_DEVICE_OBS;
1729 tf.command = ATA_CMD_INIT_DEV_PARAMS;
1730 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1731 tf.protocol = ATA_PROT_NODATA;
1732 tf.nsect = sectors;
1733 tf.device |= (heads - 1) & 0x0f;
1734
1735 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, 0, 0);
1736
1737 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1738 err_mask = 0;
1739
1740 return err_mask;
1741}
1742
1743#if defined(CONFIG_SATA_DWC) && !defined(CONFIG_LBA48)
1744#define SATA_MAX_READ_BLK 0xFF
1745#else
1746#define SATA_MAX_READ_BLK 0xFFFF
1747#endif
1748
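/*
 * sata_read() - U-Boot block-read entry point: split the request into
 * chunks of at most SATA_MAX_READ_BLK sectors and hand each chunk to
 * ata_dev_read_sectors().  sata_write() below mirrors this for writes.
 */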
1749ulong sata_read(int device, ulong blknr, lbaint_t blkcnt, void *buffer)
1750{
1751 ulong start,blks, buf_addr;
1752 unsigned short smallblks;
1753 unsigned long datalen;
1754 unsigned char *pdata;
1755 device &= 0xff;
1756
1757 u32 block = 0;
1758 u32 n_block = 0;
1759
1760 if (dev_state != SATA_READY)
1761 return 0;
1762
1763 buf_addr = (unsigned long)buffer;
1764 start = blknr;
1765 blks = blkcnt;
1766 do {
1767 pdata = (unsigned char *)buf_addr;
1768 if (blks > SATA_MAX_READ_BLK) {
1769 datalen = sata_dev_desc[device].blksz * SATA_MAX_READ_BLK;
1770 smallblks = SATA_MAX_READ_BLK;
1771
1772 block = (u32)start;
1773 n_block = (u32)smallblks;
1774
1775 start += SATA_MAX_READ_BLK;
1776 blks -= SATA_MAX_READ_BLK;
1777 } else {
			datalen = sata_dev_desc[device].blksz * blks;
1780 smallblks = (unsigned short)blks;
1781
1782 block = (u32)start;
1783 n_block = (u32)smallblks;
1784
1785 start += blks;
1786 blks = 0;
1787 }
1788
1789 if (ata_dev_read_sectors(pdata, datalen, block, n_block) != TRUE) {
1790 printf("sata_dwc : Hard disk read error.\n");
1791 blkcnt -= blks;
1792 break;
1793 }
1794 buf_addr += datalen;
1795 } while (blks != 0);
1796
1797 return (blkcnt);
1798}
1799
1800static int ata_dev_read_sectors(unsigned char *pdata, unsigned long datalen,
1801 u32 block, u32 n_block)
1802{
1803 struct ata_port *ap = pap;
1804 struct ata_device *dev = &ata_device;
1805 struct ata_taskfile tf;
1806 unsigned int class = ATA_DEV_ATA;
1807 unsigned int err_mask = 0;
1808 const char *reason;
1809 int may_fallback = 1;
1810 int rc;
1811
1812 if (dev_state == SATA_ERROR)
1813 return FALSE;
1814
1815 ata_dev_select(ap, dev->devno, 1, 1);
1816
1817retry:
1818 memset(&tf, 0, sizeof(tf));
1819 tf.ctl = ap->ctl;
1820 ap->print_id = 1;
1821 ap->flags &= ~ATA_FLAG_DISABLED;
1822
1823 ap->pdata = pdata;
1824
1825 tf.device = ATA_DEVICE_OBS;
1826
1827 temp_n_block = n_block;
1828
1829#ifdef CONFIG_LBA48
1830 tf.command = ATA_CMD_PIO_READ_EXT;
1831 tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
1832
1833 tf.hob_feature = 31;
1834 tf.feature = 31;
1835 tf.hob_nsect = (n_block >> 8) & 0xff;
1836 tf.nsect = n_block & 0xff;
1837
1838 tf.hob_lbah = 0x0;
1839 tf.hob_lbam = 0x0;
1840 tf.hob_lbal = (block >> 24) & 0xff;
1841 tf.lbah = (block >> 16) & 0xff;
1842 tf.lbam = (block >> 8) & 0xff;
1843 tf.lbal = block & 0xff;
1844
1845 tf.device = 1 << 6;
1846 if (tf.flags & ATA_TFLAG_FUA)
1847 tf.device |= 1 << 7;
1848#else
1849 tf.command = ATA_CMD_PIO_READ;
1850 tf.flags |= ATA_TFLAG_LBA ;
1851
1852 tf.feature = 31;
1853 tf.nsect = n_block & 0xff;
1854
1855 tf.lbah = (block >> 16) & 0xff;
1856 tf.lbam = (block >> 8) & 0xff;
1857 tf.lbal = block & 0xff;
1858
1859 tf.device = (block >> 24) & 0xf;
1860
1861 tf.device |= 1 << 6;
1862 if (tf.flags & ATA_TFLAG_FUA)
1863 tf.device |= 1 << 7;
1864
1865#endif
1866
1867 tf.protocol = ATA_PROT_PIO;
1868
1869
1870
1871
1872 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1873 tf.flags |= ATA_TFLAG_POLLING;
1874
1875 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, 0, 0);
1876
1877 if (err_mask) {
1878 if (err_mask & AC_ERR_NODEV_HINT) {
1879 printf("READ_SECTORS NODEV after polling detection\n");
1880 return -ENOENT;
1881 }
1882
1883 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1884
1885
1886
1887
1888
1889 if (may_fallback) {
1890 may_fallback = 0;
1891
1892 if (class == ATA_DEV_ATA) {
1893 class = ATA_DEV_ATAPI;
1894 } else {
1895 class = ATA_DEV_ATA;
1896 }
1897 goto retry;
1898 }
1899
1900
1901
1902
1903 printf("both IDENTIFYs aborted, assuming NODEV\n");
1904 return -ENOENT;
1905 }
1906
1907 rc = -EIO;
1908 reason = "I/O error";
1909 goto err_out;
1910 }
1911
1912
1913
1914
1915 may_fallback = 0;
1916
1917 rc = -EINVAL;
1918 reason = "device reports invalid type";
1919
1920 return TRUE;
1921
1922err_out:
1923 printf("failed to READ SECTORS (%s, err_mask=0x%x)\n", reason, err_mask);
1924 return FALSE;
1925}
1926
1927#if defined(CONFIG_SATA_DWC) && !defined(CONFIG_LBA48)
1928#define SATA_MAX_WRITE_BLK 0xFF
1929#else
1930#define SATA_MAX_WRITE_BLK 0xFFFF
1931#endif
1932
1933ulong sata_write(int device, ulong blknr, lbaint_t blkcnt, void *buffer)
1934{
1935 ulong start,blks, buf_addr;
1936 unsigned short smallblks;
1937 unsigned long datalen;
1938 unsigned char *pdata;
1939 device &= 0xff;
1940
1941
1942 u32 block = 0;
1943 u32 n_block = 0;
1944
1945 if (dev_state != SATA_READY)
1946 return 0;
1947
1948 buf_addr = (unsigned long)buffer;
1949 start = blknr;
1950 blks = blkcnt;
1951 do {
1952 pdata = (unsigned char *)buf_addr;
1953 if (blks > SATA_MAX_WRITE_BLK) {
1954 datalen = sata_dev_desc[device].blksz * SATA_MAX_WRITE_BLK;
1955 smallblks = SATA_MAX_WRITE_BLK;
1956
1957 block = (u32)start;
1958 n_block = (u32)smallblks;
1959
1960 start += SATA_MAX_WRITE_BLK;
1961 blks -= SATA_MAX_WRITE_BLK;
1962 } else {
1963 datalen = sata_dev_desc[device].blksz * blks;
1964 smallblks = (unsigned short)blks;
1965
1966 block = (u32)start;
1967 n_block = (u32)smallblks;
1968
1969 start += blks;
1970 blks = 0;
1971 }
1972
1973 if (ata_dev_write_sectors(pdata, datalen, block, n_block) != TRUE) {
			printf("sata_dwc : Hard disk write error.\n");
1975 blkcnt -= blks;
1976 break;
1977 }
1978 buf_addr += datalen;
1979 } while (blks != 0);
1980
1981 return (blkcnt);
1982}
1983
1984static int ata_dev_write_sectors(unsigned char* pdata, unsigned long datalen,
1985 u32 block, u32 n_block)
1986{
1987 struct ata_port *ap = pap;
1988 struct ata_device *dev = &ata_device;
1989 struct ata_taskfile tf;
1990 unsigned int class = ATA_DEV_ATA;
1991 unsigned int err_mask = 0;
1992 const char *reason;
1993 int may_fallback = 1;
1994 int rc;
1995
1996 if (dev_state == SATA_ERROR)
1997 return FALSE;
1998
1999 ata_dev_select(ap, dev->devno, 1, 1);
2000
2001retry:
2002 memset(&tf, 0, sizeof(tf));
2003 tf.ctl = ap->ctl;
2004 ap->print_id = 1;
2005 ap->flags &= ~ATA_FLAG_DISABLED;
2006
2007 ap->pdata = pdata;
2008
2009 tf.device = ATA_DEVICE_OBS;
2010
2011 temp_n_block = n_block;
2012
2013
2014#ifdef CONFIG_LBA48
2015 tf.command = ATA_CMD_PIO_WRITE_EXT;
2016 tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48 | ATA_TFLAG_WRITE;
2017
2018 tf.hob_feature = 31;
2019 tf.feature = 31;
2020 tf.hob_nsect = (n_block >> 8) & 0xff;
2021 tf.nsect = n_block & 0xff;
2022
2023 tf.hob_lbah = 0x0;
2024 tf.hob_lbam = 0x0;
2025 tf.hob_lbal = (block >> 24) & 0xff;
2026 tf.lbah = (block >> 16) & 0xff;
2027 tf.lbam = (block >> 8) & 0xff;
2028 tf.lbal = block & 0xff;
2029
2030 tf.device = 1 << 6;
2031 if (tf.flags & ATA_TFLAG_FUA)
2032 tf.device |= 1 << 7;
2033#else
2034 tf.command = ATA_CMD_PIO_WRITE;
2035 tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_WRITE;
2036
2037 tf.feature = 31;
2038 tf.nsect = n_block & 0xff;
2039
2040 tf.lbah = (block >> 16) & 0xff;
2041 tf.lbam = (block >> 8) & 0xff;
2042 tf.lbal = block & 0xff;
2043
2044 tf.device = (block >> 24) & 0xf;
2045
2046 tf.device |= 1 << 6;
2047 if (tf.flags & ATA_TFLAG_FUA)
2048 tf.device |= 1 << 7;
2049
2050#endif
2051
2052 tf.protocol = ATA_PROT_PIO;
2053
2054
2055
2056
2057 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2058 tf.flags |= ATA_TFLAG_POLLING;
2059
2060 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, 0, 0);
2061
2062 if (err_mask) {
2063 if (err_mask & AC_ERR_NODEV_HINT) {
			printf("WRITE_SECTORS NODEV after polling detection\n");
2065 return -ENOENT;
2066 }
2067
2068 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
2069
2070
2071
2072
2073
2074 if (may_fallback) {
2075 may_fallback = 0;
2076
2077 if (class == ATA_DEV_ATA) {
2078 class = ATA_DEV_ATAPI;
2079 } else {
2080 class = ATA_DEV_ATA;
2081 }
2082 goto retry;
2083 }
2084
2085
2086
2087
2088 printf("both IDENTIFYs aborted, assuming NODEV\n");
2089 return -ENOENT;
2090 }
2091
2092 rc = -EIO;
2093 reason = "I/O error";
2094 goto err_out;
2095 }
2096
2097
2098
2099
2100 may_fallback = 0;
2101
2102 rc = -EINVAL;
2103 reason = "device reports invalid type";
2104
2105 return TRUE;
2106
2107err_out:
2108 printf("failed to WRITE SECTORS (%s, err_mask=0x%x)\n", reason, err_mask);
2109 return FALSE;
2110}
2111