/*
 *  libata-sff.c - helper library for PCI IDE BMDMA (SFF) controllers
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/libata.h>
#include <linux/highmem.h>

#include "libata.h"
const struct ata_port_operations ata_sff_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_prep		= ata_sff_qc_prep,
	.qc_issue		= ata_sff_qc_issue,
	.qc_fill_rtf		= ata_sff_qc_fill_rtf,

	.freeze			= ata_sff_freeze,
	.thaw			= ata_sff_thaw,
	.prereset		= ata_sff_prereset,
	.softreset		= ata_sff_softreset,
	.hardreset		= sata_sff_hardreset,
	.postreset		= ata_sff_postreset,
	.drain_fifo		= ata_sff_drain_fifo,
	.error_handler		= ata_sff_error_handler,
	.post_internal_cmd	= ata_sff_post_internal_cmd,

	.sff_dev_select		= ata_sff_dev_select,
	.sff_check_status	= ata_sff_check_status,
	.sff_tf_load		= ata_sff_tf_load,
	.sff_tf_read		= ata_sff_tf_read,
	.sff_exec_command	= ata_sff_exec_command,
	.sff_data_xfer		= ata_sff_data_xfer,
	.sff_irq_on		= ata_sff_irq_on,
	.sff_irq_clear		= ata_sff_irq_clear,

	.lost_interrupt		= ata_sff_lost_interrupt,

	.port_start		= ata_sff_port_start,
};
EXPORT_SYMBOL_GPL(ata_sff_port_ops);

const struct ata_port_operations ata_bmdma_port_ops = {
	.inherits		= &ata_sff_port_ops,

	.mode_filter		= ata_bmdma_mode_filter,

	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
};
EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);

const struct ata_port_operations ata_bmdma32_port_ops = {
	.inherits		= &ata_bmdma_port_ops,

	.sff_data_xfer		= ata_sff_data_xfer32,
	.port_start		= ata_sff_port_start32,
};
EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
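
/*
 * Illustrative sketch (hypothetical driver, not part of this file): a
 * low-level driver normally inherits one of the port_ops templates above
 * and overrides only the hooks it needs, e.g.:
 *
 *	static struct ata_port_operations my_pata_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.set_piomode	= my_pata_set_piomode,
 *		.set_dmamode	= my_pata_set_dmamode,
 *	};
 *
 * where my_pata_set_piomode()/my_pata_set_dmamode() are the driver's own
 * timing-programming routines.
 */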

/**
 *	ata_fill_sg - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int si, pi;

	pi = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			ap->prd[pi].addr = cpu_to_le32(addr);
			ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);

			pi++;
			sg_len -= len;
			addr += len;
		}
	}

	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}

/**
 *	ata_fill_sg_dumb - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command, while splitting the
 *	fill so that no single PRD entry is written with a length of
 *	64K, for controllers that don't follow the spec on that point.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int si, pi;

	pi = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len, blen;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			blen = len & 0xffff;
			ap->prd[pi].addr = cpu_to_le32(addr);
			if (blen == 0) {
				/* Some PATA chipsets like the CS5530 can't
				   cope with 0x0000 meaning 64K as the spec
				   says, so split it into two 32K entries */
				ap->prd[pi].flags_len = cpu_to_le32(0x8000);
				blen = 0x8000;
				ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
			}
			ap->prd[pi].flags_len = cpu_to_le32(blen);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);

			pi++;
			sg_len -= len;
			addr += len;
		}
	}

	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}

/**
 *	ata_sff_qc_prep - Prepare taskfile for submission
 *	@qc: Metadata associated with taskfile to be prepared
 *
 *	Prepare ATA taskfile for submission.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sff_qc_prep(struct ata_queued_cmd *qc)
{
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	ata_fill_sg(qc);
}
EXPORT_SYMBOL_GPL(ata_sff_qc_prep);

/**
 *	ata_sff_dumb_qc_prep - Prepare taskfile for submission
 *	@qc: Metadata associated with taskfile to be prepared
 *
 *	Prepare ATA taskfile for submission, using the "dumb" PRD
 *	fill that never emits a 64K entry.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sff_dumb_qc_prep(struct ata_queued_cmd *qc)
{
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	ata_fill_sg_dumb(qc);
}
EXPORT_SYMBOL_GPL(ata_sff_dumb_qc_prep);

/**
 *	ata_sff_check_status - Read device status reg & clear interrupt
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile status register for currently-selected device
 *	and return its value. This also clears pending interrupts
 *	from this device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
u8 ata_sff_check_status(struct ata_port *ap)
{
	return ioread8(ap->ioaddr.status_addr);
}
EXPORT_SYMBOL_GPL(ata_sff_check_status);

/**
 *	ata_sff_altstatus - Read device alternate status reg
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile alternate status register for
 *	currently-selected device and return its value.
 *
 *	Note: may NOT be used as the sff_check_altstatus() entry in
 *	ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static u8 ata_sff_altstatus(struct ata_port *ap)
{
	if (ap->ops->sff_check_altstatus)
		return ap->ops->sff_check_altstatus(ap);

	return ioread8(ap->ioaddr.altstatus_addr);
}

/**
 *	ata_sff_irq_status - Check if the device is busy
 *	@ap: port where the device is
 *
 *	Determine if the port is currently busy. Uses altstatus
 *	if available in order to avoid clearing shared IRQ status
 *	when finding an IRQ source. Non ctl capable devices don't
 *	share interrupt lines fortunately for us.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static u8 ata_sff_irq_status(struct ata_port *ap)
{
	u8 status;

	if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
		status = ata_sff_altstatus(ap);
		/* Not us: We are busy */
		if (status & ATA_BUSY)
			return status;
	}
	/* Clear INTRQ latch */
	status = ap->ops->sff_check_status(ap);
	return status;
}

/**
 *	ata_sff_sync - Flush writes
 *	@ap: Port to wait for.
 *
 *	CAUTION:
 *	If we have an mmio device with no ctl and no altstatus
 *	method this will fail. No such devices are known to exist.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_sff_sync(struct ata_port *ap)
{
	if (ap->ops->sff_check_altstatus)
		ap->ops->sff_check_altstatus(ap);
	else if (ap->ioaddr.altstatus_addr)
		ioread8(ap->ioaddr.altstatus_addr);
}

/**
 *	ata_sff_pause - Flush writes and wait 400nS
 *	@ap: Port to pause for.
 *
 *	CAUTION:
 *	If we have an mmio device with no ctl and no altstatus
 *	method this will fail. No such devices are known to exist.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_sff_pause(struct ata_port *ap)
{
	ata_sff_sync(ap);
	ndelay(400);
}
EXPORT_SYMBOL_GPL(ata_sff_pause);

/**
 *	ata_sff_dma_pause - Pause before commencing DMA
 *	@ap: Port to pause for.
 *
 *	Perform I/O fencing and ensure sufficient cycle delays occur
 *	for the HDMA1:0 transition.
 */
void ata_sff_dma_pause(struct ata_port *ap)
{
	if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
		/* An altstatus read will cause the needed delay without
		   messing up the IRQ status */
		ata_sff_altstatus(ap);
		return;
	}
	/* There are no DMA controllers without ctl. BUG here to ensure
	   we never violate the HDMA1:0 transition timing and risk
	   corruption. */
	BUG();
}
EXPORT_SYMBOL_GPL(ata_sff_dma_pause);

/**
 *	ata_sff_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout in msecs
 *	@tmout: overall timeout in msecs
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
379int ata_sff_busy_sleep(struct ata_port *ap,
380 unsigned long tmout_pat, unsigned long tmout)
381{
382 unsigned long timer_start, timeout;
383 u8 status;
384
385 status = ata_sff_busy_wait(ap, ATA_BUSY, 300);
386 timer_start = jiffies;
387 timeout = ata_deadline(timer_start, tmout_pat);
388 while (status != 0xff && (status & ATA_BUSY) &&
389 time_before(jiffies, timeout)) {
390 msleep(50);
391 status = ata_sff_busy_wait(ap, ATA_BUSY, 3);
392 }
393
394 if (status != 0xff && (status & ATA_BUSY))
395 ata_port_printk(ap, KERN_WARNING,
396 "port is slow to respond, please be patient "
397 "(Status 0x%x)\n", status);
398
399 timeout = ata_deadline(timer_start, tmout);
400 while (status != 0xff && (status & ATA_BUSY) &&
401 time_before(jiffies, timeout)) {
402 msleep(50);
403 status = ap->ops->sff_check_status(ap);
404 }
405
406 if (status == 0xff)
407 return -ENODEV;
408
409 if (status & ATA_BUSY) {
410 ata_port_printk(ap, KERN_ERR, "port failed to respond "
411 "(%lu secs, Status 0x%x)\n",
412 DIV_ROUND_UP(tmout, 1000), status);
413 return -EBUSY;
414 }
415
416 return 0;
417}
418EXPORT_SYMBOL_GPL(ata_sff_busy_sleep);
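
/*
 * Illustrative sketch (assumed timeout constants): drivers that still do
 * their own bus resets typically poll with the standard boot timeouts:
 *
 *	if (ata_sff_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT))
 *		; // port is dead or wedged, give up on it
 *
 * Both timeout arguments are in milliseconds.
 */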
419
420static int ata_sff_check_ready(struct ata_link *link)
421{
422 u8 status = link->ap->ops->sff_check_status(link->ap);
423
424 return ata_check_ready(status);
425}

/**
 *	ata_sff_wait_ready - sleep until BSY clears, or timeout
 *	@link: SFF link to wait ready status for
 *	@deadline: deadline jiffies for the operation
 *
 *	Sleep until ATA Status register bit BSY clears, or timeout
 *	occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
441int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline)
442{
443 return ata_wait_ready(link, deadline, ata_sff_check_ready);
444}
445EXPORT_SYMBOL_GPL(ata_sff_wait_ready);

/**
 *	ata_sff_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.  Works with both PIO and MMIO.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
461void ata_sff_dev_select(struct ata_port *ap, unsigned int device)
462{
463 u8 tmp;
464
465 if (device == 0)
466 tmp = ATA_DEVICE_OBS;
467 else
468 tmp = ATA_DEVICE_OBS | ATA_DEV1;
469
470 iowrite8(tmp, ap->ioaddr.device_addr);
471 ata_sff_pause(ap);
472}
473EXPORT_SYMBOL_GPL(ata_sff_dev_select);

/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_sff_dev_select(), which
 *	additionally provides the services of inserting the proper
 *	pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */
493void ata_dev_select(struct ata_port *ap, unsigned int device,
494 unsigned int wait, unsigned int can_sleep)
495{
496 if (ata_msg_probe(ap))
497 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
498 "device %u, wait %u\n", device, wait);
499
500 if (wait)
501 ata_wait_idle(ap);
502
503 ap->ops->sff_dev_select(ap, device);
504
505 if (wait) {
506 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
507 msleep(150);
508 ata_wait_idle(ap);
509 }
510}

/**
 *	ata_sff_irq_on - Enable interrupts on a port.
 *	@ap: Port on which interrupts are enabled.
 *
 *	Enable interrupts on a legacy IDE device using MMIO or PIO,
 *	wait for idle, clear any pending interrupts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
522u8 ata_sff_irq_on(struct ata_port *ap)
523{
524 struct ata_ioports *ioaddr = &ap->ioaddr;
525 u8 tmp;
526
527 ap->ctl &= ~ATA_NIEN;
528 ap->last_ctl = ap->ctl;
529
530 if (ioaddr->ctl_addr)
531 iowrite8(ap->ctl, ioaddr->ctl_addr);
532 tmp = ata_wait_idle(ap);
533
534 ap->ops->sff_irq_clear(ap);
535
536 return tmp;
537}
538EXPORT_SYMBOL_GPL(ata_sff_irq_on);

/**
 *	ata_sff_irq_clear - Clear PCI IDE BMDMA interrupt.
 *	@ap: Port associated with this ATA transaction.
 *
 *	Clear interrupt and error flags in DMA status register.
 *
 *	May be used as the irq_clear() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
551void ata_sff_irq_clear(struct ata_port *ap)
552{
553 void __iomem *mmio = ap->ioaddr.bmdma_addr;
554
555 if (!mmio)
556 return;
557
558 iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
559}
560EXPORT_SYMBOL_GPL(ata_sff_irq_clear);

/**
 *	ata_sff_tf_load - send taskfile registers to host controller
 *	@ap: Port to which output is sent
 *	@tf: ATA taskfile register set
 *
 *	Outputs ATA taskfile to standard ATA host controller.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
572void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
573{
574 struct ata_ioports *ioaddr = &ap->ioaddr;
575 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
576
577 if (tf->ctl != ap->last_ctl) {
578 if (ioaddr->ctl_addr)
579 iowrite8(tf->ctl, ioaddr->ctl_addr);
580 ap->last_ctl = tf->ctl;
581 ata_wait_idle(ap);
582 }
583
584 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
585 WARN_ON_ONCE(!ioaddr->ctl_addr);
586 iowrite8(tf->hob_feature, ioaddr->feature_addr);
587 iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
588 iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
589 iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
590 iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
591 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
592 tf->hob_feature,
593 tf->hob_nsect,
594 tf->hob_lbal,
595 tf->hob_lbam,
596 tf->hob_lbah);
597 }
598
599 if (is_addr) {
600 iowrite8(tf->feature, ioaddr->feature_addr);
601 iowrite8(tf->nsect, ioaddr->nsect_addr);
602 iowrite8(tf->lbal, ioaddr->lbal_addr);
603 iowrite8(tf->lbam, ioaddr->lbam_addr);
604 iowrite8(tf->lbah, ioaddr->lbah_addr);
605 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
606 tf->feature,
607 tf->nsect,
608 tf->lbal,
609 tf->lbam,
610 tf->lbah);
611 }
612
613 if (tf->flags & ATA_TFLAG_DEVICE) {
614 iowrite8(tf->device, ioaddr->device_addr);
615 VPRINTK("device 0x%X\n", tf->device);
616 }
617
618 ata_wait_idle(ap);
619}
620EXPORT_SYMBOL_GPL(ata_sff_tf_load);

/**
 *	ata_sff_tf_read - input device's ATA taskfile shadow registers
 *	@ap: Port from which input is read
 *	@tf: ATA taskfile register set for storing input
 *
 *	Reads ATA taskfile registers for currently-selected device
 *	into @tf. Assumes the device has a fully SFF compliant task file
 *	layout and behaviour. If your device does not (e.g. has a
 *	different status method) then you will need to provide a
 *	replacement tf_read.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
635void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
636{
637 struct ata_ioports *ioaddr = &ap->ioaddr;
638
639 tf->command = ata_sff_check_status(ap);
640 tf->feature = ioread8(ioaddr->error_addr);
641 tf->nsect = ioread8(ioaddr->nsect_addr);
642 tf->lbal = ioread8(ioaddr->lbal_addr);
643 tf->lbam = ioread8(ioaddr->lbam_addr);
644 tf->lbah = ioread8(ioaddr->lbah_addr);
645 tf->device = ioread8(ioaddr->device_addr);
646
647 if (tf->flags & ATA_TFLAG_LBA48) {
648 if (likely(ioaddr->ctl_addr)) {
649 iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
650 tf->hob_feature = ioread8(ioaddr->error_addr);
651 tf->hob_nsect = ioread8(ioaddr->nsect_addr);
652 tf->hob_lbal = ioread8(ioaddr->lbal_addr);
653 tf->hob_lbam = ioread8(ioaddr->lbam_addr);
654 tf->hob_lbah = ioread8(ioaddr->lbah_addr);
655 iowrite8(tf->ctl, ioaddr->ctl_addr);
656 ap->last_ctl = tf->ctl;
657 } else
658 WARN_ON_ONCE(1);
659 }
660}
661EXPORT_SYMBOL_GPL(ata_sff_tf_read);

/**
 *	ata_sff_exec_command - issue ATA command to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA command, with proper synchronization with interrupt
 *	handler / other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
674void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
675{
676 DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
677
678 iowrite8(tf->command, ap->ioaddr.command_addr);
679 ata_sff_pause(ap);
680}
681EXPORT_SYMBOL_GPL(ata_sff_exec_command);

/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
695static inline void ata_tf_to_host(struct ata_port *ap,
696 const struct ata_taskfile *tf)
697{
698 ap->ops->sff_tf_load(ap, tf);
699 ap->ops->sff_exec_command(ap, tf);
700}

/**
 *	ata_sff_data_xfer - Transfer data by PIO
 *	@dev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@rw: read/write
 *
 *	Transfer data from/to the device data register by PIO.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	Bytes consumed.
 */
717unsigned int ata_sff_data_xfer(struct ata_device *dev, unsigned char *buf,
718 unsigned int buflen, int rw)
719{
720 struct ata_port *ap = dev->link->ap;
721 void __iomem *data_addr = ap->ioaddr.data_addr;
722 unsigned int words = buflen >> 1;
723
724
725 if (rw == READ)
726 ioread16_rep(data_addr, buf, words);
727 else
728 iowrite16_rep(data_addr, buf, words);
729
730
731 if (unlikely(buflen & 0x01)) {
732 unsigned char pad[2];
733
734
735 buf += buflen - 1;
736
737
738
739
740
741 if (rw == READ) {
742 ioread16_rep(data_addr, pad, 1);
743 *buf = pad[0];
744 } else {
745 pad[0] = *buf;
746 iowrite16_rep(data_addr, pad, 1);
747 }
748 words++;
749 }
750
751 return words << 1;
752}
753EXPORT_SYMBOL_GPL(ata_sff_data_xfer);

/**
 *	ata_sff_data_xfer32 - Transfer data by PIO
 *	@dev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@rw: read/write
 *
 *	Transfer data from/to the device data register by PIO using
 *	32bit I/O operations, falling back to 16bit transfers when
 *	32bit PIO is not enabled for the port.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	Bytes consumed.
 */
772unsigned int ata_sff_data_xfer32(struct ata_device *dev, unsigned char *buf,
773 unsigned int buflen, int rw)
774{
775 struct ata_port *ap = dev->link->ap;
776 void __iomem *data_addr = ap->ioaddr.data_addr;
777 unsigned int words = buflen >> 2;
778 int slop = buflen & 3;
779
780 if (!(ap->pflags & ATA_PFLAG_PIO32))
781 return ata_sff_data_xfer(dev, buf, buflen, rw);
782
783
784 if (rw == READ)
785 ioread32_rep(data_addr, buf, words);
786 else
787 iowrite32_rep(data_addr, buf, words);
788
789
790 if (unlikely(slop)) {
791 unsigned char pad[4];
792
793
794 buf += buflen - slop;
795
796
797
798
799
800 if (rw == READ) {
801 if (slop < 3)
802 ioread16_rep(data_addr, pad, 1);
803 else
804 ioread32_rep(data_addr, pad, 1);
805 memcpy(buf, pad, slop);
806 } else {
807 memcpy(pad, buf, slop);
808 if (slop < 3)
809 iowrite16_rep(data_addr, pad, 1);
810 else
811 iowrite32_rep(data_addr, pad, 1);
812 }
813 }
814 return (buflen + 1) & ~1;
815}
816EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);

/**
 *	ata_sff_data_xfer_noirq - Transfer data by PIO
 *	@dev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@rw: read/write
 *
 *	Transfer data from/to the device data register by PIO. Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	Bytes consumed.
 */
834unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
835 unsigned int buflen, int rw)
836{
837 unsigned long flags;
838 unsigned int consumed;
839
840 local_irq_save(flags);
841 consumed = ata_sff_data_xfer(dev, buf, buflen, rw);
842 local_irq_restore(flags);
843
844 return consumed;
845}
846EXPORT_SYMBOL_GPL(ata_sff_data_xfer_noirq);
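
/*
 * Illustrative sketch (hypothetical driver): controllers whose data FIFO
 * cannot tolerate the CPU being interrupted mid-transfer simply select the
 * _noirq variant in their port operations:
 *
 *	static struct ata_port_operations my_vlb_port_ops = {
 *		.inherits	= &ata_sff_port_ops,
 *		.sff_data_xfer	= ata_sff_data_xfer_noirq,
 *	};
 */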

/**
 *	ata_pio_sector - Transfer a sector of data.
 *	@qc: Command on going
 *
 *	Transfer qc->sect_size bytes of data from/to the ATA device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
857static void ata_pio_sector(struct ata_queued_cmd *qc)
858{
859 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
860 struct ata_port *ap = qc->ap;
861 struct page *page;
862 unsigned int offset;
863 unsigned char *buf;
864
865 if (qc->curbytes == qc->nbytes - qc->sect_size)
866 ap->hsm_task_state = HSM_ST_LAST;
867
868 page = sg_page(qc->cursg);
869 offset = qc->cursg->offset + qc->cursg_ofs;
870
871
872 page = nth_page(page, (offset >> PAGE_SHIFT));
873 offset %= PAGE_SIZE;
874
875 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
876
877 if (PageHighMem(page)) {
878 unsigned long flags;
879
880
881 local_irq_save(flags);
882 buf = kmap_atomic(page, KM_IRQ0);
883
884
885 ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
886 do_write);
887
888 kunmap_atomic(buf, KM_IRQ0);
889 local_irq_restore(flags);
890 } else {
891 buf = page_address(page);
892 ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
893 do_write);
894 }
895
896 qc->curbytes += qc->sect_size;
897 qc->cursg_ofs += qc->sect_size;
898
899 if (qc->cursg_ofs == qc->cursg->length) {
900 qc->cursg = sg_next(qc->cursg);
901 qc->cursg_ofs = 0;
902 }
903}

/**
 *	ata_pio_sectors - Transfer one or many sectors.
 *	@qc: Command on going
 *
 *	Transfer one or many sectors of data from/to the
 *	ATA device for the DRQ request.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
915static void ata_pio_sectors(struct ata_queued_cmd *qc)
916{
917 if (is_multi_taskfile(&qc->tf)) {
918
919 unsigned int nsect;
920
921 WARN_ON_ONCE(qc->dev->multi_count == 0);
922
923 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
924 qc->dev->multi_count);
925 while (nsect--)
926 ata_pio_sector(qc);
927 } else
928 ata_pio_sector(qc);
929
930 ata_sff_sync(qc->ap);
931}

/**
 *	atapi_send_cdb - Write CDB bytes to hardware
 *	@ap: Port to which ATAPI device is attached.
 *	@qc: Taskfile currently active
 *
 *	When device has indicated its readiness to accept
 *	a CDB, this function is called.  Send the CDB.
 *
 *	LOCKING:
 *	caller.
 */
944static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
945{
946
947 DPRINTK("send cdb\n");
948 WARN_ON_ONCE(qc->dev->cdb_len < 12);
949
950 ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
951 ata_sff_sync(ap);
952
953
954 switch (qc->tf.protocol) {
955 case ATAPI_PROT_PIO:
956 ap->hsm_task_state = HSM_ST;
957 break;
958 case ATAPI_PROT_NODATA:
959 ap->hsm_task_state = HSM_ST_LAST;
960 break;
961 case ATAPI_PROT_DMA:
962 ap->hsm_task_state = HSM_ST_LAST;
963
964 ap->ops->bmdma_start(qc);
965 break;
966 }
967}

/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *	@bytes: number of bytes
 *
 *	Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
980static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
981{
982 int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
983 struct ata_port *ap = qc->ap;
984 struct ata_device *dev = qc->dev;
985 struct ata_eh_info *ehi = &dev->link->eh_info;
986 struct scatterlist *sg;
987 struct page *page;
988 unsigned char *buf;
989 unsigned int offset, count, consumed;
990
991next_sg:
992 sg = qc->cursg;
993 if (unlikely(!sg)) {
994 ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
995 "buf=%u cur=%u bytes=%u",
996 qc->nbytes, qc->curbytes, bytes);
997 return -1;
998 }
999
1000 page = sg_page(sg);
1001 offset = sg->offset + qc->cursg_ofs;
1002
1003
1004 page = nth_page(page, (offset >> PAGE_SHIFT));
1005 offset %= PAGE_SIZE;
1006
1007
1008 count = min(sg->length - qc->cursg_ofs, bytes);
1009
1010
1011 count = min(count, (unsigned int)PAGE_SIZE - offset);
1012
1013 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
1014
1015 if (PageHighMem(page)) {
1016 unsigned long flags;
1017
1018
1019 local_irq_save(flags);
1020 buf = kmap_atomic(page, KM_IRQ0);
1021
1022
1023 consumed = ap->ops->sff_data_xfer(dev, buf + offset,
1024 count, rw);
1025
1026 kunmap_atomic(buf, KM_IRQ0);
1027 local_irq_restore(flags);
1028 } else {
1029 buf = page_address(page);
1030 consumed = ap->ops->sff_data_xfer(dev, buf + offset,
1031 count, rw);
1032 }
1033
1034 bytes -= min(bytes, consumed);
1035 qc->curbytes += count;
1036 qc->cursg_ofs += count;
1037
1038 if (qc->cursg_ofs == sg->length) {
1039 qc->cursg = sg_next(qc->cursg);
1040 qc->cursg_ofs = 0;
1041 }
1042
1043
1044
1045
1046
1047
1048
1049 if (bytes)
1050 goto next_sg;
1051 return 0;
1052}

/**
 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *
 *	Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
1063static void atapi_pio_bytes(struct ata_queued_cmd *qc)
1064{
1065 struct ata_port *ap = qc->ap;
1066 struct ata_device *dev = qc->dev;
1067 struct ata_eh_info *ehi = &dev->link->eh_info;
1068 unsigned int ireason, bc_lo, bc_hi, bytes;
1069 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
1070
1071
1072
1073
1074
1075
1076
1077 ap->ops->sff_tf_read(ap, &qc->result_tf);
1078 ireason = qc->result_tf.nsect;
1079 bc_lo = qc->result_tf.lbam;
1080 bc_hi = qc->result_tf.lbah;
1081 bytes = (bc_hi << 8) | bc_lo;
1082
1083
1084 if (unlikely(ireason & (1 << 0)))
1085 goto atapi_check;
1086
1087
1088 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
1089 if (unlikely(do_write != i_write))
1090 goto atapi_check;
1091
1092 if (unlikely(!bytes))
1093 goto atapi_check;
1094
1095 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
1096
1097 if (unlikely(__atapi_pio_bytes(qc, bytes)))
1098 goto err_out;
1099 ata_sff_sync(ap);
1100
1101 return;
1102
1103 atapi_check:
1104 ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
1105 ireason, bytes);
1106 err_out:
1107 qc->err_mask |= AC_ERR_HSM;
1108 ap->hsm_task_state = HSM_ST_ERR;
1109}

/**
 *	ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *
 *	RETURNS:
 *	1 if ok in workqueue, 0 otherwise.
 */
1119static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
1120 struct ata_queued_cmd *qc)
1121{
1122 if (qc->tf.flags & ATA_TFLAG_POLLING)
1123 return 1;
1124
1125 if (ap->hsm_task_state == HSM_ST_FIRST) {
1126 if (qc->tf.protocol == ATA_PROT_PIO &&
1127 (qc->tf.flags & ATA_TFLAG_WRITE))
1128 return 1;
1129
1130 if (ata_is_atapi(qc->tf.protocol) &&
1131 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1132 return 1;
1133 }
1134
1135 return 0;
1136}

/**
 *	ata_hsm_qc_complete - finish a qc running on standard HSM
 *	@qc: Command to complete
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Finish @qc which is running on standard HSM.
 *
 *	LOCKING:
 *	If @in_wq is zero, spin_lock_irqsave(host lock).
 *	Otherwise, none on entry and grabs host lock.
 */
1149static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
1150{
1151 struct ata_port *ap = qc->ap;
1152 unsigned long flags;
1153
1154 if (ap->ops->error_handler) {
1155 if (in_wq) {
1156 spin_lock_irqsave(ap->lock, flags);
1157
1158
1159
1160
1161 qc = ata_qc_from_tag(ap, qc->tag);
1162 if (qc) {
1163 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
1164 ap->ops->sff_irq_on(ap);
1165 ata_qc_complete(qc);
1166 } else
1167 ata_port_freeze(ap);
1168 }
1169
1170 spin_unlock_irqrestore(ap->lock, flags);
1171 } else {
1172 if (likely(!(qc->err_mask & AC_ERR_HSM)))
1173 ata_qc_complete(qc);
1174 else
1175 ata_port_freeze(ap);
1176 }
1177 } else {
1178 if (in_wq) {
1179 spin_lock_irqsave(ap->lock, flags);
1180 ap->ops->sff_irq_on(ap);
1181 ata_qc_complete(qc);
1182 spin_unlock_irqrestore(ap->lock, flags);
1183 } else
1184 ata_qc_complete(qc);
1185 }
1186}

/**
 *	ata_sff_hsm_move - move the HSM to the next state.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *	@status: current device status
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	RETURNS:
 *	1 when poll next status needed, 0 otherwise.
 */
1198int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
1199 u8 status, int in_wq)
1200{
1201 struct ata_eh_info *ehi = &ap->link.eh_info;
1202 unsigned long flags = 0;
1203 int poll_next;
1204
1205 WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
1206
1207
1208
1209
1210
1211 WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc));
1212
1213fsm_start:
1214 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
1215 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
1216
1217 switch (ap->hsm_task_state) {
1218 case HSM_ST_FIRST:
1219
1220
1221
1222
1223
1224
1225 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
1226
1227
1228 if (unlikely((status & ATA_DRQ) == 0)) {
1229
1230 if (likely(status & (ATA_ERR | ATA_DF)))
1231
1232 qc->err_mask |= AC_ERR_DEV;
1233 else {
1234
1235 ata_ehi_push_desc(ehi,
1236 "ST_FIRST: !(DRQ|ERR|DF)");
1237 qc->err_mask |= AC_ERR_HSM;
1238 }
1239
1240 ap->hsm_task_state = HSM_ST_ERR;
1241 goto fsm_start;
1242 }
1243
1244
1245
1246
1247
1248
1249
1250 if (unlikely(status & (ATA_ERR | ATA_DF))) {
1251
1252
1253
1254
1255
1256 if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
1257 ata_ehi_push_desc(ehi, "ST_FIRST: "
1258 "DRQ=1 with device error, "
1259 "dev_stat 0x%X", status);
1260 qc->err_mask |= AC_ERR_HSM;
1261 ap->hsm_task_state = HSM_ST_ERR;
1262 goto fsm_start;
1263 }
1264 }
1265
1266
1267
1268
1269
1270
1271 if (in_wq)
1272 spin_lock_irqsave(ap->lock, flags);
1273
1274 if (qc->tf.protocol == ATA_PROT_PIO) {
1275
1276
1277
1278
1279
1280
1281
1282
1283 ap->hsm_task_state = HSM_ST;
1284 ata_pio_sectors(qc);
1285 } else
1286
1287 atapi_send_cdb(ap, qc);
1288
1289 if (in_wq)
1290 spin_unlock_irqrestore(ap->lock, flags);
1291
1292
1293
1294
1295 break;
1296
1297 case HSM_ST:
1298
1299 if (qc->tf.protocol == ATAPI_PROT_PIO) {
1300
1301 if ((status & ATA_DRQ) == 0) {
1302
1303
1304
1305 ap->hsm_task_state = HSM_ST_LAST;
1306 goto fsm_start;
1307 }
1308
1309
1310
1311
1312
1313
1314
1315 if (unlikely(status & (ATA_ERR | ATA_DF))) {
1316 ata_ehi_push_desc(ehi, "ST-ATAPI: "
1317 "DRQ=1 with device error, "
1318 "dev_stat 0x%X", status);
1319 qc->err_mask |= AC_ERR_HSM;
1320 ap->hsm_task_state = HSM_ST_ERR;
1321 goto fsm_start;
1322 }
1323
1324 atapi_pio_bytes(qc);
1325
1326 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
1327
1328 goto fsm_start;
1329
1330 } else {
1331
1332 if (unlikely((status & ATA_DRQ) == 0)) {
1333
1334 if (likely(status & (ATA_ERR | ATA_DF))) {
1335
1336 qc->err_mask |= AC_ERR_DEV;
1337
1338
1339
1340
1341
1342 if (qc->dev->horkage &
1343 ATA_HORKAGE_DIAGNOSTIC)
1344 qc->err_mask |=
1345 AC_ERR_NODEV_HINT;
1346 } else {
1347
1348
1349
1350
1351 ata_ehi_push_desc(ehi, "ST-ATA: "
1352 "DRQ=0 without device error, "
1353 "dev_stat 0x%X", status);
1354 qc->err_mask |= AC_ERR_HSM |
1355 AC_ERR_NODEV_HINT;
1356 }
1357
1358 ap->hsm_task_state = HSM_ST_ERR;
1359 goto fsm_start;
1360 }
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372 if (unlikely(status & (ATA_ERR | ATA_DF))) {
1373
1374 qc->err_mask |= AC_ERR_DEV;
1375
1376 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
1377 ata_pio_sectors(qc);
1378 status = ata_wait_idle(ap);
1379 }
1380
1381 if (status & (ATA_BUSY | ATA_DRQ)) {
1382 ata_ehi_push_desc(ehi, "ST-ATA: "
1383 "BUSY|DRQ persists on ERR|DF, "
1384 "dev_stat 0x%X", status);
1385 qc->err_mask |= AC_ERR_HSM;
1386 }
1387
1388
1389
1390
1391
1392
1393
1394
1395 if (status == 0x7f)
1396 qc->err_mask |= AC_ERR_NODEV_HINT;
1397
1398
1399
1400
1401
1402 ap->hsm_task_state = HSM_ST_ERR;
1403 goto fsm_start;
1404 }
1405
1406 ata_pio_sectors(qc);
1407
1408 if (ap->hsm_task_state == HSM_ST_LAST &&
1409 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
1410
1411 status = ata_wait_idle(ap);
1412 goto fsm_start;
1413 }
1414 }
1415
1416 poll_next = 1;
1417 break;
1418
1419 case HSM_ST_LAST:
1420 if (unlikely(!ata_ok(status))) {
1421 qc->err_mask |= __ac_err_mask(status);
1422 ap->hsm_task_state = HSM_ST_ERR;
1423 goto fsm_start;
1424 }
1425
1426
1427 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
1428 ap->print_id, qc->dev->devno, status);
1429
1430 WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));
1431
1432 ap->hsm_task_state = HSM_ST_IDLE;
1433
1434
1435 ata_hsm_qc_complete(qc, in_wq);
1436
1437 poll_next = 0;
1438 break;
1439
1440 case HSM_ST_ERR:
1441 ap->hsm_task_state = HSM_ST_IDLE;
1442
1443
1444 ata_hsm_qc_complete(qc, in_wq);
1445
1446 poll_next = 0;
1447 break;
1448 default:
1449 poll_next = 0;
1450 BUG();
1451 }
1452
1453 return poll_next;
1454}
1455EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
1456
1457void ata_pio_task(struct work_struct *work)
1458{
1459 struct ata_port *ap =
1460 container_of(work, struct ata_port, port_task.work);
1461 struct ata_queued_cmd *qc = ap->port_task_data;
1462 u8 status;
1463 int poll_next;
1464
1465fsm_start:
1466 WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
1467
1468
1469
1470
1471
1472
1473
1474
1475 status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
1476 if (status & ATA_BUSY) {
1477 msleep(2);
1478 status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
1479 if (status & ATA_BUSY) {
1480 ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE);
1481 return;
1482 }
1483 }
1484
1485
1486 poll_next = ata_sff_hsm_move(ap, qc, status, 1);
1487
1488
1489
1490
1491 if (poll_next)
1492 goto fsm_start;
1493}

/**
 *	ata_sff_qc_issue - issue taskfile to device in proto-dependent manner
 *	@qc: command to issue to device
 *
 *	Using various libata functions and hooks, this function
 *	starts an ATA command.  ATA commands are grouped into
 *	classes called "protocols", and issuing each type of protocol
 *	is slightly different.
 *
 *	May be used as the qc_issue() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
1512unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1513{
1514 struct ata_port *ap = qc->ap;
1515
1516
1517
1518
1519 if (ap->flags & ATA_FLAG_PIO_POLLING) {
1520 switch (qc->tf.protocol) {
1521 case ATA_PROT_PIO:
1522 case ATA_PROT_NODATA:
1523 case ATAPI_PROT_PIO:
1524 case ATAPI_PROT_NODATA:
1525 qc->tf.flags |= ATA_TFLAG_POLLING;
1526 break;
1527 case ATAPI_PROT_DMA:
1528 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
1529
1530 BUG();
1531 break;
1532 default:
1533 break;
1534 }
1535 }
1536
1537
1538 ata_dev_select(ap, qc->dev->devno, 1, 0);
1539
1540
1541 switch (qc->tf.protocol) {
1542 case ATA_PROT_NODATA:
1543 if (qc->tf.flags & ATA_TFLAG_POLLING)
1544 ata_qc_set_polling(qc);
1545
1546 ata_tf_to_host(ap, &qc->tf);
1547 ap->hsm_task_state = HSM_ST_LAST;
1548
1549 if (qc->tf.flags & ATA_TFLAG_POLLING)
1550 ata_pio_queue_task(ap, qc, 0);
1551
1552 break;
1553
1554 case ATA_PROT_DMA:
1555 WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
1556
1557 ap->ops->sff_tf_load(ap, &qc->tf);
1558 ap->ops->bmdma_setup(qc);
1559 ap->ops->bmdma_start(qc);
1560 ap->hsm_task_state = HSM_ST_LAST;
1561 break;
1562
1563 case ATA_PROT_PIO:
1564 if (qc->tf.flags & ATA_TFLAG_POLLING)
1565 ata_qc_set_polling(qc);
1566
1567 ata_tf_to_host(ap, &qc->tf);
1568
1569 if (qc->tf.flags & ATA_TFLAG_WRITE) {
1570
1571 ap->hsm_task_state = HSM_ST_FIRST;
1572 ata_pio_queue_task(ap, qc, 0);
1573
1574
1575
1576
1577 } else {
1578
1579 ap->hsm_task_state = HSM_ST;
1580
1581 if (qc->tf.flags & ATA_TFLAG_POLLING)
1582 ata_pio_queue_task(ap, qc, 0);
1583
1584
1585
1586
1587 }
1588
1589 break;
1590
1591 case ATAPI_PROT_PIO:
1592 case ATAPI_PROT_NODATA:
1593 if (qc->tf.flags & ATA_TFLAG_POLLING)
1594 ata_qc_set_polling(qc);
1595
1596 ata_tf_to_host(ap, &qc->tf);
1597
1598 ap->hsm_task_state = HSM_ST_FIRST;
1599
1600
1601 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
1602 (qc->tf.flags & ATA_TFLAG_POLLING))
1603 ata_pio_queue_task(ap, qc, 0);
1604 break;
1605
1606 case ATAPI_PROT_DMA:
1607 WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
1608
1609 ap->ops->sff_tf_load(ap, &qc->tf);
1610 ap->ops->bmdma_setup(qc);
1611 ap->hsm_task_state = HSM_ST_FIRST;
1612
1613
1614 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1615 ata_pio_queue_task(ap, qc, 0);
1616 break;
1617
1618 default:
1619 WARN_ON_ONCE(1);
1620 return AC_ERR_SYSTEM;
1621 }
1622
1623 return 0;
1624}
1625EXPORT_SYMBOL_GPL(ata_sff_qc_issue);

/**
 *	ata_sff_qc_fill_rtf - fill result TF using ->sff_tf_read
 *	@qc: qc to fill result TF for
 *
 *	@qc is finished and result TF needs to be filled.  Fill it
 *	using ->sff_tf_read.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	true indicating that result TF is successfully filled.
 */
1640bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
1641{
1642 qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf);
1643 return true;
1644}
1645EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);

/**
 *	ata_sff_host_intr - Handle host interrupt for given (port, task)
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle host interrupt for given queued command.  Currently,
 *	only DMA interrupts are handled.  All other commands are
 *	handled via polling with interrupts disabled (nIEN bit).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */
1662unsigned int ata_sff_host_intr(struct ata_port *ap,
1663 struct ata_queued_cmd *qc)
1664{
1665 struct ata_eh_info *ehi = &ap->link.eh_info;
1666 u8 status, host_stat = 0;
1667
1668 VPRINTK("ata%u: protocol %d task_state %d\n",
1669 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
1670
1671
1672 switch (ap->hsm_task_state) {
1673 case HSM_ST_FIRST:
1674
1675
1676
1677
1678
1679
1680
1681
1682 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1683 goto idle_irq;
1684 break;
1685 case HSM_ST_LAST:
1686 if (qc->tf.protocol == ATA_PROT_DMA ||
1687 qc->tf.protocol == ATAPI_PROT_DMA) {
1688
1689 host_stat = ap->ops->bmdma_status(ap);
1690 VPRINTK("ata%u: host_stat 0x%X\n",
1691 ap->print_id, host_stat);
1692
1693
1694 if (!(host_stat & ATA_DMA_INTR))
1695 goto idle_irq;
1696
1697
1698 ap->ops->bmdma_stop(qc);
1699
1700 if (unlikely(host_stat & ATA_DMA_ERR)) {
1701
1702 qc->err_mask |= AC_ERR_HOST_BUS;
1703 ap->hsm_task_state = HSM_ST_ERR;
1704 }
1705 }
1706 break;
1707 case HSM_ST:
1708 break;
1709 default:
1710 goto idle_irq;
1711 }
1712
1713
1714
1715 status = ata_sff_irq_status(ap);
1716 if (status & ATA_BUSY)
1717 goto idle_irq;
1718
1719
1720 ap->ops->sff_irq_clear(ap);
1721
1722 ata_sff_hsm_move(ap, qc, status, 0);
1723
1724 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
1725 qc->tf.protocol == ATAPI_PROT_DMA))
1726 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
1727
1728 return 1;
1729
1730idle_irq:
1731 ap->stats.idle_irq++;
1732
1733#ifdef ATA_IRQ_TRAP
1734 if ((ap->stats.idle_irq % 1000) == 0) {
1735 ap->ops->sff_check_status(ap);
1736 ap->ops->sff_irq_clear(ap);
1737 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
1738 return 1;
1739 }
1740#endif
1741 return 0;
1742}
1743EXPORT_SYMBOL_GPL(ata_sff_host_intr);

/**
 *	ata_sff_interrupt - Default ATA host interrupt handler
 *	@irq: irq line (unused)
 *	@dev_instance: pointer to our ata_host information structure
 *
 *	Default interrupt handler for PCI IDE devices.  Calls
 *	ata_sff_host_intr() for each port that is not disabled.
 *
 *	LOCKING:
 *	Obtains host lock during operation.
 *
 *	RETURNS:
 *	IRQ_NONE or IRQ_HANDLED.
 */
1759irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
1760{
1761 struct ata_host *host = dev_instance;
1762 unsigned int i;
1763 unsigned int handled = 0;
1764 unsigned long flags;
1765
1766
1767 spin_lock_irqsave(&host->lock, flags);
1768
1769 for (i = 0; i < host->n_ports; i++) {
1770 struct ata_port *ap;
1771
1772 ap = host->ports[i];
1773 if (ap &&
1774 !(ap->flags & ATA_FLAG_DISABLED)) {
1775 struct ata_queued_cmd *qc;
1776
1777 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1778 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
1779 (qc->flags & ATA_QCFLAG_ACTIVE))
1780 handled |= ata_sff_host_intr(ap, qc);
1781 }
1782 }
1783
1784 spin_unlock_irqrestore(&host->lock, flags);
1785
1786 return IRQ_RETVAL(handled);
1787}
1788EXPORT_SYMBOL_GPL(ata_sff_interrupt);
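
/*
 * Illustrative sketch (hypothetical platform driver): a host that is not
 * probed through ata_pci_sff_init_one() can hand this handler straight to
 * ata_host_activate() once its ports are set up:
 *
 *	rc = ata_host_activate(host, irq, ata_sff_interrupt,
 *			       IRQF_SHARED, &my_sht);
 *
 * where "irq" and "my_sht" come from the driver's own probe routine.
 */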

/**
 *	ata_sff_lost_interrupt	-	Check for an apparent lost interrupt
 *	@ap: port that appears to have timed out
 *
 *	Called from the libata error handlers when the core code suspects
 *	an interrupt has been lost. If it has, complete anything we can
 *	and then return. Interface must support altstatus for this faster
 *	recovery to occur.
 *
 *	Locking:
 *	Caller holds host lock
 */
1803void ata_sff_lost_interrupt(struct ata_port *ap)
1804{
1805 u8 status;
1806 struct ata_queued_cmd *qc;
1807
1808
1809 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1810
1811 if (qc == NULL || !(qc->flags & ATA_QCFLAG_ACTIVE))
1812 return;
1813
1814 if (qc->tf.flags & ATA_TFLAG_POLLING)
1815 return;
1816
1817
1818 status = ata_sff_altstatus(ap);
1819 if (status & ATA_BUSY)
1820 return;
1821
1822
1823
1824 ata_port_printk(ap, KERN_WARNING, "lost interrupt (Status 0x%x)\n",
1825 status);
1826
1827
1828 ata_sff_host_intr(ap, qc);
1829}
1830EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt);

/**
 *	ata_sff_freeze - Freeze SFF controller port
 *	@ap: port to freeze
 *
 *	Freeze SFF controller port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
1841void ata_sff_freeze(struct ata_port *ap)
1842{
1843 struct ata_ioports *ioaddr = &ap->ioaddr;
1844
1845 ap->ctl |= ATA_NIEN;
1846 ap->last_ctl = ap->ctl;
1847
1848 if (ioaddr->ctl_addr)
1849 iowrite8(ap->ctl, ioaddr->ctl_addr);
1850
1851
1852
1853
1854
1855 ap->ops->sff_check_status(ap);
1856
1857 ap->ops->sff_irq_clear(ap);
1858}
1859EXPORT_SYMBOL_GPL(ata_sff_freeze);

/**
 *	ata_sff_thaw - Thaw SFF controller port
 *	@ap: port to thaw
 *
 *	Thaw SFF controller port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
1870void ata_sff_thaw(struct ata_port *ap)
1871{
1872
1873 ap->ops->sff_check_status(ap);
1874 ap->ops->sff_irq_clear(ap);
1875 ap->ops->sff_irq_on(ap);
1876}
1877EXPORT_SYMBOL_GPL(ata_sff_thaw);

/**
 *	ata_sff_prereset - prepare SFF link for reset
 *	@link: SFF link to be reset
 *	@deadline: deadline jiffies for the operation
 *
 *	SFF link @link is about to be reset.  Initialize it.  It first
 *	calls ata_std_prereset() and waits for !BSY if the port is
 *	being softreset.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
1894int ata_sff_prereset(struct ata_link *link, unsigned long deadline)
1895{
1896 struct ata_eh_context *ehc = &link->eh_context;
1897 int rc;
1898
1899 rc = ata_std_prereset(link, deadline);
1900 if (rc)
1901 return rc;
1902
1903
1904 if (ehc->i.action & ATA_EH_HARDRESET)
1905 return 0;
1906
1907
1908 if (!ata_link_offline(link)) {
1909 rc = ata_sff_wait_ready(link, deadline);
1910 if (rc && rc != -ENODEV) {
1911 ata_link_printk(link, KERN_WARNING, "device not ready "
1912 "(errno=%d), forcing hardreset\n", rc);
1913 ehc->i.action |= ATA_EH_HARDRESET;
1914 }
1915 }
1916
1917 return 0;
1918}
1919EXPORT_SYMBOL_GPL(ata_sff_prereset);

/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */
1938static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
1939{
1940 struct ata_ioports *ioaddr = &ap->ioaddr;
1941 u8 nsect, lbal;
1942
1943 ap->ops->sff_dev_select(ap, device);
1944
1945 iowrite8(0x55, ioaddr->nsect_addr);
1946 iowrite8(0xaa, ioaddr->lbal_addr);
1947
1948 iowrite8(0xaa, ioaddr->nsect_addr);
1949 iowrite8(0x55, ioaddr->lbal_addr);
1950
1951 iowrite8(0x55, ioaddr->nsect_addr);
1952 iowrite8(0xaa, ioaddr->lbal_addr);
1953
1954 nsect = ioread8(ioaddr->nsect_addr);
1955 lbal = ioread8(ioaddr->lbal_addr);
1956
1957 if ((nsect == 0x55) && (lbal == 0xaa))
1958 return 1;
1959
1960 return 0;
1961}

/**
 *	ata_sff_dev_classify - Parse returned ATA device signature
 *	@dev: ATA device to classify (starting at zero)
 *	@present: device seems present
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
1984unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
1985 u8 *r_err)
1986{
1987 struct ata_port *ap = dev->link->ap;
1988 struct ata_taskfile tf;
1989 unsigned int class;
1990 u8 err;
1991
1992 ap->ops->sff_dev_select(ap, dev->devno);
1993
1994 memset(&tf, 0, sizeof(tf));
1995
1996 ap->ops->sff_tf_read(ap, &tf);
1997 err = tf.feature;
1998 if (r_err)
1999 *r_err = err;
2000
2001
2002 if (err == 0)
2003
2004 dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
2005 else if (err == 1)
2006 ;
2007 else if ((dev->devno == 0) && (err == 0x81))
2008 ;
2009 else
2010 return ATA_DEV_NONE;
2011
2012
2013 class = ata_dev_classify(&tf);
2014
2015 if (class == ATA_DEV_UNKNOWN) {
2016
2017
2018
2019
2020
2021
2022 if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
2023 class = ATA_DEV_ATA;
2024 else
2025 class = ATA_DEV_NONE;
2026 } else if ((class == ATA_DEV_ATA) &&
2027 (ap->ops->sff_check_status(ap) == 0))
2028 class = ATA_DEV_NONE;
2029
2030 return class;
2031}
2032EXPORT_SYMBOL_GPL(ata_sff_dev_classify);

/**
 *	ata_sff_wait_after_reset - wait for devices to become ready after reset
 *	@link: SFF link which is just reset
 *	@devmask: mask of present devices
 *	@deadline: deadline jiffies for the operation
 *
 *	Wait for devices attached to SFF @link to become ready after
 *	reset.  A 150ms pause precedes the polling to avoid accessing
 *	the TF status register too early.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -ENODEV if some or all of the devices in @devmask
 *	don't seem to exist.  -errno on other errors.
 */
2051int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask,
2052 unsigned long deadline)
2053{
2054 struct ata_port *ap = link->ap;
2055 struct ata_ioports *ioaddr = &ap->ioaddr;
2056 unsigned int dev0 = devmask & (1 << 0);
2057 unsigned int dev1 = devmask & (1 << 1);
2058 int rc, ret = 0;
2059
2060 msleep(ATA_WAIT_AFTER_RESET);
2061
2062
2063 rc = ata_sff_wait_ready(link, deadline);
2064
2065
2066
2067 if (rc)
2068 return rc;
2069
2070
2071
2072
2073 if (dev1) {
2074 int i;
2075
2076 ap->ops->sff_dev_select(ap, 1);
2077
2078
2079
2080
2081
2082 for (i = 0; i < 2; i++) {
2083 u8 nsect, lbal;
2084
2085 nsect = ioread8(ioaddr->nsect_addr);
2086 lbal = ioread8(ioaddr->lbal_addr);
2087 if ((nsect == 1) && (lbal == 1))
2088 break;
2089 msleep(50);
2090 }
2091
2092 rc = ata_sff_wait_ready(link, deadline);
2093 if (rc) {
2094 if (rc != -ENODEV)
2095 return rc;
2096 ret = rc;
2097 }
2098 }
2099
2100
2101 ap->ops->sff_dev_select(ap, 0);
2102 if (dev1)
2103 ap->ops->sff_dev_select(ap, 1);
2104 if (dev0)
2105 ap->ops->sff_dev_select(ap, 0);
2106
2107 return ret;
2108}
2109EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset);
2110
2111static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
2112 unsigned long deadline)
2113{
2114 struct ata_ioports *ioaddr = &ap->ioaddr;
2115
2116 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
2117
2118
2119 iowrite8(ap->ctl, ioaddr->ctl_addr);
2120 udelay(20);
2121 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2122 udelay(20);
2123 iowrite8(ap->ctl, ioaddr->ctl_addr);
2124 ap->last_ctl = ap->ctl;
2125
2126
2127 return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
2128}

/**
 *	ata_sff_softreset - reset host port via ATA SRST
 *	@link: ATA link to reset
 *	@classes: resulting classes of attached devices
 *	@deadline: deadline jiffies for the operation
 *
 *	Reset host port using ATA SRST.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
2144int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
2145 unsigned long deadline)
2146{
2147 struct ata_port *ap = link->ap;
2148 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2149 unsigned int devmask = 0;
2150 int rc;
2151 u8 err;
2152
2153 DPRINTK("ENTER\n");
2154
2155
2156 if (ata_devchk(ap, 0))
2157 devmask |= (1 << 0);
2158 if (slave_possible && ata_devchk(ap, 1))
2159 devmask |= (1 << 1);
2160
2161
2162 ap->ops->sff_dev_select(ap, 0);
2163
2164
2165 DPRINTK("about to softreset, devmask=%x\n", devmask);
2166 rc = ata_bus_softreset(ap, devmask, deadline);
2167
2168 if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
2169 ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
2170 return rc;
2171 }
2172
2173
2174 classes[0] = ata_sff_dev_classify(&link->device[0],
2175 devmask & (1 << 0), &err);
2176 if (slave_possible && err != 0x81)
2177 classes[1] = ata_sff_dev_classify(&link->device[1],
2178 devmask & (1 << 1), &err);
2179
2180 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2181 return 0;
2182}
2183EXPORT_SYMBOL_GPL(ata_sff_softreset);

/**
 *	sata_sff_hardreset - reset host port via SATA phy reset
 *	@link: link to reset
 *	@class: resulting class of attached device
 *	@deadline: deadline jiffies for the operation
 *
 *	SATA phy-reset host port using DET bits of SControl register,
 *	wait for !BSY and classify the attached device.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
2200int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
2201 unsigned long deadline)
2202{
2203 struct ata_eh_context *ehc = &link->eh_context;
2204 const unsigned long *timing = sata_ehc_deb_timing(ehc);
2205 bool online;
2206 int rc;
2207
2208 rc = sata_link_hardreset(link, timing, deadline, &online,
2209 ata_sff_check_ready);
2210 if (online)
2211 *class = ata_sff_dev_classify(link->device, 1, NULL);
2212
2213 DPRINTK("EXIT, class=%u\n", *class);
2214 return rc;
2215}
2216EXPORT_SYMBOL_GPL(sata_sff_hardreset);

/**
 *	ata_sff_postreset - SFF postreset callback
 *	@link: the target SFF ata_link
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  It first
 *	calls ata_std_postreset() and performs SFF specific postreset
 *	processing.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
2230void ata_sff_postreset(struct ata_link *link, unsigned int *classes)
2231{
2232 struct ata_port *ap = link->ap;
2233
2234 ata_std_postreset(link, classes);
2235
2236
2237 if (classes[0] != ATA_DEV_NONE)
2238 ap->ops->sff_dev_select(ap, 1);
2239 if (classes[1] != ATA_DEV_NONE)
2240 ap->ops->sff_dev_select(ap, 0);
2241
2242
2243 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2244 DPRINTK("EXIT, no device\n");
2245 return;
2246 }
2247
2248
2249 if (ap->ioaddr.ctl_addr) {
2250 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2251 ap->last_ctl = ap->ctl;
2252 }
2253}
2254EXPORT_SYMBOL_GPL(ata_sff_postreset);

/**
 *	ata_sff_drain_fifo - Stock FIFO drain logic for SFF controllers
 *	@qc: command
 *
 *	Drain the FIFO and device of any stuck data following a command
 *	failing to complete. In some cases this is necessary before a
 *	reset will recover the device.
 *
 */
2266void ata_sff_drain_fifo(struct ata_queued_cmd *qc)
2267{
2268 int count;
2269 struct ata_port *ap;
2270
2271
2272 if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
2273 return;
2274
2275 ap = qc->ap;
2276
2277 for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ)
2278 && count < 32768; count++)
2279 ioread16(ap->ioaddr.data_addr);
2280
2281
2282 if (count)
2283 ata_port_printk(ap, KERN_DEBUG,
2284 "drained %d bytes to clear DRQ.\n", count);
2285
2286}
2287EXPORT_SYMBOL_GPL(ata_sff_drain_fifo);

/**
 *	ata_sff_error_handler - Stock error handler for BMDMA controller
 *	@ap: port to handle error for
 *
 *	Stock error handler for SFF controller.  It can handle both
 *	PATA and SATA controllers.  Many controllers should be able to
 *	use this EH as-is or with some added handling before and
 *	after.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
2301void ata_sff_error_handler(struct ata_port *ap)
2302{
2303 ata_reset_fn_t softreset = ap->ops->softreset;
2304 ata_reset_fn_t hardreset = ap->ops->hardreset;
2305 struct ata_queued_cmd *qc;
2306 unsigned long flags;
2307 int thaw = 0;
2308
2309 qc = __ata_qc_from_tag(ap, ap->link.active_tag);
2310 if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
2311 qc = NULL;
2312
2313
2314 spin_lock_irqsave(ap->lock, flags);
2315
2316 ap->hsm_task_state = HSM_ST_IDLE;
2317
2318 if (ap->ioaddr.bmdma_addr &&
2319 qc && (qc->tf.protocol == ATA_PROT_DMA ||
2320 qc->tf.protocol == ATAPI_PROT_DMA)) {
2321 u8 host_stat;
2322
2323 host_stat = ap->ops->bmdma_status(ap);
2324
2325
2326
2327
2328
2329
2330 if (qc->err_mask == AC_ERR_TIMEOUT
2331 && (host_stat & ATA_DMA_ERR)) {
2332 qc->err_mask = AC_ERR_HOST_BUS;
2333 thaw = 1;
2334 }
2335
2336 ap->ops->bmdma_stop(qc);
2337 }
2338
2339 ata_sff_sync(ap);
2340 ap->ops->sff_check_status(ap);
2341 ap->ops->sff_irq_clear(ap);
2342
2343
2344
2345
2346
2347 if (ap->ops->drain_fifo)
2348 ap->ops->drain_fifo(qc);
2349
2350 spin_unlock_irqrestore(ap->lock, flags);
2351
2352 if (thaw)
2353 ata_eh_thaw_port(ap);
2354
2355
2356
2357
2358
2359
2360 if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr)
2361 softreset = NULL;
2362 if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link))
2363 hardreset = NULL;
2364
2365 ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,
2366 ap->ops->postreset);
2367}
2368EXPORT_SYMBOL_GPL(ata_sff_error_handler);

/**
 *	ata_sff_post_internal_cmd - Stock post_internal_cmd for SFF controller
 *	@qc: internal command to clean up
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
2377void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc)
2378{
2379 struct ata_port *ap = qc->ap;
2380 unsigned long flags;
2381
2382 spin_lock_irqsave(ap->lock, flags);
2383
2384 ap->hsm_task_state = HSM_ST_IDLE;
2385
2386 if (ap->ioaddr.bmdma_addr)
2387 ata_bmdma_stop(qc);
2388
2389 spin_unlock_irqrestore(ap->lock, flags);
2390}
2391EXPORT_SYMBOL_GPL(ata_sff_post_internal_cmd);

/**
 *	ata_sff_port_start - Set port up for dma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates space for PRD table if the device
 *	is DMA capable SFF.
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
2406int ata_sff_port_start(struct ata_port *ap)
2407{
2408 if (ap->ioaddr.bmdma_addr)
2409 return ata_port_start(ap);
2410 return 0;
2411}
2412EXPORT_SYMBOL_GPL(ata_sff_port_start);
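
/*
 * Illustrative sketch (hypothetical driver): a driver that needs its own
 * per-port setup can still chain to the stock helper:
 *
 *	static int my_pata_port_start(struct ata_port *ap)
 *	{
 *		// driver-specific setup would go here
 *		return ata_sff_port_start(ap);
 *	}
 */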

/**
 *	ata_sff_port_start32 - Set port up for dma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates space for PRD table if the device
 *	is DMA capable SFF.  Port is initialized for 32bit PIO.
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
2428int ata_sff_port_start32(struct ata_port *ap)
2429{
2430 ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
2431 if (ap->ioaddr.bmdma_addr)
2432 return ata_port_start(ap);
2433 return 0;
2434}
2435EXPORT_SYMBOL_GPL(ata_sff_port_start32);

/**
 *	ata_sff_std_ports - initialize ioaddr with standard port offsets.
 *	@ioaddr: IO address structure to be initialized
 *
 *	Utility function which initializes data_addr, error_addr,
 *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 *	device_addr, status_addr, and command_addr to standard offsets
 *	relative to cmd_addr.
 *
 *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */
2448void ata_sff_std_ports(struct ata_ioports *ioaddr)
2449{
2450 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
2451 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
2452 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
2453 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
2454 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
2455 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
2456 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
2457 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
2458 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
2459 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
2460}
2461EXPORT_SYMBOL_GPL(ata_sff_std_ports);
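
/*
 * Illustrative sketch (hypothetical driver; cmd_base/ctl_base are assumed
 * mappings from the driver's own probe code): a driver fills in cmd_addr
 * and ctl_addr and lets ata_sff_std_ports() derive the rest, mirroring what
 * ata_pci_sff_init_host() does below:
 *
 *	ap->ioaddr.cmd_addr = cmd_base;
 *	ap->ioaddr.altstatus_addr = ctl_base;
 *	ap->ioaddr.ctl_addr = ctl_base;
 *	ata_sff_std_ports(&ap->ioaddr);
 */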
2462
2463unsigned long ata_bmdma_mode_filter(struct ata_device *adev,
2464 unsigned long xfer_mask)
2465{
2466
2467
2468
2469 if (adev->link->ap->ioaddr.bmdma_addr == NULL)
2470 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2471 return xfer_mask;
2472}
2473EXPORT_SYMBOL_GPL(ata_bmdma_mode_filter);

/**
 *	ata_bmdma_setup - Set up PCI IDE BMDMA transaction
 *	@qc: Info associated with this ATA transaction.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
2482void ata_bmdma_setup(struct ata_queued_cmd *qc)
2483{
2484 struct ata_port *ap = qc->ap;
2485 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
2486 u8 dmactl;
2487
2488
2489 mb();
2490 iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2491
2492
2493 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2494 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
2495 if (!rw)
2496 dmactl |= ATA_DMA_WR;
2497 iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2498
2499
2500 ap->ops->sff_exec_command(ap, &qc->tf);
2501}
2502EXPORT_SYMBOL_GPL(ata_bmdma_setup);

/**
 *	ata_bmdma_start - Start a PCI IDE BMDMA transaction
 *	@qc: Info associated with this ATA transaction.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
2511void ata_bmdma_start(struct ata_queued_cmd *qc)
2512{
2513 struct ata_port *ap = qc->ap;
2514 u8 dmactl;
2515
2516
2517 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2518 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2519
2520
2521
2522
2523
2524
2525
2526
2527
2528
2529
2530
2531
2532
2533
2534}
2535EXPORT_SYMBOL_GPL(ata_bmdma_start);

/**
 *	ata_bmdma_stop - Stop PCI IDE BMDMA transfer
 *	@qc: Command we are ending DMA for
 *
 *	Clears the ATA_DMA_START flag in the dma control register
 *
 *	May be used as the bmdma_stop() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
2548void ata_bmdma_stop(struct ata_queued_cmd *qc)
2549{
2550 struct ata_port *ap = qc->ap;
2551 void __iomem *mmio = ap->ioaddr.bmdma_addr;
2552
2553
2554 iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
2555 mmio + ATA_DMA_CMD);
2556
2557
2558 ata_sff_dma_pause(ap);
2559}
2560EXPORT_SYMBOL_GPL(ata_bmdma_stop);

/**
 *	ata_bmdma_status - Read PCI IDE BMDMA status
 *	@ap: Port associated with this ATA transaction.
 *
 *	Read and return BMDMA status register.
 *
 *	May be used as the bmdma_status() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
2573u8 ata_bmdma_status(struct ata_port *ap)
2574{
2575 return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
2576}
2577EXPORT_SYMBOL_GPL(ata_bmdma_status);
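
/*
 * Illustrative summary of how the four bmdma_* hooks above are sequenced by
 * ata_sff_qc_issue() and ata_sff_host_intr() for an ATA_PROT_DMA command:
 *
 *	ap->ops->sff_tf_load(ap, &qc->tf);	// load taskfile
 *	ap->ops->bmdma_setup(qc);		// program PRD, issue command
 *	ap->ops->bmdma_start(qc);		// set ATA_DMA_START
 *	//	... device raises the completion interrupt ...
 *	host_stat = ap->ops->bmdma_status(ap);	// check ATA_DMA_INTR/ATA_DMA_ERR
 *	ap->ops->bmdma_stop(qc);		// clear ATA_DMA_START, flush DMA
 */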

/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_DISABLED if bus reset fails.
 *
 *	DEPRECATED:
 *	This function is only for drivers which still use old EH and
 *	will be removed soon.
 */
2602void ata_bus_reset(struct ata_port *ap)
2603{
2604 struct ata_device *device = ap->link.device;
2605 struct ata_ioports *ioaddr = &ap->ioaddr;
2606 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2607 u8 err;
2608 unsigned int dev0, dev1 = 0, devmask = 0;
2609 int rc;
2610
2611 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
2612
2613
2614 if (ap->flags & ATA_FLAG_SATA_RESET)
2615 dev0 = 1;
2616 else {
2617 dev0 = ata_devchk(ap, 0);
2618 if (slave_possible)
2619 dev1 = ata_devchk(ap, 1);
2620 }
2621
2622 if (dev0)
2623 devmask |= (1 << 0);
2624 if (dev1)
2625 devmask |= (1 << 1);
2626
2627
2628 ap->ops->sff_dev_select(ap, 0);
2629
2630
2631 if (ap->flags & ATA_FLAG_SRST) {
2632 rc = ata_bus_softreset(ap, devmask,
2633 ata_deadline(jiffies, 40000));
2634 if (rc && rc != -ENODEV)
2635 goto err_out;
2636 }
2637
2638
2639
2640
2641 device[0].class = ata_sff_dev_classify(&device[0], dev0, &err);
2642 if ((slave_possible) && (err != 0x81))
2643 device[1].class = ata_sff_dev_classify(&device[1], dev1, &err);
2644
2645
2646 if (device[1].class != ATA_DEV_NONE)
2647 ap->ops->sff_dev_select(ap, 1);
2648 if (device[0].class != ATA_DEV_NONE)
2649 ap->ops->sff_dev_select(ap, 0);
2650
2651
2652 if ((device[0].class == ATA_DEV_NONE) &&
2653 (device[1].class == ATA_DEV_NONE))
2654 goto err_out;
2655
2656 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2657
2658 iowrite8(ap->ctl, ioaddr->ctl_addr);
2659 ap->last_ctl = ap->ctl;
2660 }
2661
2662 DPRINTK("EXIT\n");
2663 return;
2664
2665err_out:
2666 ata_port_printk(ap, KERN_ERR, "disabling port\n");
2667 ata_port_disable(ap);
2668
2669 DPRINTK("EXIT\n");
2670}
2671EXPORT_SYMBOL_GPL(ata_bus_reset);
2672
#ifdef CONFIG_PCI

/**
 *	ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex
 *	@pdev: PCI device
 *
 *	Some PCI ATA devices report simplex mode but in fact can be told to
 *	enter non simplex mode. This implements the necessary logic to
 *	perform the task on such devices. Calling it on other devices will
 *	have -undefined- behaviour.
 */
2684int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
2685{
2686 unsigned long bmdma = pci_resource_start(pdev, 4);
2687 u8 simplex;
2688
2689 if (bmdma == 0)
2690 return -ENOENT;
2691
2692 simplex = inb(bmdma + 0x02);
2693 outb(simplex & 0x60, bmdma + 0x02);
2694 simplex = inb(bmdma + 0x02);
2695 if (simplex & 0x80)
2696 return -EOPNOTSUPP;
2697 return 0;
2698}
2699EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
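
/*
 * Illustrative sketch (hypothetical driver): a driver that knows its
 * controller only pretends to be simplex can try to clear the bit before
 * registering the host:
 *
 *	if (ata_pci_bmdma_clear_simplex(pdev))
 *		dev_printk(KERN_WARNING, &pdev->dev,
 *			   "simplex mode could not be cleared\n");
 */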

/**
 *	ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
 *	@host: target ATA host
 *
 *	Acquire PCI BMDMA resources and initialize @host accordingly.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
2713int ata_pci_bmdma_init(struct ata_host *host)
2714{
2715 struct device *gdev = host->dev;
2716 struct pci_dev *pdev = to_pci_dev(gdev);
2717 int i, rc;
2718
2719
2720 if (pci_resource_start(pdev, 4) == 0)
2721 return 0;
2722
2723
2724 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
2725 if (rc)
2726 return rc;
2727 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
2728 if (rc)
2729 return rc;
2730
2731
2732 rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
2733 if (rc) {
2734 dev_printk(KERN_ERR, gdev, "failed to request/iomap BAR4\n");
2735 return -ENOMEM;
2736 }
2737 host->iomap = pcim_iomap_table(pdev);
2738
2739 for (i = 0; i < 2; i++) {
2740 struct ata_port *ap = host->ports[i];
2741 void __iomem *bmdma = host->iomap[4] + 8 * i;
2742
2743 if (ata_port_is_dummy(ap))
2744 continue;
2745
2746 ap->ioaddr.bmdma_addr = bmdma;
2747 if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
2748 (ioread8(bmdma + 2) & 0x80))
2749 host->flags |= ATA_HOST_SIMPLEX;
2750
2751 ata_port_desc(ap, "bmdma 0x%llx",
2752 (unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
2753 }
2754
2755 return 0;
2756}
2757EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
2758
2759static int ata_resources_present(struct pci_dev *pdev, int port)
2760{
2761 int i;
2762
2763
2764 port = port * 2;
2765 for (i = 0; i < 2; i++) {
2766 if (pci_resource_start(pdev, port + i) == 0 ||
2767 pci_resource_len(pdev, port + i) == 0)
2768 return 0;
2769 }
2770 return 1;
2771}

/**
 *	ata_pci_sff_init_host - acquire native PCI ATA resources and init host
 *	@host: target ATA host
 *
 *	Acquire native PCI ATA resources for @host and initialize the
 *	first two ports of @host accordingly.  Ports marked dummy are
 *	skipped and allocation failure makes the port dummy.
 *
 *	Note that native PCI resources are valid even for legacy hosts
 *	as we fix up pdev resources array early in boot, so this
 *	function can be used for both native and legacy SFF hosts.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if at least one port is initialized, -ENODEV if no port is
 *	available.
 */
2792int ata_pci_sff_init_host(struct ata_host *host)
2793{
2794 struct device *gdev = host->dev;
2795 struct pci_dev *pdev = to_pci_dev(gdev);
2796 unsigned int mask = 0;
2797 int i, rc;
2798
2799
2800 for (i = 0; i < 2; i++) {
2801 struct ata_port *ap = host->ports[i];
2802 int base = i * 2;
2803 void __iomem * const *iomap;
2804
2805 if (ata_port_is_dummy(ap))
2806 continue;
2807
2808
2809
2810
2811
2812 if (!ata_resources_present(pdev, i)) {
2813 ap->ops = &ata_dummy_port_ops;
2814 continue;
2815 }
2816
2817 rc = pcim_iomap_regions(pdev, 0x3 << base,
2818 dev_driver_string(gdev));
2819 if (rc) {
2820 dev_printk(KERN_WARNING, gdev,
2821 "failed to request/iomap BARs for port %d "
2822 "(errno=%d)\n", i, rc);
2823 if (rc == -EBUSY)
2824 pcim_pin_device(pdev);
2825 ap->ops = &ata_dummy_port_ops;
2826 continue;
2827 }
2828 host->iomap = iomap = pcim_iomap_table(pdev);
2829
2830 ap->ioaddr.cmd_addr = iomap[base];
2831 ap->ioaddr.altstatus_addr =
2832 ap->ioaddr.ctl_addr = (void __iomem *)
2833 ((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);
2834 ata_sff_std_ports(&ap->ioaddr);
2835
2836 ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
2837 (unsigned long long)pci_resource_start(pdev, base),
2838 (unsigned long long)pci_resource_start(pdev, base + 1));
2839
2840 mask |= 1 << i;
2841 }
2842
2843 if (!mask) {
2844 dev_printk(KERN_ERR, gdev, "no available native port\n");
2845 return -ENODEV;
2846 }
2847
2848 return 0;
2849}
2850EXPORT_SYMBOL_GPL(ata_pci_sff_init_host);

/**
 *	ata_pci_sff_prepare_host - helper to prepare native PCI ATA host
 *	@pdev: target PCI device
 *	@ppi: array of port_info, must be enough for two ports
 *	@r_host: out argument for the initialized ATA host
 *
 *	Helper to allocate ATA host for @pdev, acquire all native PCI
 *	resources and initialize it accordingly in one go.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
2867int ata_pci_sff_prepare_host(struct pci_dev *pdev,
2868 const struct ata_port_info * const *ppi,
2869 struct ata_host **r_host)
2870{
2871 struct ata_host *host;
2872 int rc;
2873
2874 if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
2875 return -ENOMEM;
2876
2877 host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
2878 if (!host) {
2879 dev_printk(KERN_ERR, &pdev->dev,
2880 "failed to allocate ATA host\n");
2881 rc = -ENOMEM;
2882 goto err_out;
2883 }
2884
2885 rc = ata_pci_sff_init_host(host);
2886 if (rc)
2887 goto err_out;
2888
2889
2890 rc = ata_pci_bmdma_init(host);
2891 if (rc)
2892 goto err_bmdma;
2893
2894 devres_remove_group(&pdev->dev, NULL);
2895 *r_host = host;
2896 return 0;
2897
2898err_bmdma:
2899
2900
2901
2902
2903
2904 pcim_iounmap_regions(pdev, 0xf);
2905err_out:
2906 devres_release_group(&pdev->dev, NULL);
2907 return rc;
2908}
2909EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host);

/**
 *	ata_pci_sff_activate_host - start SFF host, request IRQ and register it
 *	@host: target SFF ATA host
 *	@irq_handler: irq_handler used when requesting IRQ(s)
 *	@sht: scsi_host_template to use when registering the host
 *
 *	This is the counterpart of ata_host_activate() for SFF ATA
 *	hosts.  This separate helper is necessary because SFF hosts
 *	use two separate interrupts in legacy mode.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
2927int ata_pci_sff_activate_host(struct ata_host *host,
2928 irq_handler_t irq_handler,
2929 struct scsi_host_template *sht)
2930{
2931 struct device *dev = host->dev;
2932 struct pci_dev *pdev = to_pci_dev(dev);
2933 const char *drv_name = dev_driver_string(host->dev);
2934 int legacy_mode = 0, rc;
2935
2936 rc = ata_host_start(host);
2937 if (rc)
2938 return rc;
2939
2940 if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
2941 u8 tmp8, mask;
2942
2943
2944 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
2945 mask = (1 << 2) | (1 << 0);
2946 if ((tmp8 & mask) != mask)
2947 legacy_mode = 1;
2948#if defined(CONFIG_NO_ATA_LEGACY)
2949
2950
2951
2952 if (legacy_mode) {
2953 printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n");
2954 return -EOPNOTSUPP;
2955 }
2956#endif
2957 }
2958
2959 if (!devres_open_group(dev, NULL, GFP_KERNEL))
2960 return -ENOMEM;
2961
2962 if (!legacy_mode && pdev->irq) {
2963 rc = devm_request_irq(dev, pdev->irq, irq_handler,
2964 IRQF_SHARED, drv_name, host);
2965 if (rc)
2966 goto out;
2967
2968 ata_port_desc(host->ports[0], "irq %d", pdev->irq);
2969 ata_port_desc(host->ports[1], "irq %d", pdev->irq);
2970 } else if (legacy_mode) {
2971 if (!ata_port_is_dummy(host->ports[0])) {
2972 rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
2973 irq_handler, IRQF_SHARED,
2974 drv_name, host);
2975 if (rc)
2976 goto out;
2977
2978 ata_port_desc(host->ports[0], "irq %d",
2979 ATA_PRIMARY_IRQ(pdev));
2980 }
2981
2982 if (!ata_port_is_dummy(host->ports[1])) {
2983 rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
2984 irq_handler, IRQF_SHARED,
2985 drv_name, host);
2986 if (rc)
2987 goto out;
2988
2989 ata_port_desc(host->ports[1], "irq %d",
2990 ATA_SECONDARY_IRQ(pdev));
2991 }
2992 }
2993
2994 rc = ata_host_register(host, sht);
2995out:
2996 if (rc == 0)
2997 devres_remove_group(dev, NULL);
2998 else
2999 devres_release_group(dev, NULL);
3000
3001 return rc;
3002}
3003EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);

/**
 *	ata_pci_sff_init_one - Initialize/register PCI IDE host controller
 *	@pdev: Controller to be initialized
 *	@ppi: array of port_info, must be enough for two ports
 *	@sht: scsi_host_template to use when registering the host
 *	@host_priv: host private_data
 *
 *	This is a helper function which can be called from a driver's
 *	xxx_init_one() probe function if the hardware uses traditional
 *	IDE taskfile registers.
 *
 *	This function calls pci_enable_device(), reserves its register
 *	regions, sets the dma mask, enables bus master mode, and
 *	registers the resulting ATA host.
 *
 *	ASSUMPTION:
 *	Nobody makes legacy mode PCI IDE hardware with separate
 *	function 0 and function 1.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 *
 *	RETURNS:
 *	Zero on success, negative errno-based value on error.
 */
3030int ata_pci_sff_init_one(struct pci_dev *pdev,
3031 const struct ata_port_info * const *ppi,
3032 struct scsi_host_template *sht, void *host_priv)
3033{
3034 struct device *dev = &pdev->dev;
3035 const struct ata_port_info *pi = NULL;
3036 struct ata_host *host = NULL;
3037 int i, rc;
3038
3039 DPRINTK("ENTER\n");
3040
3041
3042 for (i = 0; i < 2 && ppi[i]; i++) {
3043 if (ppi[i]->port_ops != &ata_dummy_port_ops) {
3044 pi = ppi[i];
3045 break;
3046 }
3047 }
3048
3049 if (!pi) {
3050 dev_printk(KERN_ERR, &pdev->dev,
3051 "no valid port_info specified\n");
3052 return -EINVAL;
3053 }
3054
3055 if (!devres_open_group(dev, NULL, GFP_KERNEL))
3056 return -ENOMEM;
3057
3058 rc = pcim_enable_device(pdev);
3059 if (rc)
3060 goto out;
3061
3062
3063 rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
3064 if (rc)
3065 goto out;
3066 host->private_data = host_priv;
3067
3068 pci_set_master(pdev);
3069 rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
3070out:
3071 if (rc == 0)
3072 devres_remove_group(&pdev->dev, NULL);
3073 else
3074 devres_release_group(&pdev->dev, NULL);
3075
3076 return rc;
3077}
3078EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
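
/*
 * Illustrative sketch (hypothetical driver; all names, masks and the
 * scsi_host_template are assumptions): a simple PCI PATA driver can often
 * reduce its probe routine to a single call of the helper above:
 *
 *	static const struct ata_port_info my_pata_port_info = {
 *		.flags		= ATA_FLAG_SLAVE_POSS,
 *		.pio_mask	= ATA_PIO4,
 *		.mwdma_mask	= ATA_MWDMA2,
 *		.udma_mask	= ATA_UDMA5,
 *		.port_ops	= &ata_bmdma_port_ops,
 *	};
 *
 *	static int my_pata_init_one(struct pci_dev *pdev,
 *				    const struct pci_device_id *id)
 *	{
 *		const struct ata_port_info *ppi[] =
 *			{ &my_pata_port_info, NULL };
 *
 *		return ata_pci_sff_init_one(pdev, ppi, &my_pata_sht, NULL);
 *	}
 */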

#endif /* CONFIG_PCI */