/*
 * libata-core.c - helper library for ATA
 *
 * NOTE(review): the original file header (description/license block) was
 * lost during extraction — only stray line numbers remained here.  Restore
 * the proper header and license text from version control before committing.
 */
43#include <linux/kernel.h>
44#include <linux/module.h>
45#include <linux/pci.h>
46#include <linux/init.h>
47#include <linux/list.h>
48#include <linux/mm.h>
49#include <linux/spinlock.h>
50#include <linux/blkdev.h>
51#include <linux/delay.h>
52#include <linux/timer.h>
53#include <linux/interrupt.h>
54#include <linux/completion.h>
55#include <linux/suspend.h>
56#include <linux/workqueue.h>
57#include <linux/scatterlist.h>
58#include <linux/io.h>
59#include <linux/async.h>
60#include <linux/log2.h>
61#include <linux/slab.h>
62#include <scsi/scsi.h>
63#include <scsi/scsi_cmnd.h>
64#include <scsi/scsi_host.h>
65#include <linux/libata.h>
66#include <asm/byteorder.h>
67#include <linux/cdrom.h>
68#include <linux/ratelimit.h>
69#include <linux/pm_runtime.h>
70#include <linux/platform_device.h>
71
72#include "libata.h"
73#include "libata-transport.h"
74
75
76const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
77const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
78const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
79
/*
 * Base port operations every libata driver inherits: standard prereset,
 * postreset, error handler and EH scheduling/completion hooks.
 */
const struct ata_port_operations ata_base_port_ops = {
	.prereset		= ata_std_prereset,
	.postreset		= ata_std_postreset,
	.error_handler		= ata_std_error_handler,
	.sched_eh		= ata_std_sched_eh,
	.end_eh			= ata_std_end_eh,
};
87
/*
 * Default operations for SATA ports: inherit the base ops and add the
 * standard qc_defer policy and SATA hardreset.
 */
const struct ata_port_operations sata_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_defer		= ata_std_qc_defer,
	.hardreset		= sata_std_hardreset,
};
94
/* Forward declarations for device-configuration helpers defined later. */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

/* Monotonically increasing print id; presumably consumed when naming
 * ports/hosts — TODO confirm against the registration path. */
atomic_t ata_print_id = ATOMIC_INIT(0);
102
/* One set of forced settings parsed from the "force" module parameter. */
struct ata_force_param {
	const char	*name;		/* human-readable name for log messages */
	unsigned int	cbl;		/* forced cable type (ATA_CBL_NONE = unset) */
	int		spd_limit;	/* forced PHY speed limit (0 = unset) */
	unsigned long	xfer_mask;	/* forced transfer-mode mask (0 = unset) */
	unsigned int	horkage_on;	/* horkage bits to turn on */
	unsigned int	horkage_off;	/* horkage bits to turn off */
	unsigned int	lflags;		/* link flags to force on */
};

/* Associates a force parameter with a port/device (-1 = wildcard). */
struct ata_force_ent {
	int			port;
	int			device;
	struct ata_force_param	param;
};
118
/* Parsed force-parameter table (built elsewhere from ata_force_param_buf). */
static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

/* Raw "force" parameter string; __initdata since it is only parsed at init. */
static char ata_force_param_buf[PAGE_SIZE] __initdata;

module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");

static int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");

static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

/* 0 means "use per-command default timeout" (see ata_exec_internal_sg). */
static int ata_probe_timeout;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
162
163static int atapi_an;
164module_param(atapi_an, int, 0444);
165MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=0ff [default], 1=on)");
166
167MODULE_AUTHOR("Jeff Garzik");
168MODULE_DESCRIPTION("Library module for ATA devices");
169MODULE_LICENSE("GPL");
170MODULE_VERSION(DRV_VERSION);
171
172
173static bool ata_sstatus_online(u32 sstatus)
174{
175 return (sstatus & 0xf) == 0x3;
176}
177
178
179
180
181
182
183
184
185
186
187
188
189
190struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
191 enum ata_link_iter_mode mode)
192{
193 BUG_ON(mode != ATA_LITER_EDGE &&
194 mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
195
196
197 if (!link)
198 switch (mode) {
199 case ATA_LITER_EDGE:
200 case ATA_LITER_PMP_FIRST:
201 if (sata_pmp_attached(ap))
202 return ap->pmp_link;
203
204 case ATA_LITER_HOST_FIRST:
205 return &ap->link;
206 }
207
208
209 if (link == &ap->link)
210 switch (mode) {
211 case ATA_LITER_HOST_FIRST:
212 if (sata_pmp_attached(ap))
213 return ap->pmp_link;
214
215 case ATA_LITER_PMP_FIRST:
216 if (unlikely(ap->slave_link))
217 return ap->slave_link;
218
219 case ATA_LITER_EDGE:
220 return NULL;
221 }
222
223
224 if (unlikely(link == ap->slave_link))
225 return NULL;
226
227
228 if (++link < ap->pmp_link + ap->nr_pmp_links)
229 return link;
230
231 if (mode == ATA_LITER_PMP_FIRST)
232 return &ap->link;
233
234 return NULL;
235}
236
237
238
239
240
241
242
243
244
245
246
247
248
249struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
250 enum ata_dev_iter_mode mode)
251{
252 BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
253 mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
254
255
256 if (!dev)
257 switch (mode) {
258 case ATA_DITER_ENABLED:
259 case ATA_DITER_ALL:
260 dev = link->device;
261 goto check;
262 case ATA_DITER_ENABLED_REVERSE:
263 case ATA_DITER_ALL_REVERSE:
264 dev = link->device + ata_link_max_devices(link) - 1;
265 goto check;
266 }
267
268 next:
269
270 switch (mode) {
271 case ATA_DITER_ENABLED:
272 case ATA_DITER_ALL:
273 if (++dev < link->device + ata_link_max_devices(link))
274 goto check;
275 return NULL;
276 case ATA_DITER_ENABLED_REVERSE:
277 case ATA_DITER_ALL_REVERSE:
278 if (--dev >= link->device)
279 goto check;
280 return NULL;
281 }
282
283 check:
284 if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
285 !ata_dev_enabled(dev))
286 goto next;
287 return dev;
288}
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304struct ata_link *ata_dev_phys_link(struct ata_device *dev)
305{
306 struct ata_port *ap = dev->link->ap;
307
308 if (!ap->slave_link)
309 return dev->link;
310 if (!dev->devno)
311 return &ap->link;
312 return ap->slave_link;
313}
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328void ata_force_cbl(struct ata_port *ap)
329{
330 int i;
331
332 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
333 const struct ata_force_ent *fe = &ata_force_tbl[i];
334
335 if (fe->port != -1 && fe->port != ap->print_id)
336 continue;
337
338 if (fe->param.cbl == ATA_CBL_NONE)
339 continue;
340
341 ap->cbl = fe->param.cbl;
342 ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
343 return;
344 }
345}
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363static void ata_force_link_limits(struct ata_link *link)
364{
365 bool did_spd = false;
366 int linkno = link->pmp;
367 int i;
368
369 if (ata_is_host_link(link))
370 linkno += 15;
371
372 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
373 const struct ata_force_ent *fe = &ata_force_tbl[i];
374
375 if (fe->port != -1 && fe->port != link->ap->print_id)
376 continue;
377
378 if (fe->device != -1 && fe->device != linkno)
379 continue;
380
381
382 if (!did_spd && fe->param.spd_limit) {
383 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
384 ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
385 fe->param.name);
386 did_spd = true;
387 }
388
389
390 if (fe->param.lflags) {
391 link->flags |= fe->param.lflags;
392 ata_link_notice(link,
393 "FORCE: link flag 0x%x forced -> 0x%x\n",
394 fe->param.lflags, link->flags);
395 }
396 }
397}
398
399
400
401
402
403
404
405
406
407
408
409
410static void ata_force_xfermask(struct ata_device *dev)
411{
412 int devno = dev->link->pmp + dev->devno;
413 int alt_devno = devno;
414 int i;
415
416
417 if (ata_is_host_link(dev->link))
418 alt_devno += 15;
419
420 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
421 const struct ata_force_ent *fe = &ata_force_tbl[i];
422 unsigned long pio_mask, mwdma_mask, udma_mask;
423
424 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
425 continue;
426
427 if (fe->device != -1 && fe->device != devno &&
428 fe->device != alt_devno)
429 continue;
430
431 if (!fe->param.xfer_mask)
432 continue;
433
434 ata_unpack_xfermask(fe->param.xfer_mask,
435 &pio_mask, &mwdma_mask, &udma_mask);
436 if (udma_mask)
437 dev->udma_mask = udma_mask;
438 else if (mwdma_mask) {
439 dev->udma_mask = 0;
440 dev->mwdma_mask = mwdma_mask;
441 } else {
442 dev->udma_mask = 0;
443 dev->mwdma_mask = 0;
444 dev->pio_mask = pio_mask;
445 }
446
447 ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
448 fe->param.name);
449 return;
450 }
451}
452
453
454
455
456
457
458
459
460
461
462
463
464static void ata_force_horkage(struct ata_device *dev)
465{
466 int devno = dev->link->pmp + dev->devno;
467 int alt_devno = devno;
468 int i;
469
470
471 if (ata_is_host_link(dev->link))
472 alt_devno += 15;
473
474 for (i = 0; i < ata_force_tbl_size; i++) {
475 const struct ata_force_ent *fe = &ata_force_tbl[i];
476
477 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
478 continue;
479
480 if (fe->device != -1 && fe->device != devno &&
481 fe->device != alt_devno)
482 continue;
483
484 if (!(~dev->horkage & fe->param.horkage_on) &&
485 !(dev->horkage & fe->param.horkage_off))
486 continue;
487
488 dev->horkage |= fe->param.horkage_on;
489 dev->horkage &= ~fe->param.horkage_off;
490
491 ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
492 fe->param.name);
493 }
494}
495
496
497
498
499
500
501
502
503
504
505
506
507
508int atapi_cmd_type(u8 opcode)
509{
510 switch (opcode) {
511 case GPCMD_READ_10:
512 case GPCMD_READ_12:
513 return ATAPI_READ;
514
515 case GPCMD_WRITE_10:
516 case GPCMD_WRITE_12:
517 case GPCMD_WRITE_AND_VERIFY_10:
518 return ATAPI_WRITE;
519
520 case GPCMD_READ_CD:
521 case GPCMD_READ_CD_MSF:
522 return ATAPI_READ_CD;
523
524 case ATA_16:
525 case ATA_12:
526 if (atapi_passthru16)
527 return ATAPI_PASS_THRU;
528
529 default:
530 return ATAPI_MISC;
531 }
532}
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
548{
549 fis[0] = 0x27;
550 fis[1] = pmp & 0xf;
551 if (is_cmd)
552 fis[1] |= (1 << 7);
553
554 fis[2] = tf->command;
555 fis[3] = tf->feature;
556
557 fis[4] = tf->lbal;
558 fis[5] = tf->lbam;
559 fis[6] = tf->lbah;
560 fis[7] = tf->device;
561
562 fis[8] = tf->hob_lbal;
563 fis[9] = tf->hob_lbam;
564 fis[10] = tf->hob_lbah;
565 fis[11] = tf->hob_feature;
566
567 fis[12] = tf->nsect;
568 fis[13] = tf->hob_nsect;
569 fis[14] = 0;
570 fis[15] = tf->ctl;
571
572 fis[16] = tf->auxiliary & 0xff;
573 fis[17] = (tf->auxiliary >> 8) & 0xff;
574 fis[18] = (tf->auxiliary >> 16) & 0xff;
575 fis[19] = (tf->auxiliary >> 24) & 0xff;
576}
577
578
579
580
581
582
583
584
585
586
587
588
589void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
590{
591 tf->command = fis[2];
592 tf->feature = fis[3];
593
594 tf->lbal = fis[4];
595 tf->lbam = fis[5];
596 tf->lbah = fis[6];
597 tf->device = fis[7];
598
599 tf->hob_lbal = fis[8];
600 tf->hob_lbam = fis[9];
601 tf->hob_lbah = fis[10];
602
603 tf->nsect = fis[12];
604 tf->hob_nsect = fis[13];
605}
606
/*
 * R/W command opcode lookup table, indexed by
 * index (0 = multi, 8 = pio, 16 = dma) + fua(4) + lba48(2) + write(1);
 * see ata_rwcmd_protocol().  Zero entries mark unsupported combinations.
 */
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
636
637
638
639
640
641
642
643
644
645
646
647
648static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
649{
650 u8 cmd;
651
652 int index, fua, lba48, write;
653
654 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
655 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
656 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
657
658 if (dev->flags & ATA_DFLAG_PIO) {
659 tf->protocol = ATA_PROT_PIO;
660 index = dev->multi_count ? 0 : 8;
661 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
662
663 tf->protocol = ATA_PROT_PIO;
664 index = dev->multi_count ? 0 : 8;
665 } else {
666 tf->protocol = ATA_PROT_DMA;
667 index = 16;
668 }
669
670 cmd = ata_rw_cmds[index + fua + lba48 + write];
671 if (cmd) {
672 tf->command = cmd;
673 return 0;
674 }
675 return -1;
676}
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
694{
695 u64 block = 0;
696
697 if (tf->flags & ATA_TFLAG_LBA) {
698 if (tf->flags & ATA_TFLAG_LBA48) {
699 block |= (u64)tf->hob_lbah << 40;
700 block |= (u64)tf->hob_lbam << 32;
701 block |= (u64)tf->hob_lbal << 24;
702 } else
703 block |= (tf->device & 0xf) << 24;
704
705 block |= tf->lbah << 16;
706 block |= tf->lbam << 8;
707 block |= tf->lbal;
708 } else {
709 u32 cyl, head, sect;
710
711 cyl = tf->lbam | (tf->lbah << 8);
712 head = tf->device & 0xf;
713 sect = tf->lbal;
714
715 if (!sect) {
716 ata_dev_warn(dev,
717 "device reported invalid CHS sector 0\n");
718 sect = 1;
719 }
720
721 block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
722 }
723
724 return block;
725}
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
748 u64 block, u32 n_block, unsigned int tf_flags,
749 unsigned int tag)
750{
751 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
752 tf->flags |= tf_flags;
753
754 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
755
756 if (!lba_48_ok(block, n_block))
757 return -ERANGE;
758
759 tf->protocol = ATA_PROT_NCQ;
760 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
761
762 if (tf->flags & ATA_TFLAG_WRITE)
763 tf->command = ATA_CMD_FPDMA_WRITE;
764 else
765 tf->command = ATA_CMD_FPDMA_READ;
766
767 tf->nsect = tag << 3;
768 tf->hob_feature = (n_block >> 8) & 0xff;
769 tf->feature = n_block & 0xff;
770
771 tf->hob_lbah = (block >> 40) & 0xff;
772 tf->hob_lbam = (block >> 32) & 0xff;
773 tf->hob_lbal = (block >> 24) & 0xff;
774 tf->lbah = (block >> 16) & 0xff;
775 tf->lbam = (block >> 8) & 0xff;
776 tf->lbal = block & 0xff;
777
778 tf->device = ATA_LBA;
779 if (tf->flags & ATA_TFLAG_FUA)
780 tf->device |= 1 << 7;
781 } else if (dev->flags & ATA_DFLAG_LBA) {
782 tf->flags |= ATA_TFLAG_LBA;
783
784 if (lba_28_ok(block, n_block)) {
785
786 tf->device |= (block >> 24) & 0xf;
787 } else if (lba_48_ok(block, n_block)) {
788 if (!(dev->flags & ATA_DFLAG_LBA48))
789 return -ERANGE;
790
791
792 tf->flags |= ATA_TFLAG_LBA48;
793
794 tf->hob_nsect = (n_block >> 8) & 0xff;
795
796 tf->hob_lbah = (block >> 40) & 0xff;
797 tf->hob_lbam = (block >> 32) & 0xff;
798 tf->hob_lbal = (block >> 24) & 0xff;
799 } else
800
801 return -ERANGE;
802
803 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
804 return -EINVAL;
805
806 tf->nsect = n_block & 0xff;
807
808 tf->lbah = (block >> 16) & 0xff;
809 tf->lbam = (block >> 8) & 0xff;
810 tf->lbal = block & 0xff;
811
812 tf->device |= ATA_LBA;
813 } else {
814
815 u32 sect, head, cyl, track;
816
817
818 if (!lba_28_ok(block, n_block))
819 return -ERANGE;
820
821 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
822 return -EINVAL;
823
824
825 track = (u32)block / dev->sectors;
826 cyl = track / dev->heads;
827 head = track % dev->heads;
828 sect = (u32)block % dev->sectors + 1;
829
830 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
831 (u32)block, track, cyl, head, sect);
832
833
834
835
836
837 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
838 return -ERANGE;
839
840 tf->nsect = n_block & 0xff;
841 tf->lbal = sect;
842 tf->lbam = cyl;
843 tf->lbah = cyl >> 8;
844 tf->device |= head;
845 }
846
847 return 0;
848}
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865unsigned long ata_pack_xfermask(unsigned long pio_mask,
866 unsigned long mwdma_mask,
867 unsigned long udma_mask)
868{
869 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
870 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
871 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
872}
873
874
875
876
877
878
879
880
881
882
883
884void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
885 unsigned long *mwdma_mask, unsigned long *udma_mask)
886{
887 if (pio_mask)
888 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
889 if (mwdma_mask)
890 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
891 if (udma_mask)
892 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
893}
894
/* Maps each xfer-mask bit field (shift + width) to its base XFER_* mode
 * value; terminated by a negative shift.  Used by the ata_xfer_* helpers
 * below to translate between masks and mode numbers. */
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918u8 ata_xfer_mask2mode(unsigned long xfer_mask)
919{
920 int highbit = fls(xfer_mask) - 1;
921 const struct ata_xfer_ent *ent;
922
923 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
924 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
925 return ent->base + highbit - ent->shift;
926 return 0xff;
927}
928
929
930
931
932
933
934
935
936
937
938
939
940
941unsigned long ata_xfer_mode2mask(u8 xfer_mode)
942{
943 const struct ata_xfer_ent *ent;
944
945 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
946 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
947 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
948 & ~((1 << ent->shift) - 1);
949 return 0;
950}
951
952
953
954
955
956
957
958
959
960
961
962
963
964int ata_xfer_mode2shift(unsigned long xfer_mode)
965{
966 const struct ata_xfer_ent *ent;
967
968 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
969 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
970 return ent->shift;
971 return -1;
972}
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988const char *ata_mode_string(unsigned long xfer_mask)
989{
990 static const char * const xfer_mode_str[] = {
991 "PIO0",
992 "PIO1",
993 "PIO2",
994 "PIO3",
995 "PIO4",
996 "PIO5",
997 "PIO6",
998 "MWDMA0",
999 "MWDMA1",
1000 "MWDMA2",
1001 "MWDMA3",
1002 "MWDMA4",
1003 "UDMA/16",
1004 "UDMA/25",
1005 "UDMA/33",
1006 "UDMA/44",
1007 "UDMA/66",
1008 "UDMA/100",
1009 "UDMA/133",
1010 "UDMA7",
1011 };
1012 int highbit;
1013
1014 highbit = fls(xfer_mask) - 1;
1015 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
1016 return xfer_mode_str[highbit];
1017 return "<n/a>";
1018}
1019
/*
 * sata_spd_string - name a SATA link speed value
 * @spd: 1-based speed generation; anything else yields "<unknown>".
 */
const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
		"6.0 Gbps",
	};

	if (spd >= 1 && spd <= ARRAY_SIZE(spd_str))
		return spd_str[spd - 1];

	return "<unknown>";
}
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1049{
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071 if ((tf->lbam == 0) && (tf->lbah == 0)) {
1072 DPRINTK("found ATA device by sig\n");
1073 return ATA_DEV_ATA;
1074 }
1075
1076 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1077 DPRINTK("found ATAPI device by sig\n");
1078 return ATA_DEV_ATAPI;
1079 }
1080
1081 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1082 DPRINTK("found PMP device by sig\n");
1083 return ATA_DEV_PMP;
1084 }
1085
1086 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
1087 DPRINTK("found SEMB device by sig (could be ATA device)\n");
1088 return ATA_DEV_SEMB;
1089 }
1090
1091 DPRINTK("unknown device\n");
1092 return ATA_DEV_UNKNOWN;
1093}
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110void ata_id_string(const u16 *id, unsigned char *s,
1111 unsigned int ofs, unsigned int len)
1112{
1113 unsigned int c;
1114
1115 BUG_ON(len & 1);
1116
1117 while (len > 0) {
1118 c = id[ofs] >> 8;
1119 *s = c;
1120 s++;
1121
1122 c = id[ofs] & 0xff;
1123 *s = c;
1124 s++;
1125
1126 ofs++;
1127 len -= 2;
1128 }
1129}
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145void ata_id_c_string(const u16 *id, unsigned char *s,
1146 unsigned int ofs, unsigned int len)
1147{
1148 unsigned char *p;
1149
1150 ata_id_string(id, s, ofs, len - 1);
1151
1152 p = s + strnlen(s, len - 1);
1153 while (p > s && p[-1] == ' ')
1154 p--;
1155 *p = '\0';
1156}
1157
/* Return the device capacity in sectors from IDENTIFY data, preferring
 * LBA48 > LBA28 > current CHS > default CHS. */
static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
		else
			return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
	} else {
		if (ata_id_current_chs_valid(id))
			return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
			       id[ATA_ID_CUR_SECTORS];
		else
			return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
			       id[ATA_ID_SECTORS];
	}
}
1174
/* Assemble the 48-bit LBA carried in @tf (hob_* = bits 24-47). */
u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}
1188
/* Assemble the 28-bit LBA carried in @tf (device reg low nibble = bits
 * 24-27). */
u64 ata_tf_to_lba(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= (tf->device & 0x0f) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1214{
1215 unsigned int err_mask;
1216 struct ata_taskfile tf;
1217 int lba48 = ata_id_has_lba48(dev->id);
1218
1219 ata_tf_init(dev, &tf);
1220
1221
1222 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1223
1224 if (lba48) {
1225 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1226 tf.flags |= ATA_TFLAG_LBA48;
1227 } else
1228 tf.command = ATA_CMD_READ_NATIVE_MAX;
1229
1230 tf.protocol |= ATA_PROT_NODATA;
1231 tf.device |= ATA_LBA;
1232
1233 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1234 if (err_mask) {
1235 ata_dev_warn(dev,
1236 "failed to read native max address (err_mask=0x%x)\n",
1237 err_mask);
1238 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1239 return -EACCES;
1240 return -EIO;
1241 }
1242
1243 if (lba48)
1244 *max_sectors = ata_tf_to_lba48(&tf) + 1;
1245 else
1246 *max_sectors = ata_tf_to_lba(&tf) + 1;
1247 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1248 (*max_sectors)--;
1249 return 0;
1250}
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1265{
1266 unsigned int err_mask;
1267 struct ata_taskfile tf;
1268 int lba48 = ata_id_has_lba48(dev->id);
1269
1270 new_sectors--;
1271
1272 ata_tf_init(dev, &tf);
1273
1274 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1275
1276 if (lba48) {
1277 tf.command = ATA_CMD_SET_MAX_EXT;
1278 tf.flags |= ATA_TFLAG_LBA48;
1279
1280 tf.hob_lbal = (new_sectors >> 24) & 0xff;
1281 tf.hob_lbam = (new_sectors >> 32) & 0xff;
1282 tf.hob_lbah = (new_sectors >> 40) & 0xff;
1283 } else {
1284 tf.command = ATA_CMD_SET_MAX;
1285
1286 tf.device |= (new_sectors >> 24) & 0xf;
1287 }
1288
1289 tf.protocol |= ATA_PROT_NODATA;
1290 tf.device |= ATA_LBA;
1291
1292 tf.lbal = (new_sectors >> 0) & 0xff;
1293 tf.lbam = (new_sectors >> 8) & 0xff;
1294 tf.lbah = (new_sectors >> 16) & 0xff;
1295
1296 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1297 if (err_mask) {
1298 ata_dev_warn(dev,
1299 "failed to set max address (err_mask=0x%x)\n",
1300 err_mask);
1301 if (err_mask == AC_ERR_DEV &&
1302 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1303 return -EACCES;
1304 return -EIO;
1305 }
1306
1307 return 0;
1308}
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321static int ata_hpa_resize(struct ata_device *dev)
1322{
1323 struct ata_eh_context *ehc = &dev->link->eh_context;
1324 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1325 bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
1326 u64 sectors = ata_id_n_sectors(dev->id);
1327 u64 native_sectors;
1328 int rc;
1329
1330
1331 if (dev->class != ATA_DEV_ATA ||
1332 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1333 (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1334 return 0;
1335
1336
1337 rc = ata_read_native_max_address(dev, &native_sectors);
1338 if (rc) {
1339
1340
1341
1342 if (rc == -EACCES || !unlock_hpa) {
1343 ata_dev_warn(dev,
1344 "HPA support seems broken, skipping HPA handling\n");
1345 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1346
1347
1348 if (rc == -EACCES)
1349 rc = 0;
1350 }
1351
1352 return rc;
1353 }
1354 dev->n_native_sectors = native_sectors;
1355
1356
1357 if (native_sectors <= sectors || !unlock_hpa) {
1358 if (!print_info || native_sectors == sectors)
1359 return 0;
1360
1361 if (native_sectors > sectors)
1362 ata_dev_info(dev,
1363 "HPA detected: current %llu, native %llu\n",
1364 (unsigned long long)sectors,
1365 (unsigned long long)native_sectors);
1366 else if (native_sectors < sectors)
1367 ata_dev_warn(dev,
1368 "native sectors (%llu) is smaller than sectors (%llu)\n",
1369 (unsigned long long)native_sectors,
1370 (unsigned long long)sectors);
1371 return 0;
1372 }
1373
1374
1375 rc = ata_set_max_sectors(dev, native_sectors);
1376 if (rc == -EACCES) {
1377
1378 ata_dev_warn(dev,
1379 "device aborted resize (%llu -> %llu), skipping HPA handling\n",
1380 (unsigned long long)sectors,
1381 (unsigned long long)native_sectors);
1382 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1383 return 0;
1384 } else if (rc)
1385 return rc;
1386
1387
1388 rc = ata_dev_reread_id(dev, 0);
1389 if (rc) {
1390 ata_dev_err(dev,
1391 "failed to re-read IDENTIFY data after HPA resizing\n");
1392 return rc;
1393 }
1394
1395 if (print_info) {
1396 u64 new_sectors = ata_id_n_sectors(dev->id);
1397 ata_dev_info(dev,
1398 "HPA unlocked: %llu -> %llu, native %llu\n",
1399 (unsigned long long)sectors,
1400 (unsigned long long)new_sectors,
1401 (unsigned long long)native_sectors);
1402 }
1403
1404 return 0;
1405}
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418static inline void ata_dump_id(const u16 *id)
1419{
1420 DPRINTK("49==0x%04x "
1421 "53==0x%04x "
1422 "63==0x%04x "
1423 "64==0x%04x "
1424 "75==0x%04x \n",
1425 id[49],
1426 id[53],
1427 id[63],
1428 id[64],
1429 id[75]);
1430 DPRINTK("80==0x%04x "
1431 "81==0x%04x "
1432 "82==0x%04x "
1433 "83==0x%04x "
1434 "84==0x%04x \n",
1435 id[80],
1436 id[81],
1437 id[82],
1438 id[83],
1439 id[84]);
1440 DPRINTK("88==0x%04x "
1441 "93==0x%04x\n",
1442 id[88],
1443 id[93]);
1444}
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461unsigned long ata_id_xfermask(const u16 *id)
1462{
1463 unsigned long pio_mask, mwdma_mask, udma_mask;
1464
1465
1466 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1467 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1468 pio_mask <<= 3;
1469 pio_mask |= 0x7;
1470 } else {
1471
1472
1473
1474
1475 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1476 if (mode < 5)
1477 pio_mask = (2 << mode) - 1;
1478 else
1479 pio_mask = 1;
1480
1481
1482
1483
1484
1485
1486
1487 }
1488
1489 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1490
1491 if (ata_id_is_cfa(id)) {
1492
1493
1494
1495 int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
1496 int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
1497
1498 if (pio)
1499 pio_mask |= (1 << 5);
1500 if (pio > 1)
1501 pio_mask |= (1 << 6);
1502 if (dma)
1503 mwdma_mask |= (1 << 3);
1504 if (dma > 1)
1505 mwdma_mask |= (1 << 4);
1506 }
1507
1508 udma_mask = 0;
1509 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1510 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1511
1512 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1513}
1514
1515static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1516{
1517 struct completion *waiting = qc->private_data;
1518
1519 complete(waiting);
1520}
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544unsigned ata_exec_internal_sg(struct ata_device *dev,
1545 struct ata_taskfile *tf, const u8 *cdb,
1546 int dma_dir, struct scatterlist *sgl,
1547 unsigned int n_elem, unsigned long timeout)
1548{
1549 struct ata_link *link = dev->link;
1550 struct ata_port *ap = link->ap;
1551 u8 command = tf->command;
1552 int auto_timeout = 0;
1553 struct ata_queued_cmd *qc;
1554 unsigned int tag, preempted_tag;
1555 u32 preempted_sactive, preempted_qc_active;
1556 int preempted_nr_active_links;
1557 DECLARE_COMPLETION_ONSTACK(wait);
1558 unsigned long flags;
1559 unsigned int err_mask;
1560 int rc;
1561
1562 spin_lock_irqsave(ap->lock, flags);
1563
1564
1565 if (ap->pflags & ATA_PFLAG_FROZEN) {
1566 spin_unlock_irqrestore(ap->lock, flags);
1567 return AC_ERR_SYSTEM;
1568 }
1569
1570
1571
1572
1573
1574
1575
1576
1577 if (ap->ops->error_handler)
1578 tag = ATA_TAG_INTERNAL;
1579 else
1580 tag = 0;
1581
1582 if (test_and_set_bit(tag, &ap->qc_allocated))
1583 BUG();
1584 qc = __ata_qc_from_tag(ap, tag);
1585
1586 qc->tag = tag;
1587 qc->scsicmd = NULL;
1588 qc->ap = ap;
1589 qc->dev = dev;
1590 ata_qc_reinit(qc);
1591
1592 preempted_tag = link->active_tag;
1593 preempted_sactive = link->sactive;
1594 preempted_qc_active = ap->qc_active;
1595 preempted_nr_active_links = ap->nr_active_links;
1596 link->active_tag = ATA_TAG_POISON;
1597 link->sactive = 0;
1598 ap->qc_active = 0;
1599 ap->nr_active_links = 0;
1600
1601
1602 qc->tf = *tf;
1603 if (cdb)
1604 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1605
1606
1607 if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
1608 dma_dir == DMA_FROM_DEVICE)
1609 qc->tf.feature |= ATAPI_DMADIR;
1610
1611 qc->flags |= ATA_QCFLAG_RESULT_TF;
1612 qc->dma_dir = dma_dir;
1613 if (dma_dir != DMA_NONE) {
1614 unsigned int i, buflen = 0;
1615 struct scatterlist *sg;
1616
1617 for_each_sg(sgl, sg, n_elem, i)
1618 buflen += sg->length;
1619
1620 ata_sg_init(qc, sgl, n_elem);
1621 qc->nbytes = buflen;
1622 }
1623
1624 qc->private_data = &wait;
1625 qc->complete_fn = ata_qc_complete_internal;
1626
1627 ata_qc_issue(qc);
1628
1629 spin_unlock_irqrestore(ap->lock, flags);
1630
1631 if (!timeout) {
1632 if (ata_probe_timeout)
1633 timeout = ata_probe_timeout * 1000;
1634 else {
1635 timeout = ata_internal_cmd_timeout(dev, command);
1636 auto_timeout = 1;
1637 }
1638 }
1639
1640 if (ap->ops->error_handler)
1641 ata_eh_release(ap);
1642
1643 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1644
1645 if (ap->ops->error_handler)
1646 ata_eh_acquire(ap);
1647
1648 ata_sff_flush_pio_task(ap);
1649
1650 if (!rc) {
1651 spin_lock_irqsave(ap->lock, flags);
1652
1653
1654
1655
1656
1657
1658 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1659 qc->err_mask |= AC_ERR_TIMEOUT;
1660
1661 if (ap->ops->error_handler)
1662 ata_port_freeze(ap);
1663 else
1664 ata_qc_complete(qc);
1665
1666 if (ata_msg_warn(ap))
1667 ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
1668 command);
1669 }
1670
1671 spin_unlock_irqrestore(ap->lock, flags);
1672 }
1673
1674
1675 if (ap->ops->post_internal_cmd)
1676 ap->ops->post_internal_cmd(qc);
1677
1678
1679 if (qc->flags & ATA_QCFLAG_FAILED) {
1680 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1681 qc->err_mask |= AC_ERR_DEV;
1682
1683 if (!qc->err_mask)
1684 qc->err_mask |= AC_ERR_OTHER;
1685
1686 if (qc->err_mask & ~AC_ERR_OTHER)
1687 qc->err_mask &= ~AC_ERR_OTHER;
1688 }
1689
1690
1691 spin_lock_irqsave(ap->lock, flags);
1692
1693 *tf = qc->result_tf;
1694 err_mask = qc->err_mask;
1695
1696 ata_qc_free(qc);
1697 link->active_tag = preempted_tag;
1698 link->sactive = preempted_sactive;
1699 ap->qc_active = preempted_qc_active;
1700 ap->nr_active_links = preempted_nr_active_links;
1701
1702 spin_unlock_irqrestore(ap->lock, flags);
1703
1704 if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1705 ata_internal_cmd_timed_out(dev, command);
1706
1707 return err_mask;
1708}
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729unsigned ata_exec_internal(struct ata_device *dev,
1730 struct ata_taskfile *tf, const u8 *cdb,
1731 int dma_dir, void *buf, unsigned int buflen,
1732 unsigned long timeout)
1733{
1734 struct scatterlist *psg = NULL, sg;
1735 unsigned int n_elem = 0;
1736
1737 if (dma_dir != DMA_NONE) {
1738 WARN_ON(!buf);
1739 sg_init_one(&sg, buf, buflen);
1740 psg = &sg;
1741 n_elem++;
1742 }
1743
1744 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1745 timeout);
1746}
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1763{
1764 struct ata_taskfile tf;
1765
1766 ata_tf_init(dev, &tf);
1767
1768 tf.command = cmd;
1769 tf.flags |= ATA_TFLAG_DEVICE;
1770 tf.protocol = ATA_PROT_NODATA;
1771
1772 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1773}
1774
1775
1776
1777
1778
1779
1780
1781
1782unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1783{
1784
1785
1786
1787
1788 if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1789 return 0;
1790
1791
1792
1793 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1794 return 0;
1795
1796 if (ata_id_is_cfa(adev->id)
1797 && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1798 return 0;
1799
1800 if (adev->pio_mode > XFER_PIO_2)
1801 return 1;
1802
1803 if (ata_id_has_iordy(adev->id))
1804 return 1;
1805 return 0;
1806}
1807
1808
1809
1810
1811
1812
1813
1814
1815static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1816{
1817
1818 if (adev->id[ATA_ID_FIELD_VALID] & 2) {
1819 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1820
1821 if (pio) {
1822
1823 if (pio > 240)
1824 return 3 << ATA_SHIFT_PIO;
1825 return 7 << ATA_SHIFT_PIO;
1826 }
1827 }
1828 return 3 << ATA_SHIFT_PIO;
1829}
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841unsigned int ata_do_dev_read_id(struct ata_device *dev,
1842 struct ata_taskfile *tf, u16 *id)
1843{
1844 return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1845 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1846}
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  If the wrong class is detected, the IDENTIFY of the
 *	other flavor is retried once (may_fallback).  This function
 *	also issues ATA_CMD_INIT_DEV_PARAMS for pre-ATA4 drives and
 *	SET_FEATURES spin-up for drives that power up in standby.
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	bool is_semb = class == ATA_DEV_SEMB;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_dbg(dev, "%s: ENTER\n", __func__);

retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_SEMB:
		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
		/* fall through - treat as ATA and see if IDENTIFY works */
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	if (ap->ops->read_id)
		err_mask = ap->ops->read_id(dev, &tf, id);
	else
		err_mask = ata_do_dev_read_id(dev, &tf, id);

	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			ata_dev_dbg(dev, "NODEV after polling detection\n");
			return -ENOENT;
		}

		if (is_semb) {
			ata_dev_info(dev,
		     "IDENTIFY failed on device w/ SEMB sig, disabled\n");
			/* SEMB is not supported yet */
			*p_class = ATA_DEV_SEMB_UNSUP;
			return 0;
		}

		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			/* Device or controller might have reported
			 * the wrong device class.  Give a shot at the
			 * other IDENTIFY if the current one is
			 * aborted by the device.
			 */
			if (may_fallback) {
				may_fallback = 0;

				if (class == ATA_DEV_ATA)
					class = ATA_DEV_ATAPI;
				else
					class = ATA_DEV_ATA;
				goto retry;
			}

			/* Control reaches here iff the device aborted
			 * both flavors of IDENTIFYs which happens
			 * sometimes with phantom devices.
			 */
			ata_dev_dbg(dev,
				    "both IDENTIFYs aborted, assuming NODEV\n");
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
		ata_dev_dbg(dev, "dumping IDENTIFY data, "
			    "class=%d may_fallback=%d tried_spinup=%d\n",
			    class, may_fallback, tried_spinup);
		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
		if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
							ata_id_is_ata(id)) {
			ata_dev_dbg(dev,
				"host indicates ignore ATA devices, ignored\n");
			return -ENOENT;
		}
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info
		 * (0x37c8), we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * Old pre-ATA4 / non-LBA drives need INITIALIZE DEVICE
		 * PARAMETERS right after reset before anything else, and
		 * the CHS fields of the IDENTIFY data may change as a
		 * result, so reread afterwards.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed.  reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
			     reason, err_mask);
	return rc;
}
2055
2056static int ata_do_link_spd_horkage(struct ata_device *dev)
2057{
2058 struct ata_link *plink = ata_dev_phys_link(dev);
2059 u32 target, target_limit;
2060
2061 if (!sata_scr_valid(plink))
2062 return 0;
2063
2064 if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2065 target = 1;
2066 else
2067 return 0;
2068
2069 target_limit = (1 << target) - 1;
2070
2071
2072 if (plink->sata_spd_limit <= target_limit)
2073 return 0;
2074
2075 plink->sata_spd_limit = target_limit;
2076
2077
2078
2079
2080
2081 if (plink->sata_spd > target) {
2082 ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2083 sata_spd_string(target));
2084 return -EAGAIN;
2085 }
2086 return 0;
2087}
2088
2089static inline u8 ata_dev_knobble(struct ata_device *dev)
2090{
2091 struct ata_port *ap = dev->link->ap;
2092
2093 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2094 return 0;
2095
2096 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2097}
2098
/*
 * Configure NCQ for @dev and fill @desc (of size @desc_sz) with a
 * human-readable description of the result for the probe message.
 * Also tries to enable FPDMA auto-activation and reads the NCQ
 * Send/Recv log when host and device support them.
 * Returns 0 on success, -EIO when enabling AA fails non-device-side.
 */
static int ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->link->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
	unsigned int err_mask;
	char *aa_desc = "";

	/* Device doesn't do NCQ at all. */
	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return 0;
	}
	/* NCQ blacklisted for this device. */
	if (dev->horkage & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return 0;
	}
	/* Host supports NCQ — cap host depth at queue capability. */
	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	/* Try to enable FPDMA auto-activation when both sides support
	 * it and the device isn't known-broken for AA.
	 */
	if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
		(ap->flags & ATA_FLAG_FPDMA_AA) &&
		ata_id_has_fpdma_aa(dev->id)) {
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
			SATA_FPDMA_AA);
		if (err_mask) {
			ata_dev_err(dev,
				    "failed to enable AA (error_mask=0x%x)\n",
				    err_mask);
			/* A non-device error is fatal; a device error
			 * just means "no AA".
			 */
			if (err_mask != AC_ERR_DEV) {
				dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
				return -EIO;
			}
		} else
			aa_desc = ", AA";
	}

	/* Report the effective depth (host-limited if smaller). */
	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
			ddepth, aa_desc);

	/* Cache the NCQ Send/Recv log page if the device provides it. */
	if ((ap->flags & ATA_FLAG_FPDMA_AUX) &&
	    ata_id_has_ncq_send_and_recv(dev->id)) {
		err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
					     0, ap->sector_buf, 1);
		if (err_mask) {
			ata_dev_dbg(dev,
				    "failed to get NCQ Send/Recv Log Emask 0x%x\n",
				    err_mask);
		} else {
			dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
			memcpy(dev->ncq_send_recv_cmds, ap->sector_buf,
				ATA_LOG_NCQ_SEND_RECV_SIZE);
		}
	}

	return 0;
}
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned long xfer_mask;
	unsigned int err_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_dbg(dev, "%s: ENTER\n", __func__);

	/* set horkage */
	dev->horkage |= ata_dev_blacklisted(dev);
	ata_force_horkage(dev);

	if (dev->horkage & ATA_HORKAGE_DISABLE) {
		ata_dev_info(dev, "unsupported device, disabling\n");
		ata_dev_disable(dev);
		return 0;
	}

	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
	    dev->class == ATA_DEV_ATAPI) {
		ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
			     atapi_enabled ? "not supported with this driver"
			     : "disabled");
		ata_dev_disable(dev);
		return 0;
	}

	rc = ata_do_link_spd_horkage(dev);
	if (rc)
		return rc;

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_dbg(dev,
			    "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			    "85:%04x 86:%04x 87:%04x 88:%04x\n",
			    __func__,
			    id[49], id[82], id[83], id[84],
			    id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;
	dev->multi_count = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			/* CPRM may make this media unusable */
			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
				ata_dev_warn(dev,
	"supports DRM functions and may not be fully accessible\n");
			snprintf(revbuf, 7, "CFA");
		} else {
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
			/* Warn the user if the device has TPM extensions */
			if (ata_id_has_tpm(id))
				ata_dev_warn(dev,
	"supports DRM functions and may not be fully accessible\n");
		}

		dev->n_sectors = ata_id_n_sectors(id);

		/* get current R/W Multiple count setting */
		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
			unsigned int max = dev->id[47] & 0xff;
			unsigned int cnt = dev->id[59] & 0xff;
			/* only recognize/allow powers of two here */
			if (is_power_of_2(max) && is_power_of_2(cnt))
				if (cnt <= max)
					dev->multi_count = cnt;
		}

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[24];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
			if (rc)
				return rc;

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_info(dev, "%s: %s, %s, max %s\n",
					     revbuf, modelbuf, fwrevbuf,
					     ata_mode_string(xfer_mask));
				ata_dev_info(dev,
					     "%llu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders = id[1];
			dev->heads = id[3];
			dev->sectors = id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads = id[55];
				dev->sectors = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_info(dev, "%s: %s, %s, max %s\n",
					     revbuf, modelbuf, fwrevbuf,
					     ata_mode_string(xfer_mask));
				ata_dev_info(dev,
					     "%llu sectors, multi %u, CHS %u/%u/%u\n",
					     (unsigned long long)dev->n_sectors,
					     dev->multi_count, dev->cylinders,
					     dev->heads, dev->sectors);
			}
		}

		/* Check and mark DevSlp capability.  Get DevSlp timing
		 * variables from the SATA Settings page of the Identify
		 * Device Data Log.
		 */
		if (ata_id_has_devslp(dev->id)) {
			u8 *sata_setting = ap->sector_buf;
			int i, j;

			dev->flags |= ATA_DFLAG_DEVSLP;
			err_mask = ata_read_log_page(dev,
						     ATA_LOG_SATA_ID_DEV_DATA,
						     ATA_LOG_SATA_SETTINGS,
						     sata_setting,
						     1);
			if (err_mask)
				ata_dev_dbg(dev,
					    "failed to get Identify Device Data, Emask 0x%x\n",
					    err_mask);
			else
				for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
					j = ATA_LOG_DEVSLP_OFFSET + i;
					dev->devslp_timing[i] = sata_setting[j];
				}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		const char *dma_dir_string = "";
		u32 sntf;

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_warn(dev, "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support.  If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if (atapi_an &&
		    (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!sata_pmp_attached(ap) ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
			/* issue SET feature command to turn this on */
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_AN);
			if (err_mask)
				ata_dev_err(dev,
					    "failed to enable ATAPI AN (err_mask=0x%x)\n",
					    err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
			dev->flags |= ATA_DFLAG_DMADIR;
			dma_dir_string = ", DMADIR";
		}

		if (ata_id_has_da(dev->id)) {
			dev->flags |= ATA_DFLAG_DA;
			zpodd_init(dev);
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_info(dev,
				     "ATAPI: %s, %s, max %s%s%s%s\n",
				     modelbuf, fwrevbuf,
				     ata_mode_string(xfer_mask),
				     cdb_intr_string, atapi_an_string,
				     dma_dir_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	/* Limit PATA drives on SATA cable bridges to udma5 and 200
	 * sectors.
	 */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_info(dev, "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if ((dev->class == ATA_DEV_ATAPI) &&
	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
	}

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know.  We don't want to disallow opens
		 * for rescue purposes, or in case the vendor is just
		 * a blithering idiot.  Do this after the dev_config
		 * call as some controllers with buggy firmware may
		 * want to avoid reporting false device bugs.
		 */
		if (print_info) {
			ata_dev_warn(dev,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_warn(dev,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
		ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
		ata_dev_warn(dev, "         contact the vendor or visit http://ata.wiki.kernel.org\n");
	}

	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
	return rc;
}
2497
2498
2499
2500
2501
2502
2503
2504
2505
/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 */
int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}
2510
2511
2512
2513
2514
2515
2516
2517
2518
/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 */
int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}
2523
2524
2525
2526
2527
2528
2529
2530
/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 */
int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}
2535
2536
2537
2538
2539
2540
2541
2542
/**
 *	ata_cable_ignore	-	return ignored PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which don't use cable type to limit
 *	transfer mode.
 */
int ata_cable_ignore(struct ata_port *ap)
{
	return ATA_CBL_PATA_IGN;
}
2547
2548
2549
2550
2551
2552
2553
2554
/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables.
 */
int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
2559
2560
2561
2562
2563
2564
2565
2566
2567
2568
2569
2570
2571
2572
2573
2574
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.  Each device gets ATA_PROBE_MAX_TRIES attempts; on
 *	repeated failure the link speed and transfer mode are lowered
 *	before the final try.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */
int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	ata_for_each_dev(dev, &ap->link, ALL)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	ata_for_each_dev(dev, &ap->link, ALL) {
		/* After a reset the device may be running PIO0
		 * timings, so start from the slowest mode and don't
		 * touch the bus faster than that until the controller
		 * is configured.
		 */
		dev->pio_mode = XFER_PIO_0;
		dev->dma_mode = 0xff;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights.  Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	ata_for_each_dev(dev, &ap->link, ALL) {
		if (dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	/* read IDENTIFY page and configure devices.  Identify in
	 * reverse order so that PDIAG- is released by the slave
	 * device first.
	 */
	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of
	 * the reported cable types and sensed types.  When SATA
	 * drives indicate we have a bridge, we don't know which end
	 * of the link the bridge is which is a problem.
	 */
	ata_for_each_dev(dev, &ap->link, ENABLED)
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;

	/* After the identify sequence we can now set up the devices.
	 * We do this in the normal order so that the user doesn't get
	 * confused.
	 */
	ata_for_each_dev(dev, &ap->link, ENABLED) {
		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	ata_for_each_dev(dev, &ap->link, ENABLED)
		return 0;

	return -ENODEV;

 fail:
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fall through - slow down on the last chance too */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link, 0);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
2696
2697
2698
2699
2700
2701
2702
2703
2704
2705
2706static void sata_print_link_status(struct ata_link *link)
2707{
2708 u32 sstatus, scontrol, tmp;
2709
2710 if (sata_scr_read(link, SCR_STATUS, &sstatus))
2711 return;
2712 sata_scr_read(link, SCR_CONTROL, &scontrol);
2713
2714 if (ata_phys_link_online(link)) {
2715 tmp = (sstatus >> 4) & 0xf;
2716 ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
2717 sata_spd_string(tmp), sstatus, scontrol);
2718 } else {
2719 ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
2720 sstatus, scontrol);
2721 }
2722}
2723
2724
2725
2726
2727
2728
2729
2730
2731
2732struct ata_device *ata_dev_pair(struct ata_device *adev)
2733{
2734 struct ata_link *link = adev->link;
2735 struct ata_device *pair = &link->device[1 - adev->devno];
2736 if (!ata_dev_enabled(pair))
2737 return NULL;
2738 return pair;
2739}
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
2750
2751
2752
2753
2754
2755
2756
2757
2758
2759
2760
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *	@spd_limit: Additional limit
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	afterwards, e.g. by sata_set_spd().
 *
 *	If @spd_limit is non-zero, the speed is limited to equal to or
 *	lower than @spd_limit if such speed is supported.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
{
	u32 sstatus, spd, mask;
	int rc, bit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0 && ata_sstatus_online(sstatus))
		spd = (sstatus >> 4) & 0xf;
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;

	/* unconditionally mask off the highest bit */
	bit = fls(mask) - 1;
	mask &= ~(1 << bit);

	/* Mask off all speeds higher than or equal to the current one. */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		mask &= 1;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	if (spd_limit) {
		/* Keep only the speeds at or below @spd_limit; if none
		 * remain, fall back to the lowest supported speed.
		 */
		if (mask & ((1 << spd_limit) - 1))
			mask &= (1 << spd_limit) - 1;
		else {
			bit = ffs(mask) - 1;
			mask = 1 << bit;
		}
	}

	link->sata_spd_limit = mask;

	ata_link_warn(link, "limiting SATA link speed to %s\n",
		      sata_spd_string(fls(mask)));

	return 0;
}
2814
2815static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2816{
2817 struct ata_link *host_link = &link->ap->link;
2818 u32 limit, target, spd;
2819
2820 limit = link->sata_spd_limit;
2821
2822
2823
2824
2825
2826 if (!ata_is_host_link(link) && host_link->sata_spd)
2827 limit &= (1 << host_link->sata_spd) - 1;
2828
2829 if (limit == UINT_MAX)
2830 target = 0;
2831 else
2832 target = fls(limit);
2833
2834 spd = (*scontrol >> 4) & 0xf;
2835 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2836
2837 return spd != target;
2838}
2839
2840
2841
2842
2843
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855static int sata_set_spd_needed(struct ata_link *link)
2856{
2857 u32 scontrol;
2858
2859 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2860 return 1;
2861
2862 return __sata_set_spd_needed(link, &scontrol);
2863}
2864
2865
2866
2867
2868
2869
2870
2871
2872
2873
2874
2875
2876
2877
2878int sata_set_spd(struct ata_link *link)
2879{
2880 u32 scontrol;
2881 int rc;
2882
2883 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2884 return rc;
2885
2886 if (!__sata_set_spd_needed(link, &scontrol))
2887 return 0;
2888
2889 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2890 return rc;
2891
2892 return 1;
2893}
2894
2895
2896
2897
2898
2899
2900
2901
2902
2903
2904
2905
2906
/*
 * Standard ATA timing table, sorted by transfer mode and terminated
 * by a 0xFF sentinel.  All values are in nanoseconds.  Columns:
 * mode, setup, act8b, rec8b, cyc8b, active, recover, dmack_hold,
 * cycle, udma.
 */
static const struct ata_timing ata_timing[] = {
	/* PIO modes: setup/act/rec/cycle per ATA spec */
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150,  0,  600,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100,  0,  383,   0 },
	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90,  0,  240,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70,  0,  180,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25,  0,  120,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25,  0,  100,   0 },
	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  0,   80,   0 },

	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 50,  960,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 30,  480,   0 },
	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 20,  240,   0 },

	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 20,  480,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50,  5,  150,   0 },
	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25,  5,  120,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25,  5,  100,   0 },
	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  5,   80,   0 },

	/* UDMA modes only specify the udma cycle time */
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,  0,    0, 120 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,  0,    0,  80 },
	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,  0,    0,  60 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,  0,    0,  45 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,  0,    0,  30 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,  0,    0,  20 },
	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,  0,    0,  15 },

	{ 0xFF }	/* sentinel - keep last */
};
2938
/* Round @v up to a whole number of @unit-sized clock ticks. */
#define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
/* Like ENOUGH() but leaves an unspecified (zero) field as zero. */
#define EZ(v, unit)		((v)?ENOUGH(v, unit):0)

/*
 * Convert the nanosecond timing in @t to clock ticks in @q, using a
 * PIO/DMA clock period of @T picoseconds and a UDMA clock period of
 * @UT picoseconds (hence the *1000 scaling of the ns fields).
 */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup	= EZ(t->setup      * 1000,  T);
	q->act8b	= EZ(t->act8b      * 1000,  T);
	q->rec8b	= EZ(t->rec8b      * 1000,  T);
	q->cyc8b	= EZ(t->cyc8b      * 1000,  T);
	q->active	= EZ(t->active     * 1000,  T);
	q->recover	= EZ(t->recover    * 1000,  T);
	q->dmack_hold	= EZ(t->dmack_hold * 1000,  T);
	q->cycle	= EZ(t->cycle      * 1000,  T);
	q->udma		= EZ(t->udma       * 1000, UT);
}
2954
/*
 * Merge timings @a and @b into @m by taking the per-field maximum
 * (i.e. the slower, safer value) for every field selected in @what
 * (ATA_TIMING_* bits).  @m may alias @a or @b.
 */
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}
2968
2969const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
2970{
2971 const struct ata_timing *t = ata_timing;
2972
2973 while (xfer_mode > t->mode)
2974 t++;
2975
2976 if (xfer_mode == t->mode)
2977 return t;
2978
2979 WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
2980 __func__, xfer_mode);
2981
2982 return NULL;
2983}
2984
/*
 * Compute the clock-tick timing for @adev at transfer mode @speed
 * into @t, given a PIO/DMA clock period of @T and UDMA clock period
 * of @UT (both in picoseconds, as consumed by ata_timing_quantize()).
 * Returns 0 on success, -EINVAL if @speed has no timing entry.
 */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const u16 *id = adev->id;
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */
	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */
	if (id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));

		if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2)
				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
			else if ((speed <= XFER_PIO_4) ||
				 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
			p.cycle = id[ATA_ID_EIDE_DMA_MIN];

		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */
	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T and some other commands.  We have to ensure that the
	 * DMA cycle timing is slower/equal than the current PIO timing,
	 * so merge in the PIO mode's timing for any faster mode.
	 */
	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */
	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/* In a few cases quantisation may produce enough errors to
	 * leave t->cycle too low for the sum of active and recovery;
	 * if so we must correct this.
	 */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
3060
3061
3062
3063
3064
3065
3066
3067
3068
3069
3070
3071
3072
3073
3074
3075
3076
/**
 *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
 *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
 *	@cycle: cycle duration in ns
 *
 *	Return matching xfer mode for @cycle.  The returned mode is of
 *	the transfer type specified by @xfer_shift.  If @cycle is too
 *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
 *	than the fastest known mode, the fastest mode is returned.
 *
 *	RETURNS:
 *	Matching xfer_mode, 0xff if no match found.
 */
u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
{
	u8 base_mode = 0xff, last_mode = 0xff;
	const struct ata_xfer_ent *ent;
	const struct ata_timing *t;

	/* find the base transfer mode for @xfer_shift */
	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (ent->shift == xfer_shift)
			base_mode = ent->base;

	/* walk the timing entries of this transfer type, keeping the
	 * fastest mode whose cycle time is still >= @cycle
	 */
	for (t = ata_timing_find_mode(base_mode);
	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
		unsigned short this_cycle;

		switch (xfer_shift) {
		case ATA_SHIFT_PIO:
		case ATA_SHIFT_MWDMA:
			this_cycle = t->cycle;
			break;
		case ATA_SHIFT_UDMA:
			this_cycle = t->udma;
			break;
		default:
			return 0xff;
		}

		if (cycle > this_cycle)
			break;

		last_mode = t->mode;
	}

	return last_mode;
}
3111
3112
3113
3114
3115
3116
3117
3118
3119
3120
3121
3122
3123
3124
3125
3126
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned long orig_mask, xfer_mask;
	unsigned long pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* drop the highest PIO mode */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* drop the highest DMA mode, UDMA first */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		/* restrict UDMA to 40-wire-cable-safe modes */
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fall through - also clear all DMA modes */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* fail if no PIO mode is left or nothing actually changed */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_warn(dev, "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
3199
/*
 * Program @dev's transfer mode: issue SET_XFERMODE (unless the
 * NOSETXFER horkage applies on SATA), revalidate the device and
 * decide whether a device-side error from SET_XFERMODE may be safely
 * ignored.  Returns 0 on success, -EIO on failure, or the
 * revalidation error code.
 */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
	const char *dev_err_whine = "";
	int ign_dev_err = 0;
	unsigned int err_mask = 0;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	/* NOSETXFER devices on SATA skip the command entirely. */
	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
		dev_err_whine = " (SET_XFERMODE skipped)";
	else {
		if (nosetxfer)
			ata_dev_warn(dev,
				     "NOSETXFER but PATA detected - can't "
				     "skip SETXFER, might malfunction\n");
		err_mask = ata_dev_set_xfermode(dev);
	}

	/* anything but a device-side error is fatal */
	if (err_mask & ~AC_ERR_DEV)
		goto fail;

	/* revalidate */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	if (dev->xfer_shift == ATA_SHIFT_PIO) {
		/* Old CFA may refuse this command, which is just fine */
		if (ata_id_is_cfa(dev->id))
			ign_dev_err = 1;
		/* Catch several broken garbage emulations plus some pre
		 * ATA devices.
		 */
		if (ata_id_major_version(dev->id) == 0 &&
					dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;
		/* Some very old devices and some bad newer ones fail
		 * any kind of SET_XFERMODE request but support PIO0-2
		 * timings and no IORDY.
		 */
		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;
	}

	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
	 * Don't fail an MWDMA0 set if the device indicates it is in
	 * MWDMA0 (id word 63 bit 8).
	 */
	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
	    dev->dma_mode == XFER_MW_DMA_0 &&
	    (dev->id[63] >> 8) & 1)
		ign_dev_err = 1;

	/* if the device is actually configured correctly, ignore dev err */
	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
		ign_dev_err = 1;

	if (err_mask & AC_ERR_DEV) {
		if (!ign_dev_err)
			goto fail;
		else
			dev_err_whine = " (device error ignored)";
	}

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_info(dev, "configured for %s%s\n",
		     ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
		     dev_err_whine);

	return 0;

 fail:
	ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
	return -EIO;
}
3280
3281
3282
3283
3284
3285
3286
3287
3288
3289
3290
3291
3292
3293
3294
3295
3296
3297
/**
 *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
 *	@link: link on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Standard implementation of the function used to tune and set
 *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_dev_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;
	int rc = 0, used_dma = 0, found = 0;

	/* step 1: calculate xfer_mask for every enabled device */
	ata_for_each_dev(dev, link, ENABLED) {
		unsigned long pio_mask, dma_mask;
		unsigned int mode_mask;

		/* DMA enable policy differs per device class */
		mode_mask = ATA_DMA_MASK_ATA;
		if (dev->class == ATA_DEV_ATAPI)
			mode_mask = ATA_DMA_MASK_ATAPI;
		else if (ata_id_is_cfa(dev->id))
			mode_mask = ATA_DMA_MASK_CFA;

		ata_dev_xfermask(dev);
		ata_force_xfermask(dev);

		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);

		if (libata_dma_mask & mode_mask)
			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
						     dev->udma_mask);
		else
			dma_mask = 0;

		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (ata_dma_enabled(dev))
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings; PIO is mandatory */
	ata_for_each_dev(dev, link, ENABLED) {
		if (dev->pio_mode == 0xff) {
			ata_dev_warn(dev, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings for DMA-capable devices */
	ata_for_each_dev(dev, link, ENABLED) {
		if (!ata_dma_enabled(dev))
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode (SETXFER + revalidate) */
	ata_for_each_dev(dev, link, ENABLED) {
		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status. If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = ap;

 out:
	if (rc)
		*r_failed_dev = dev;
	return rc;
}
3379
3380
3381
3382
3383
3384
3385
3386
3387
3388
3389
3390
3391
3392
3393
3394
3395
3396
3397
3398
3399
/**
 *	ata_wait_ready - wait for link to become ready
 *	@link: link to be waited on
 *	@deadline: deadline jiffies for the operation
 *	@check_ready: callback to check link readiness
 *
 *	Wait for @link to become ready.  @check_ready should return
 *	positive number if @link is ready, 0 if it isn't, -ENODEV if
 *	link doesn't seem to be occupied, other errno for other error
 *	conditions.
 *
 *	Transient -ENODEV conditions are allowed for
 *	ATA_TMOUT_FF_WAIT (longer when the host does parallel scan).
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_ready(struct ata_link *link, unsigned long deadline,
		   int (*check_ready)(struct ata_link *link))
{
	unsigned long start = jiffies;
	unsigned long nodev_deadline;
	int warned = 0;

	/* choose which 0xff timeout to use, read comment in libata.h */
	if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
	else
		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);

	/* Slave readiness can't be tested separately from master.  On
	 * M/S emulation configuration, this function should be called
	 * only on the master and it will handle both master and slave.
	 */
	WARN_ON(link == link->ap->slave_link);

	if (time_after(nodev_deadline, deadline))
		nodev_deadline = deadline;

	while (1) {
		unsigned long now = jiffies;
		int ready, tmp;

		ready = tmp = check_ready(link);
		if (ready > 0)
			return 0;

		/* -ENODEV could be transient.  Ignore -ENODEV if link
		 * is online.  Also, some SATA devices take a long
		 * time to clear 0xff after reset.  Wait for
		 * nodev_deadline if the link is offline status is
		 * unknown.
		 */
		if (ready == -ENODEV) {
			if (ata_link_online(link))
				ready = 0;
			else if ((link->ap->flags & ATA_FLAG_SATA) &&
				 !ata_link_offline(link) &&
				 time_before(now, nodev_deadline))
				ready = 0;
		}

		if (ready)
			return ready;
		if (time_after(now, deadline))
			return -EBUSY;

		/* be noisy once if we've been waiting a while already */
		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_link_warn(link,
				"link is slow to respond, please be patient "
				"(ready=%d)\n", tmp);
			warned = 1;
		}

		ata_msleep(link->ap, 50);
	}
}
3466
3467
3468
3469
3470
3471
3472
3473
3474
3475
3476
3477
3478
3479
3480
/**
 *	ata_wait_after_reset - wait for link to become ready after reset
 *	@link: link to be waited on
 *	@deadline: deadline jiffies for the operation
 *	@check_ready: callback to check link readiness
 *
 *	Wait for @link to become ready after reset, sleeping the
 *	mandatory post-reset settle delay (ATA_WAIT_AFTER_RESET) first.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
			 int (*check_ready)(struct ata_link *link))
{
	ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);

	return ata_wait_ready(link, deadline, check_ready);
}
3488
3489
3490
3491
3492
3493
3494
3495
3496
3497
3498
3499
3500
3501
3502
3503
3504
3505
3506
3507
3508
3509
3510
/**
 *	sata_link_debounce - debounce SATA phy status
 *	@link: ATA link to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Make sure SStatus DET of @link reaches a stable value - the
 *	same value observed for @duration ms at @interval ms polling
 *	steps.  DET==1 (device present, no phy comm) is tolerated until
 *	@deadline, as some devices take that long to negotiate.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -EPIPE on debounce timeout, other -errno on SCR
 *	read failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval = params[0];
	unsigned long duration = params[1];
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	/* overall timeout is min(params[2], deadline) */
	t = ata_deadline(jiffies, params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* only the DET field matters */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		ata_msleep(link->ap, interval);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			/* DET==1: keep waiting for phy comm until deadline */
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies,
				       ata_deadline(last_jiffies, duration)))
				return 0;
			continue;
		}

		/* unstable: restart the stability window */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE to tell upper layer to lower link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}
3558
3559
3560
3561
3562
3563
3564
3565
3566
3567
3568
3569
3570
3571
3572
/**
 *	sata_link_resume - resume SATA link
 *	@link: ATA link to resume SATA
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Resume SATA phy @link (clear DET, disable power management via
 *	SControl 0x300) and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_resume(struct ata_link *link, const unsigned long *params,
		     unsigned long deadline)
{
	int tries = ATA_LINK_RESUME_TRIES;
	u32 scontrol, serror;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/*
	 * Writes to SControl sometimes get ignored under certain
	 * controller conditions.  Rewrite and re-check a bounded
	 * number of times until the write sticks.
	 */
	do {
		/* keep SPD (bits 4-7), set DET=0 / IPM=no-restrictions */
		scontrol = (scontrol & 0x0f0) | 0x300;
		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			return rc;

		/*
		 * Some PHYs react badly if SStatus is pounded
		 * immediately after resuming.  Delay 200ms before
		 * debouncing.
		 */
		ata_msleep(link->ap, 200);

		/* is SControl restored correctly? */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			return rc;
	} while ((scontrol & 0xf0f) != 0x300 && --tries);

	if ((scontrol & 0xf0f) != 0x300) {
		ata_link_warn(link, "failed to resume link (SControl %X)\n",
			     scontrol);
		return 0;
	}

	if (tries < ATA_LINK_RESUME_TRIES)
		ata_link_warn(link, "link resume succeeded after %d retries\n",
			      ATA_LINK_RESUME_TRIES - tries);

	if ((rc = sata_link_debounce(link, params, deadline)))
		return rc;

	/* clear SError, some PHYs require this even for SRST to work */
	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
		rc = sata_scr_write(link, SCR_ERROR, serror);

	/* -EINVAL from SCR access just means "no SCR"; not an error here */
	return rc != -EINVAL ? rc : 0;
}
3623
3624
3625
3626
3627
3628
3629
3630
3631
3632
3633
3634
3635
3636
3637
3638
3639
3640
3641
/**
 *	sata_link_scr_lpm - manipulate SControl IPM and SPM fields
 *	@link: ATA link to manipulate SControl for
 *	@policy: LPM policy to configure
 *	@spm_wakeup: initiate LPM transition to active state
 *
 *	Manipulate the IPM field of the link's SControl and
 *	initiate a transition to active state if @spm_wakeup is
 *	requested.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
		      bool spm_wakeup)
{
	struct ata_eh_context *ehc = &link->eh_context;
	bool woken_up = false;
	u32 scontrol;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	switch (policy) {
	case ATA_LPM_MAX_POWER:
		/* disable all LPM transitions (IPM = 0x7) */
		scontrol |= (0x7 << 8);
		/* initiate transition to active state */
		if (spm_wakeup) {
			scontrol |= (0x4 << 12);
			woken_up = true;
		}
		break;
	case ATA_LPM_MED_POWER:
		/* allow LPM to PARTIAL (disable transitions to SLUMBER) */
		scontrol &= ~(0x1 << 8);
		scontrol |= (0x6 << 8);
		break;
	case ATA_LPM_MIN_POWER:
		if (ata_link_nr_enabled(link) > 0)
			/* no restrictions on LPM transitions */
			scontrol &= ~(0x7 << 8);
		else {
			/* empty port, power off (DET = offline) */
			scontrol &= ~0xf;
			scontrol |= (0x1 << 2);
		}
		break;
	default:
		WARN_ON(1);
	}

	rc = sata_scr_write(link, SCR_CONTROL, scontrol);
	if (rc)
		return rc;

	/* give the link time to transit out of LPM state */
	if (woken_up)
		msleep(10);

	/* clear PHYRDY_CHG from SError so LPM transitions aren't
	 * misreported as hotplug events */
	ehc->i.serror &= ~SERR_PHYRDY_CHG;
	return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
}
3695
3696
3697
3698
3699
3700
3701
3702
3703
3704
3705
3706
3707
3708
3709
3710
3711
3712
/**
 *	ata_std_prereset - prepare for reset
 *	@link: ATA link to be reset
 *	@deadline: deadline jiffies for the operation
 *
 *	@link is about to be reset.  Initialize it.  Failure from
 *	prereset makes libata abort whole reset sequence and give up
 *	that port, so prereset should be best-effort.  It does its
 *	best to prepare for reset sequence but if things go wrong, it
 *	should just whine, not fail.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume link */
	if (ap->flags & ATA_FLAG_SATA) {
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_warn(link,
				      "failed to resume link for reset (errno=%d)\n",
				      rc);
	}

	/* no point in trying softreset on offline link */
	if (ata_phys_link_offline(link))
		ehc->i.action &= ~ATA_EH_SOFTRESET;

	return 0;
}
3740
3741
3742
3743
3744
3745
3746
3747
3748
3749
3750
3751
3752
3753
3754
3755
3756
3757
3758
3759
3760
3761
3762
3763
3764
/**
 *	sata_link_hardreset - reset link via SATA phy reset
 *	@link: link to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *	@online: optional out parameter indicating link onlineness
 *	@check_ready: optional callback to check link readiness
 *
 *	SATA phy-reset @link using DET bits of SControl register.
 *	After hardreset, link readiness is waited upon using
 *	ata_wait_ready() if @check_ready is specified.  LLDs are
 *	allowed to not specify @check_ready and wait itself after this
 *	function returns.  Device classification is LLD's
 *	responsibility.
 *
 *	*@online is set to one iff reset succeeded and @link is online
 *	after reset.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
			unsigned long deadline,
			bool *online, int (*check_ready)(struct ata_link *))
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (online)
		*online = false;

	if (sata_set_spd_needed(link)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			goto out;

		scontrol = (scontrol & 0x0f0) | 0x304;	/* DET=4: offline */

		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(link);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		goto out;

	scontrol = (scontrol & 0x0f0) | 0x301;	/* DET=1: COMRESET */

	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	ata_msleep(link->ap, 1);

	/* bring link back (DET=0) and debounce */
	rc = sata_link_resume(link, timing, deadline);
	if (rc)
		goto out;
	/* if link is offline nothing more to do */
	if (ata_phys_link_offline(link))
		goto out;

	/* Link is online.  From this point, -ENODEV too is an error. */
	if (online)
		*online = true;

	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
		/* If PMP is supported, we have to do follow-up SRST.
		 * Some PMPs don't send D2H Reg FIS after hardreset if
		 * the first port is empty.  Wait only for PMP-specific
		 * time and ask EH to retry with SRST (-EAGAIN).
		 */
		if (check_ready) {
			unsigned long pmp_deadline;

			pmp_deadline = ata_deadline(jiffies,
						    ATA_TMOUT_PMP_SRST_WAIT);
			if (time_after(pmp_deadline, deadline))
				pmp_deadline = deadline;
			ata_wait_ready(link, pmp_deadline, check_ready);
		}
		rc = -EAGAIN;
		goto out;
	}

	rc = 0;
	if (check_ready)
		rc = ata_wait_ready(link, deadline, check_ready);
 out:
	if (rc && rc != -EAGAIN) {
		/* online is set iff link is online && reset succeeded */
		if (online)
			*online = false;
		ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
	}
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
3852
3853
3854
3855
3856
3857
3858
3859
3860
3861
3862
3863
3864
3865
3866
3867int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3868 unsigned long deadline)
3869{
3870 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3871 bool online;
3872 int rc;
3873
3874
3875 rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3876 return online ? -EAGAIN : rc;
3877}
3878
3879
3880
3881
3882
3883
3884
3885
3886
3887
3888
3889
3890
/**
 *	ata_std_postreset - standard postreset callback
 *	@link: the target ata_link
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  It clears
 *	SError and prints the link status.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_std_postreset(struct ata_link *link, unsigned int *classes)
{
	u32 serror;

	DPRINTK("ENTER\n");

	/* reset complete, clear SError (write-1-to-clear semantics) */
	if (!sata_scr_read(link, SCR_ERROR, &serror))
		sata_scr_write(link, SCR_ERROR, serror);

	/* print link status */
	sata_print_link_status(link);

	DPRINTK("EXIT\n");
}
3906
3907
3908
3909
3910
3911
3912
3913
3914
3915
3916
3917
3918
3919
3920
3921
3922
3923static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3924 const u16 *new_id)
3925{
3926 const u16 *old_id = dev->id;
3927 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3928 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3929
3930 if (dev->class != new_class) {
3931 ata_dev_info(dev, "class mismatch %d != %d\n",
3932 dev->class, new_class);
3933 return 0;
3934 }
3935
3936 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3937 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3938 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3939 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3940
3941 if (strcmp(model[0], model[1])) {
3942 ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
3943 model[0], model[1]);
3944 return 0;
3945 }
3946
3947 if (strcmp(serial[0], serial[1])) {
3948 ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
3949 serial[0], serial[1]);
3950 return 0;
3951 }
3952
3953 return 1;
3954}
3955
3956
3957
3958
3959
3960
3961
3962
3963
3964
3965
3966
3967
3968
3969
/**
 *	ata_dev_reread_id - Re-read IDENTIFY data
 *	@dev: target ATA device
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page and make sure @dev is still attached to
 *	the port.  On success, @dev->id is updated with the fresh page.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
{
	unsigned int class = dev->class;
	u16 *id = (void *)dev->link->ap->sector_buf;
	int rc;

	/* read ID data into the port's scratch buffer */
	rc = ata_dev_read_id(dev, &class, readid_flags, id);
	if (rc)
		return rc;

	/* is the device still there? */
	if (!ata_dev_same_device(dev, class, id))
		return -ENODEV;

	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
	return 0;
}
3988
3989
3990
3991
3992
3993
3994
3995
3996
3997
3998
3999
4000
4001
4002
4003
/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@new_class: new class code
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page, make sure @dev is still attached to the
 *	port and reconfigure it according to the new IDENTIFY page.
 *	Also handles "late" HPA lock/unlock detection via n_sectors
 *	comparison.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
		       unsigned int readid_flags)
{
	u64 n_sectors = dev->n_sectors;
	u64 n_native_sectors = dev->n_native_sectors;
	int rc;

	if (!ata_dev_enabled(dev))
		return -ENODEV;

	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
	if (ata_class_enabled(new_class) &&
	    new_class != ATA_DEV_ATA &&
	    new_class != ATA_DEV_ATAPI &&
	    new_class != ATA_DEV_SEMB) {
		ata_dev_info(dev, "class mismatch %u != %u\n",
			     dev->class, new_class);
		rc = -ENODEV;
		goto fail;
	}

	/* re-read ID */
	rc = ata_dev_reread_id(dev, readid_flags);
	if (rc)
		goto fail;

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc)
		goto fail;

	/* verify n_sectors hasn't changed */
	if (dev->class != ATA_DEV_ATA || !n_sectors ||
	    dev->n_sectors == n_sectors)
		return 0;

	/* n_sectors has changed */
	ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
		     (unsigned long long)n_sectors,
		     (unsigned long long)dev->n_sectors);

	/*
	 * Something could have caused HPA to be unlocked
	 * involuntarily.  If n_native_sectors hasn't changed and the
	 * new size matches it, keep the device.
	 */
	if (dev->n_native_sectors == n_native_sectors &&
	    dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
		ata_dev_warn(dev,
			     "new n_sectors matches native, probably "
			     "late HPA unlock, n_sectors updated\n");
		/* use the larger n_sectors */
		return 0;
	}

	/*
	 * Some BIOSes boot w/o HPA but resume w/ HPA locked.  Try
	 * unlocking HPA in those cases.
	 *
	 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
	 */
	if (dev->n_native_sectors == n_native_sectors &&
	    dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
	    !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
		ata_dev_warn(dev,
			     "old n_sectors matches native, probably "
			     "late HPA lock, will try to unlock HPA\n");
		/* try unlocking HPA */
		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
		rc = -EIO;
	} else
		rc = -ENODEV;

	/* restore original n_[native_]sectors and fail */
	dev->n_native_sectors = n_native_sectors;
	dev->n_sectors = n_sectors;
 fail:
	ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
4084
/* One quirk-table entry: glob patterns for model/firmware strings
 * plus the ATA_HORKAGE_* flags to apply on a match. */
struct ata_blacklist_entry {
	const char *model_num;	/* glob pattern for IDENTIFY model string */
	const char *model_rev;	/* glob for firmware revision; NULL = any */
	unsigned long horkage;	/* ATA_HORKAGE_* flags to apply */
};
4090
static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
	{ "CRD-848[02]B",	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8[34]35",NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
	{ " 2GB ATA Flash Disk", "ADMA428M",	ATA_HORKAGE_NODMA },
	/* Odd clown on sil3726/4726 PMPs */
	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA },
	{ "Slimtype DVD A  DS8A8SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },

	/* Devices we expect to fail diagnostics */

	/* Devices where NCQ should be avoided */
	/* NCQ is slow */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
	/* http://thread.gmane.org/gmane.linux.ide/14907 */
	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
	/* NCQ is broken */
	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
	{ "OCZ CORE_SSD",	"02.10104",	ATA_HORKAGE_NONCQ },

	/* Seagate NCQ + FLUSH CACHE firmware bug */
	{ "ST31500341AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST31000333AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST3640[36]23AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST3320[68]13AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	/* Blacklist entries taken from Silicon Image 3124/3132
	   Windows driver .inf file - also several Linux problem reports */
	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },

	/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
	{ "C300-CTFDDAC128MAG",	"0001",		ATA_HORKAGE_NONCQ, },

	/* devices which puke on READ_NATIVE_MAX */
	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },

	/* this one allows HPA unlocking but fails I/O on the area */
	{ "OCZ-VERTEX",		    "1.30",	ATA_HORKAGE_BROKEN_HPA },

	/* Devices which report 1 sector over size HPA */
	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },

	/* Devices which get the IVB wrong */
	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
	/* Maybe we should just blacklist TSSTcorp... */
	{ "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]",  ATA_HORKAGE_IVB, },

	/* Devices that do not need bridging limits applied */
	{ "MTRON MSP-SATA*",		NULL,	ATA_HORKAGE_BRIDGE_OK, },
	{ "BUFFALO HD-QSU2/R5",		NULL,	ATA_HORKAGE_BRIDGE_OK, },

	/* Devices which aren't very happy with higher link speeds */
	{ "WD My Book",			NULL,	ATA_HORKAGE_1_5_GBPS, },
	{ "Seagate FreeAgent GoFlex",	NULL,	ATA_HORKAGE_1_5_GBPS, },

	/*
	 * Devices which choke on SETXFER.  Applies only if both the
	 * device and controller are SATA.
	 */
	{ "PIONEER DVD-RW  DVRTD08",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVRTD08A",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVR-215",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },

	/* End Marker */
	{ }
};
4207
4208
4209
4210
4211
4212
4213
4214
4215
4216
4217
4218
4219
4220
4221
4222
4223
4224
4225
4226
4227
4228
4229
4230
4231
4232
4233
4234
/**
 *	glob_match - match a text string against a (limited) glob pattern
 *	@text: the string to be examined
 *	@pattern: the glob pattern to match against
 *
 *	Either/both of text and pattern can be empty strings.
 *
 *	Match text against a pattern which supports:
 *	    ?	matches any single character
 *	    [..]	matches any character from a bracketed set
 *	    *	matches any sequence of characters (only supported
 *		as the final character of the pattern; '*' earlier in
 *		the pattern triggers recursive matching from that point)
 *
 *	NOTE(review): the character-range test below uses strict
 *	comparisons (*text > low && *text < high) against the chars
 *	adjacent to '-', so range *endpoints* appear to be excluded and
 *	explicit listing ("[bc]") relies on the direct equality test in
 *	the loop condition instead - confirm before relying on ranges.
 *
 *	RETURNS:
 *	0 on match, 1 otherwise.
 */
static int glob_match (const char *text, const char *pattern)
{
	do {
		/* Match single character or a '?' wildcard */
		if (*text == *pattern || *pattern == '?') {
			if (!*pattern++)
				return 0;  /* End of both strings: match */
		} else {
			/* Match single char against a '[' bracketed ']' pattern set */
			if (!*text || *pattern != '[')
				break;  /* Not a pattern set */
			while (*++pattern && *pattern != ']' && *text != *pattern) {
				if (*pattern == '-' && *(pattern - 1) != '[')
					if (*text > *(pattern - 1) && *text < *(pattern + 1)) {
						++pattern;
						break;
					}
			}
			if (!*pattern || *pattern == ']')
				return 1;  /* No match */
			while (*pattern && *pattern++ != ']');
		}
	} while (*++text && *pattern);

	/* Match any run of chars against a '*' wildcard */
	if (*pattern == '*') {
		if (!*++pattern)
			return 0;  /* Match: avoid recursion at end of pattern */
		/* Loop to handle additional pattern chars after the wildcard */
		while (*text) {
			if (glob_match(text, pattern) == 0)
				return 0;  /* Remainder matched */
			++text;  /* Absorb (match) this char and try again */
		}
	}
	if (!*text && !*pattern)
		return 0;  /* End of both strings: match */
	return 1;  /* No match */
}
4274
4275static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4276{
4277 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4278 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4279 const struct ata_blacklist_entry *ad = ata_device_blacklist;
4280
4281 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4282 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4283
4284 while (ad->model_num) {
4285 if (!glob_match(model_num, ad->model_num)) {
4286 if (ad->model_rev == NULL)
4287 return ad->horkage;
4288 if (!glob_match(model_rev, ad->model_rev))
4289 return ad->horkage;
4290 }
4291 ad++;
4292 }
4293 return 0;
4294}
4295
4296static int ata_dma_blacklisted(const struct ata_device *dev)
4297{
4298
4299
4300
4301
4302 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4303 (dev->flags & ATA_DFLAG_CDB_INTR))
4304 return 1;
4305 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4306}
4307
4308
4309
4310
4311
4312
4313
4314
4315
4316static int ata_is_40wire(struct ata_device *dev)
4317{
4318 if (dev->horkage & ATA_HORKAGE_IVB)
4319 return ata_drive_40wire_relaxed(dev->id);
4320 return ata_drive_40wire(dev->id);
4321}
4322
4323
4324
4325
4326
4327
4328
4329
4330
4331
4332
4333
4334
4335
/**
 *	cable_is_40wire		-	40/80/SATA decider
 *	@ap: port to consider
 *
 *	This function encapsulates the policy for speed management
 *	in one place.  At the moment we don't cache the result but
 *	there is a good case for setting ap->cbl to the result when
 *	we are called with unknown cables (and figuring out if it
 *	impacts hotplug at all).
 *
 *	RETURNS:
 *	1 if the cable appears to be 40 wire.
 */
static int cable_is_40wire(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;

	/* If the controller thinks we are 40 wire, we are. */
	if (ap->cbl == ATA_CBL_PATA40)
		return 1;

	/* If the controller thinks we are 80 wire, we are. */
	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
		return 0;

	/* If the system is known to be 40 wire short cable (eg
	 * laptop), then we allow 80 wire modes even if the drive
	 * isn't sure.
	 */
	if (ap->cbl == ATA_CBL_PATA40_SHORT)
		return 0;

	/* If the controller doesn't know, we scan.
	 *
	 * Note: We look for all 40 wire detects at this point.  Any
	 * 80 wire detect is taken to be 80 wire cable because
	 * - in many setups only the one drive (slave if we are) will
	 *   give a valid detect
	 * - if you have a non detect capable drive you don't want it
	 *   to colour the choice
	 */
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ENABLED) {
			if (!ata_is_40wire(dev))
				return 0;
		}
	}
	return 1;
}
4373
4374
4375
4376
4377
4378
4379
4380
4381
4382
4383
4384
4385
/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, simplex restriction, cable detection, etc...
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 *	CFA Advanced TrueIDE timings are not allowed on a shared
	 *	cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_warn(dev,
			     "device is on DMA blacklist, disabling DMA\n");
	}

	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_warn(dev,
			     "simplex DMA is claimed by other device, disabling DMA\n");
	}

	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 * Check this last so that we know if the transfer rate was
	 * solely limited by the cable.  Unknown or 50-pin cable types
	 * are handled as type 80-wire here.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if (cable_is_40wire(ap)) {
			ata_dev_warn(dev,
				     "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
4451
4452
4453
4454
4455
4456
4457
4458
4459
4460
4461
4462
4463
4464
4465
/**
 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 *	@dev: Device to which command will be sent
 *
 *	Issue SET FEATURES - XFER MODE command to device @dev
 *	on port @ap.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	/* Some controllers and ATAPI devices show flaky interrupt
	 * behavior after setting xfer mode.  Use polling instead.
	 */
	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
	tf.protocol = ATA_PROT_NODATA;
	/* If we are using IORDY we must send the mode setting command */
	if (ata_pio_need_iordy(dev))
		tf.nsect = dev->xfer_mode;
	/* If the device has IORDY and the controller does not - turn it off */
	else if (ata_id_has_iordy(dev->id))
		tf.nsect = 0x01;
	else /* In the ancient relic department - skip all of this */
		return 0;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4496
4497
4498
4499
4500
4501
4502
4503
4504
4505
4506
4507
4508
4509
4510
4511
/**
 *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
 *	@dev: Device to which command will be sent
 *	@enable: Whether to enable or disable the feature
 *	@feature: The sector count represents the feature to set
 *
 *	Issue SET FEATURES - SATA FEATURES command to device @dev
 *	on port @ap with sector count
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - SATA features\n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = enable;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = feature;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
EXPORT_SYMBOL_GPL(ata_dev_set_feature);
4533
4534
4535
4536
4537
4538
4539
4540
4541
4542
4543
4544
4545
/**
 *	ata_dev_init_params - Issue INIT DEV PARAMS command
 *	@dev: Device to which command will be sent
 *	@heads: Number of heads (taskfile parameter)
 *	@sectors: Number of sectors (taskfile parameter)
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	/* A clean abort indicates an original or just out of spec drive
	   and we should continue as we issue the setup based on the
	   drive reported working geometry */
	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
		err_mask = 0;

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4576
4577
4578
4579
4580
4581
4582
4583
4584
4585
/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->sg;
	int dir = qc->dma_dir;

	WARN_ON_ONCE(sg == NULL);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* unmap using the element count from before dma_map_sg merging */
	if (qc->n_elem)
		dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->sg = NULL;
}
4602
4603
4604
4605
4606
4607
4608
4609
4610
4611
4612
4613
4614
4615
4616
4617int atapi_check_dma(struct ata_queued_cmd *qc)
4618{
4619 struct ata_port *ap = qc->ap;
4620
4621
4622
4623
4624 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4625 unlikely(qc->nbytes & 15))
4626 return 1;
4627
4628 if (ap->ops->check_atapi_dma)
4629 return ap->ops->check_atapi_dma(qc);
4630
4631 return 0;
4632}
4633
4634
4635
4636
4637
4638
4639
4640
4641
4642
4643
4644
4645
4646
4647
4648
4649int ata_std_qc_defer(struct ata_queued_cmd *qc)
4650{
4651 struct ata_link *link = qc->dev->link;
4652
4653 if (qc->tf.protocol == ATA_PROT_NCQ) {
4654 if (!ata_tag_valid(link->active_tag))
4655 return 0;
4656 } else {
4657 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4658 return 0;
4659 }
4660
4661 return ATA_DEFER_LINK;
4662}
4663
/* No-op ->qc_prep for controllers that need no per-command setup. */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4665
4666
4667
4668
4669
4670
4671
4672
4673
4674
4675
4676
4677
4678
/**
 *	ata_sg_init - Associate command with scatter-gather table.
 *	@qc: Command to be associated
 *	@sg: Scatter-gather table.
 *	@n_elem: Number of elements in s/g table.
 *
 *	Initialize the data-related elements of queued_cmd @qc
 *	to point to a scatter-gather table @sg, containing @n_elem
 *	elements.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
		 unsigned int n_elem)
{
	qc->sg = sg;
	qc->n_elem = n_elem;
	qc->cursg = qc->sg;	/* PIO transfer cursor starts at head */
}
4686
4687
4688
4689
4690
4691
4692
4693
4694
4695
4696
4697
4698
4699
/**
 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 *	@qc: Command with scatter-gather table to be mapped.
 *
 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */
static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int n_elem;

	VPRINTK("ENTER, ata%u\n", ap->print_id);

	/* dma_map_sg() returns 0 (i.e. < 1) on mapping failure */
	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
	if (n_elem < 1)
		return -1;

	DPRINTK("%d sg elements mapped\n", n_elem);
	/* keep the pre-merge count for dma_unmap_sg() in ata_sg_clean() */
	qc->orig_n_elem = qc->n_elem;
	qc->n_elem = n_elem;
	qc->flags |= ATA_QCFLAG_DMAMAP;

	return 0;
}
4718
4719
4720
4721
4722
4723
4724
4725
4726
4727
4728
4729
4730
/**
 *	swap_buf_le16 - swap halves of 16-bit words in place
 *	@buf: Buffer to swap
 *	@buf_words: Number of 16-bit words in buffer.
 *
 *	Swap halves of 16-bit words if needed to convert from
 *	little-endian byte order to native cpu byte order, or
 *	vice-versa.  No-op on little-endian hosts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void swap_buf_le16(u16 *buf, unsigned int buf_words)
{
#ifdef __BIG_ENDIAN
	unsigned int i;

	for (i = 0; i < buf_words; i++)
		buf[i] = le16_to_cpu(buf[i]);
#endif /* __BIG_ENDIAN */
}
4740
4741
4742
4743
4744
4745
4746
4747
4748
/**
 *	ata_qc_new - Request an available ATA command, for queueing
 *	@ap: target port
 *
 *	Allocate a free command tag on @ap and return the
 *	corresponding queued command.  The internal tag
 *	(ATA_MAX_QUEUE - 1) is never handed out here.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Pointer to a free qc, or NULL if none is available.
 */
static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
{
	struct ata_queued_cmd *qc = NULL;
	unsigned int i;

	/* no command while frozen */
	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
		return NULL;

	/* the last tag is reserved for internal command. */
	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
		if (!test_and_set_bit(i, &ap->qc_allocated)) {
			qc = __ata_qc_from_tag(ap, i);
			break;
		}

	if (qc)
		qc->tag = i;

	return qc;
}
4770
4771
4772
4773
4774
4775
4776
4777
4778
4779struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4780{
4781 struct ata_port *ap = dev->link->ap;
4782 struct ata_queued_cmd *qc;
4783
4784 qc = ata_qc_new(ap);
4785 if (qc) {
4786 qc->scsicmd = NULL;
4787 qc->ap = ap;
4788 qc->dev = dev;
4789
4790 ata_qc_reinit(qc);
4791 }
4792
4793 return qc;
4794}
4795
4796
4797
4798
4799
4800
4801
4802
4803
4804
4805
/**
 *	ata_qc_free - free unused ata_queued_cmd
 *	@qc: Command to complete
 *
 *	Designed to free unused ata_queued_cmd object
 *	in case something prevents using it.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_free(struct ata_queued_cmd *qc)
{
	struct ata_port *ap;
	unsigned int tag;

	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	ap = qc->ap;

	qc->flags = 0;
	tag = qc->tag;
	if (likely(ata_tag_valid(tag))) {
		/* poison the tag before releasing it, to catch reuse */
		qc->tag = ATA_TAG_POISON;
		clear_bit(tag, &ap->qc_allocated);
	}
}
4821
/* Low-level completion: tear down DMA mapping, clear tag bookkeeping
 * and invoke the command's completion callback.
 * Called with the host lock held. */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap;
	struct ata_link *link;

	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
	ap = qc->ap;
	link = qc->dev->link;

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		link->sactive &= ~(1 << qc->tag);
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called.
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
4860
4861static void fill_result_tf(struct ata_queued_cmd *qc)
4862{
4863 struct ata_port *ap = qc->ap;
4864
4865 qc->result_tf.flags = qc->tf.flags;
4866 ap->ops->qc_fill_rtf(qc);
4867}
4868
4869static void ata_verify_xfer(struct ata_queued_cmd *qc)
4870{
4871 struct ata_device *dev = qc->dev;
4872
4873 if (ata_is_nodata(qc->tf.protocol))
4874 return;
4875
4876 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4877 return;
4878
4879 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4880}
4881
4882
4883
4884
4885
4886
4887
4888
4889
4890
4891
4892
4893
4894
4895
4896
/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Finish @qc which is running on standard HSM.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc.  libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.
	 */
	if (ap->ops->error_handler) {
		struct ata_device *dev = qc->dev;
		struct ata_eh_info *ehi = &dev->link->eh_info;

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		/*
		 * Finish internal commands without any further
		 * processing and always fill the result TF for them.
		 */
		if (unlikely(ata_tag_internal(qc->tag))) {
			fill_result_tf(qc);
			__ata_qc_complete(qc);
			return;
		}

		/*
		 * Non-internal qc has failed.  Fill the result TF and
		 * summon EH.
		 */
		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			fill_result_tf(qc);
			ata_qc_schedule_eh(qc);
			return;
		}

		WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		/* Some commands invalidate cached device state; schedule
		 * EH revalidation after they complete successfully.
		 */
		switch (qc->tf.command) {
		case ATA_CMD_SET_FEATURES:
			if (qc->tf.feature != SETFEATURES_WC_ON &&
			    qc->tf.feature != SETFEATURES_WC_OFF)
				break;
			/* fall through - WC toggles need revalidation */
		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
		case ATA_CMD_SET_MULTI: /* multi_count changed */
			/* revalidate device */
			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
			ata_port_schedule_eh(ap);
			break;

		case ATA_CMD_SLEEP:
			dev->flags |= ATA_DFLAG_SLEEPING;
			break;
		}

		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
			ata_verify_xfer(qc);

		__ata_qc_complete(qc);
	} else {
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
4983
4984
4985
4986
4987
4988
4989
4990
4991
4992
4993
4994
4995
4996
4997
4998
4999
5000
5001
5002
5003
5004int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
5005{
5006 int nr_done = 0;
5007 u32 done_mask;
5008
5009 done_mask = ap->qc_active ^ qc_active;
5010
5011 if (unlikely(done_mask & qc_active)) {
5012 ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n",
5013 ap->qc_active, qc_active);
5014 return -EINVAL;
5015 }
5016
5017 while (done_mask) {
5018 struct ata_queued_cmd *qc;
5019 unsigned int tag = __ffs(done_mask);
5020
5021 qc = ata_qc_from_tag(ap, tag);
5022 if (qc) {
5023 ata_qc_complete(qc);
5024 nr_done++;
5025 }
5026 done_mask &= ~(1 << tag);
5027 }
5028
5029 return nr_done;
5030}
5031
5032
5033
5034
5035
5036
5037
5038
5039
5040
5041
5042
5043
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command to submission to device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;
	u8 prot = qc->tf.protocol;

	/* Make sure only one non-NCQ command is outstanding.  The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	if (ata_is_ncq(prot)) {
		WARN_ON_ONCE(link->sactive & (1 << qc->tag));

		if (!link->sactive)
			ap->nr_active_links++;
		link->sactive |= 1 << qc->tag;
	} else {
		WARN_ON_ONCE(link->sactive);

		ap->nr_active_links++;
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	/*
	 * We guarantee to LLDs that they will have at least one
	 * non-zero sg if the command is a data command.
	 */
	if (WARN_ON_ONCE(ata_is_data(prot) &&
			 (!qc->sg || !qc->n_elem || !qc->nbytes)))
		goto sys_err;

	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
				 (ap->flags & ATA_FLAG_PIO_DMA)))
		if (ata_sg_setup(qc))
			goto sys_err;

	/* if device is sleeping, schedule reset and abort the link */
	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
		link->eh_info.action |= ATA_EH_RESET;
		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
		ata_link_abort(link);
		return;
	}

	ap->ops->qc_prep(qc);

	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sys_err:
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	ata_qc_complete(qc);
}
5105
5106
5107
5108
5109
5110
5111
5112
5113
5114
5115
5116
5117
5118int sata_scr_valid(struct ata_link *link)
5119{
5120 struct ata_port *ap = link->ap;
5121
5122 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5123}
5124
5125
5126
5127
5128
5129
5130
5131
5132
5133
5134
5135
5136
5137
5138
5139
5140
5141int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5142{
5143 if (ata_is_host_link(link)) {
5144 if (sata_scr_valid(link))
5145 return link->ap->ops->scr_read(link, reg, val);
5146 return -EOPNOTSUPP;
5147 }
5148
5149 return sata_pmp_scr_read(link, reg, val);
5150}
5151
5152
5153
5154
5155
5156
5157
5158
5159
5160
5161
5162
5163
5164
5165
5166
5167
5168int sata_scr_write(struct ata_link *link, int reg, u32 val)
5169{
5170 if (ata_is_host_link(link)) {
5171 if (sata_scr_valid(link))
5172 return link->ap->ops->scr_write(link, reg, val);
5173 return -EOPNOTSUPP;
5174 }
5175
5176 return sata_pmp_scr_write(link, reg, val);
5177}
5178
5179
5180
5181
5182
5183
5184
5185
5186
5187
5188
5189
5190
5191
5192
5193
5194int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5195{
5196 if (ata_is_host_link(link)) {
5197 int rc;
5198
5199 if (sata_scr_valid(link)) {
5200 rc = link->ap->ops->scr_write(link, reg, val);
5201 if (rc == 0)
5202 rc = link->ap->ops->scr_read(link, reg, &val);
5203 return rc;
5204 }
5205 return -EOPNOTSUPP;
5206 }
5207
5208 return sata_pmp_scr_write(link, reg, val);
5209}
5210
5211
5212
5213
5214
5215
5216
5217
5218
5219
5220
5221
5222
5223
5224
5225bool ata_phys_link_online(struct ata_link *link)
5226{
5227 u32 sstatus;
5228
5229 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5230 ata_sstatus_online(sstatus))
5231 return true;
5232 return false;
5233}
5234
5235
5236
5237
5238
5239
5240
5241
5242
5243
5244
5245
5246
5247
5248
5249bool ata_phys_link_offline(struct ata_link *link)
5250{
5251 u32 sstatus;
5252
5253 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5254 !ata_sstatus_online(sstatus))
5255 return true;
5256 return false;
5257}
5258
5259
5260
5261
5262
5263
5264
5265
5266
5267
5268
5269
5270
5271
5272
5273
5274
5275bool ata_link_online(struct ata_link *link)
5276{
5277 struct ata_link *slave = link->ap->slave_link;
5278
5279 WARN_ON(link == slave);
5280
5281 return ata_phys_link_online(link) ||
5282 (slave && ata_phys_link_online(slave));
5283}
5284
5285
5286
5287
5288
5289
5290
5291
5292
5293
5294
5295
5296
5297
5298
5299
5300
5301bool ata_link_offline(struct ata_link *link)
5302{
5303 struct ata_link *slave = link->ap->slave_link;
5304
5305 WARN_ON(link == slave);
5306
5307 return ata_phys_link_offline(link) &&
5308 (!slave || ata_phys_link_offline(slave));
5309}
5310
5311#ifdef CONFIG_PM
/*
 * ata_port_request_pm - request a PM operation from EH
 * @ap: port to operate on
 * @mesg: PM message recorded for EH to act on
 * @action: EH actions to schedule on every link
 * @ehi_flags: EH flags to set on every link
 * @async: if non-NULL, return immediately and let EH report the
 *	result through this pointer; if NULL, block until EH finishes
 *	and return the result in the return value
 *
 * Returns 0 on success (or when the request was queued
 * asynchronously), otherwise the error code written by EH.
 */
static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int *async)
{
	struct ata_link *link;
	unsigned long flags;
	int rc = 0;

	/* Previous resume operation might still be in
	 * progress.  Wait for PM_PENDING to clear.
	 */
	if (ap->pflags & ATA_PFLAG_PM_PENDING) {
		if (async) {
			*async = -EAGAIN;
			return 0;
		}
		ata_port_wait_eh(ap);
		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
	}

	/* request PM ops to EH */
	spin_lock_irqsave(ap->lock, flags);

	ap->pm_mesg = mesg;
	if (async)
		ap->pm_result = async;
	else
		ap->pm_result = &rc;	/* EH writes the outcome into rc */

	ap->pflags |= ATA_PFLAG_PM_PENDING;
	ata_for_each_link(link, ap, HOST_FIRST) {
		link->eh_info.action |= action;
		link->eh_info.flags |= ehi_flags;
	}

	ata_port_schedule_eh(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	/* wait and check result, unless deferring to EH asynchronously */
	if (!async) {
		ata_port_wait_eh(ap);
		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
	}

	return rc;
}
5359
5360static int __ata_port_suspend_common(struct ata_port *ap, pm_message_t mesg, int *async)
5361{
5362
5363
5364
5365
5366
5367
5368
5369
5370 unsigned int ehi_flags = ATA_EHI_QUIET | ATA_EHI_NO_AUTOPSY |
5371 ATA_EHI_NO_RECOVERY;
5372 return ata_port_request_pm(ap, mesg, 0, ehi_flags, async);
5373}
5374
5375static int ata_port_suspend_common(struct device *dev, pm_message_t mesg)
5376{
5377 struct ata_port *ap = to_ata_port(dev);
5378
5379 return __ata_port_suspend_common(ap, mesg, NULL);
5380}
5381
5382static int ata_port_suspend(struct device *dev)
5383{
5384 if (pm_runtime_suspended(dev))
5385 return 0;
5386
5387 return ata_port_suspend_common(dev, PMSG_SUSPEND);
5388}
5389
5390static int ata_port_do_freeze(struct device *dev)
5391{
5392 if (pm_runtime_suspended(dev))
5393 return 0;
5394
5395 return ata_port_suspend_common(dev, PMSG_FREEZE);
5396}
5397
/* dev_pm_ops .poweroff: always perform a hibernate-style suspend. */
static int ata_port_poweroff(struct device *dev)
{
	return ata_port_suspend_common(dev, PMSG_HIBERNATE);
}
5402
5403static int __ata_port_resume_common(struct ata_port *ap, pm_message_t mesg,
5404 int *async)
5405{
5406 int rc;
5407
5408 rc = ata_port_request_pm(ap, mesg, ATA_EH_RESET,
5409 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, async);
5410 return rc;
5411}
5412
5413static int ata_port_resume_common(struct device *dev, pm_message_t mesg)
5414{
5415 struct ata_port *ap = to_ata_port(dev);
5416
5417 return __ata_port_resume_common(ap, mesg, NULL);
5418}
5419
5420static int ata_port_resume(struct device *dev)
5421{
5422 int rc;
5423
5424 rc = ata_port_resume_common(dev, PMSG_RESUME);
5425 if (!rc) {
5426 pm_runtime_disable(dev);
5427 pm_runtime_set_active(dev);
5428 pm_runtime_enable(dev);
5429 }
5430
5431 return rc;
5432}
5433
5434
5435
5436
5437
5438
5439
5440
5441
5442static int ata_port_runtime_idle(struct device *dev)
5443{
5444 struct ata_port *ap = to_ata_port(dev);
5445 struct ata_link *link;
5446 struct ata_device *adev;
5447
5448 ata_for_each_link(link, ap, HOST_FIRST) {
5449 ata_for_each_dev(adev, link, ENABLED)
5450 if (adev->class == ATA_DEV_ATAPI &&
5451 !zpodd_dev_enabled(adev))
5452 return -EBUSY;
5453 }
5454
5455 return 0;
5456}
5457
/* dev_pm_ops .runtime_suspend: autosuspend via the common path. */
static int ata_port_runtime_suspend(struct device *dev)
{
	return ata_port_suspend_common(dev, PMSG_AUTO_SUSPEND);
}
5462
/* dev_pm_ops .runtime_resume: autoresume via the common path. */
static int ata_port_runtime_resume(struct device *dev)
{
	return ata_port_resume_common(dev, PMSG_AUTO_RESUME);
}
5467
/* PM callbacks wired into the ata_port device type (see ata_port_type).
 * thaw and restore reuse the resume path.
 */
static const struct dev_pm_ops ata_port_pm_ops = {
	.suspend = ata_port_suspend,
	.resume = ata_port_resume,
	.freeze = ata_port_do_freeze,
	.thaw = ata_port_resume,
	.poweroff = ata_port_poweroff,
	.restore = ata_port_resume,

	.runtime_suspend = ata_port_runtime_suspend,
	.runtime_resume = ata_port_runtime_resume,
	.runtime_idle = ata_port_runtime_idle,
};
5480
5481
5482
5483
5484
5485
/* Asynchronous port suspend for SAS/libsas users; the result is
 * reported through @async instead of the return value.
 */
int ata_sas_port_async_suspend(struct ata_port *ap, int *async)
{
	return __ata_port_suspend_common(ap, PMSG_SUSPEND, async);
}
EXPORT_SYMBOL_GPL(ata_sas_port_async_suspend);
5491
/* Asynchronous port resume for SAS/libsas users; the result is
 * reported through @async instead of the return value.
 */
int ata_sas_port_async_resume(struct ata_port *ap, int *async)
{
	return __ata_port_resume_common(ap, PMSG_RESUME, async);
}
EXPORT_SYMBOL_GPL(ata_sas_port_async_resume);
5497
5498
5499
5500
5501
5502
5503
5504
5505
/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Suspend @host.  Actual operation is performed by port suspend;
 *	this only records the PM state on the host device.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	host->dev->power.power_state = mesg;
	return 0;
}
5511
5512
5513
5514
5515
5516
5517
/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host.  Actual operation is performed by port resume;
 *	this only records the PM state on the host device.
 */
void ata_host_resume(struct ata_host *host)
{
	host->dev->power.power_state = PMSG_ON;
}
5522#endif
5523
/* Device type for ATA port devices; hooks the PM ops in when PM is
 * configured.
 */
struct device_type ata_port_type = {
	.name = "ata_port",
#ifdef CONFIG_PM
	.pm = &ata_port_pm_ops,
#endif
};
5530
5531
5532
5533
5534
5535
5536
5537
5538
5539
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = ata_dev_phys_link(dev);
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the attached device, reset together */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/* Low bits of dev->flags and dev->horkage are updated
	 * asynchronously (e.g. by EH); clear them under the host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	/* zero the per-probe region of the structure */
	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
5565
5566
5567
5568
5569
5570
5571
5572
5573
5574
5575
5576
5577void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5578{
5579 int i;
5580
5581
5582 memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
5583 ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);
5584
5585 link->ap = ap;
5586 link->pmp = pmp;
5587 link->active_tag = ATA_TAG_POISON;
5588 link->hw_sata_spd_limit = UINT_MAX;
5589
5590
5591 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5592 struct ata_device *dev = &link->device[i];
5593
5594 dev->link = link;
5595 dev->devno = dev - link->device;
5596#ifdef CONFIG_ATA_ACPI
5597 dev->gtf_filter = ata_acpi_gtf_filter;
5598#endif
5599 ata_dev_init(dev);
5600 }
5601}
5602
5603
5604
5605
5606
5607
5608
5609
5610
5611
5612
5613
5614
5615
5616int sata_link_init_spd(struct ata_link *link)
5617{
5618 u8 spd;
5619 int rc;
5620
5621 rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5622 if (rc)
5623 return rc;
5624
5625 spd = (link->saved_scontrol >> 4) & 0xf;
5626 if (spd)
5627 link->hw_sata_spd_limit &= (1 << spd) - 1;
5628
5629 ata_force_link_limits(link);
5630
5631 link->sata_spd_limit = link->hw_sata_spd_limit;
5632
5633 return 0;
5634}
5635
5636
5637
5638
5639
5640
5641
5642
5643
5644
5645
5646
5647
/**
 *	ata_port_alloc - allocate and initialize basic ATA port resources
 *	@host: ATA host this allocated port belongs to
 *
 *	Allocate and initialize basic ATA port resources.
 *
 *	RETURNS:
 *	Allocated ATA port on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	/* stay frozen until the initial probing reset is performed */
	ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
	ap->lock = &host->lock;
	ap->print_id = -1;
	ap->local_port_no = -1;
	ap->host = host;
	ap->dev = host->dev;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	mutex_init(&ap->scsi_scan_mutex);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);
	init_completion(&ap->park_req_pending);
	init_timer_deferrable(&ap->fastdrain_timer);
	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
	ap->fastdrain_timer.data = (unsigned long)ap;

	ap->cbl = ATA_CBL_NONE;

	ata_link_init(ap, &ap->link, 0);

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	ata_sff_port_init(ap);

	return ap;
}
5696
5697static void ata_host_release(struct device *gendev, void *res)
5698{
5699 struct ata_host *host = dev_get_drvdata(gendev);
5700 int i;
5701
5702 for (i = 0; i < host->n_ports; i++) {
5703 struct ata_port *ap = host->ports[i];
5704
5705 if (!ap)
5706 continue;
5707
5708 if (ap->scsi_host)
5709 scsi_host_put(ap->scsi_host);
5710
5711 kfree(ap->pmp_link);
5712 kfree(ap->slave_link);
5713 kfree(ap);
5714 host->ports[i] = NULL;
5715 }
5716
5717 dev_set_drvdata(gendev, NULL);
5718}
5719
5720
5721
5722
5723
5724
5725
5726
5727
5728
5729
5730
5731
5732
5733
5734
5735
5736
5737
5738
5739
/**
 *	ata_host_alloc - allocate and init basic ATA host resources
 *	@dev: generic device this host is associated with
 *	@max_ports: maximum number of ATA ports associated with this host
 *
 *	Allocate and initialize basic ATA host resources.  LLD calls
 *	this function to allocate a host, initializes it fully and
 *	attaches it using ata_host_register().
 *
 *	RETURNS:
 *	Allocated and initialized host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;

	DPRINTK("ENTER\n");

	/* open a devres group so everything allocated below can be
	 * released as a unit on error
	 */
	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return NULL;

	/* alloc a container for our list of ATA ports (buses) */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
	/* alloc a container for our list of ATA ports (buses) */
	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
	if (!host)
		goto err_out;

	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	mutex_init(&host->eh_mutex);
	host->dev = dev;
	host->n_ports = max_ports;

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

 err_out:
	devres_release_group(dev, NULL);
	return NULL;
}
5785
5786
5787
5788
5789
5790
5791
5792
5793
5794
5795
5796
5797
5798
5799
5800
5801
/**
 *	ata_host_alloc_pinfo - alloc host and init with port_info array
 *	@dev: generic device this host is associated with
 *	@ppi: array of ATA port_info to initialize host with
 *	@n_ports: number of ATA ports attached to this host
 *
 *	Allocate ATA host and initialize with info from @ppi.  If NULL
 *	terminated, @ppi may contain fewer entries than @n_ports.  The
 *	last entry will be used for the remaining ports.
 *
 *	NOTE(review): assumes @ppi[0] is non-NULL; a NULL first entry
 *	would leave @pi NULL when dereferenced below — confirm against
 *	all callers.
 *
 *	RETURNS:
 *	Allocated and initialized host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc_pinfo(struct device *dev,
				      const struct ata_port_info * const * ppi,
				      int n_ports)
{
	const struct ata_port_info *pi;
	struct ata_host *host;
	int i, j;

	host = ata_host_alloc(dev, n_ports);
	if (!host)
		return NULL;

	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* advance through @ppi until a NULL terminator; after
		 * that, keep reusing the last non-NULL entry
		 */
		if (ppi[j])
			pi = ppi[j++];

		ap->pio_mask = pi->pio_mask;
		ap->mwdma_mask = pi->mwdma_mask;
		ap->udma_mask = pi->udma_mask;
		ap->flags |= pi->flags;
		ap->link.flags |= pi->link_flags;
		ap->ops = pi->port_ops;

		/* first non-dummy port_ops becomes the host's ops */
		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
			host->ops = pi->port_ops;
	}

	return host;
}
5833
5834
5835
5836
5837
5838
5839
5840
5841
5842
5843
5844
5845
5846
5847
5848
5849
5850
5851
5852
5853
5854
5855
5856
5857
5858
5859
5860
5861
5862
5863
5864
5865
5866
5867
5868
5869
5870
5871
5872
5873
5874
5875
5876
5877
5878
5879
/**
 *	ata_slave_link_init - initialize slave link
 *	@ap: port to initialize slave link for
 *
 *	Create and initialize slave link for @ap.  Slave links are
 *	mutually exclusive with port multiplier support.
 *
 *	LOCKING:
 *	Should be called before host is registered.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_slave_link_init(struct ata_port *ap)
{
	struct ata_link *link;

	WARN_ON(ap->slave_link);
	WARN_ON(ap->flags & ATA_FLAG_PMP);

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	/* slave link always uses pmp number 1 */
	ata_link_init(ap, link, 1);
	ap->slave_link = link;
	return 0;
}
5895
5896static void ata_host_stop(struct device *gendev, void *res)
5897{
5898 struct ata_host *host = dev_get_drvdata(gendev);
5899 int i;
5900
5901 WARN_ON(!(host->flags & ATA_HOST_STARTED));
5902
5903 for (i = 0; i < host->n_ports; i++) {
5904 struct ata_port *ap = host->ports[i];
5905
5906 if (ap->ops->port_stop)
5907 ap->ops->port_stop(ap);
5908 }
5909
5910 if (host->ops->host_stop)
5911 host->ops->host_stop(host);
5912}
5913
5914
5915
5916
5917
5918
5919
5920
5921
5922
5923
5924
5925
5926
5927
5928
5929
5930
5931
5932
5933
/**
 *	ata_finalize_port_ops - finalize ata_port_operations
 *	@ops: ata_port_operations to finalize
 *
 *	An ata_port_operations can inherit from another ops and the
 *	inheritance can span multiple generations.  This function
 *	walks the inheritance chain and flattens it: every NULL slot
 *	before the ->inherits field is filled from the nearest
 *	ancestor that defines it, then ->inherits is cleared so the
 *	ops is final.  Slots holding IS_ERR() markers (used to
 *	explicitly disable an inherited method) are reset to NULL.
 *
 *	Note that the slot-by-slot copy treats the ops struct as an
 *	array of pointers up to (but excluding) ->inherits.
 *
 *	LOCKING:
 *	A global lock serializes finalization so shared ancestor ops
 *	are flattened exactly once.
 */
static void ata_finalize_port_ops(struct ata_port_operations *ops)
{
	static DEFINE_SPINLOCK(lock);
	const struct ata_port_operations *cur;
	void **begin = (void **)ops;
	void **end = (void **)&ops->inherits;
	void **pp;

	if (!ops || !ops->inherits)
		return;

	spin_lock(&lock);

	/* fill NULL slots from each ancestor in turn */
	for (cur = ops->inherits; cur; cur = cur->inherits) {
		void **inherit = (void **)cur;

		for (pp = begin; pp < end; pp++, inherit++)
			if (!*pp)
				*pp = *inherit;
	}

	/* IS_ERR markers mean "explicitly not implemented" */
	for (pp = begin; pp < end; pp++)
		if (IS_ERR(*pp))
			*pp = NULL;

	ops->inherits = NULL;

	spin_unlock(&lock);
}
5963
5964
5965
5966
5967
5968
5969
5970
5971
5972
5973
5974
5975
5976
5977
5978
5979
/**
 *	ata_host_start - start and freeze ports of an ATA host
 *	@host: ATA host to start ports for
 *
 *	Start and then freeze ports of @host.  Started status is
 *	recorded in host->flags, so this function can be called
 *	multiple times.  Ports are guaranteed to get started only
 *	once.  If host->ops isn't initialized yet, it is set to the
 *	first non-dummy port ops.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if all ports are started successfully, -errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int have_stop = 0;
	void *start_dr = NULL;
	int i, rc;

	if (host->flags & ATA_HOST_STARTED)
		return 0;

	ata_finalize_port_ops(host->ops);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_finalize_port_ops(ap->ops);

		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_stop)
			have_stop = 1;
	}

	if (host->ops->host_stop)
		have_stop = 1;

	/* register an ata_host_stop devres only when some port or the
	 * host needs stopping on teardown
	 */
	if (have_stop) {
		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
		if (!start_dr)
			return -ENOMEM;
	}

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				if (rc != -ENODEV)
					dev_err(host->dev,
						"failed to start port %d (errno=%d)\n",
						i, rc);
				goto err_out;
			}
		}
		ata_eh_freeze_port(ap);
	}

	if (start_dr)
		devres_add(host->dev, start_dr);
	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	/* roll back: stop the ports started so far */
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	devres_free(start_dr);
	return rc;
}
6043
6044
6045
6046
6047
6048
6049
6050
/**
 *	ata_host_init - Initialize a host struct for sas (ipr, libsas)
 *	@host: host to initialize
 *	@dev: device host is attached to
 *	@ops: port_ops
 */
void ata_host_init(struct ata_host *host, struct device *dev,
		   struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	mutex_init(&host->eh_mutex);
	host->dev = dev;
	host->ops = ops;
}
6059
/* Kick EH to perform boot-time probing of @ap.  Returns immediately;
 * probing proceeds asynchronously in EH context.
 */
void __ata_port_probe(struct ata_port *ap)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	unsigned long flags;

	/* kick EH for boot probing */
	spin_lock_irqsave(ap->lock, flags);

	ehi->probe_mask |= ATA_ALL_DEVICES;
	ehi->action |= ATA_EH_RESET;
	ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

	/* mark the port as loading so EH treats this as initial probe */
	ap->pflags &= ~ATA_PFLAG_INITIALIZING;
	ap->pflags |= ATA_PFLAG_LOADING;
	ata_port_schedule_eh(ap);

	spin_unlock_irqrestore(ap->lock, flags);
}
6078
6079int ata_port_probe(struct ata_port *ap)
6080{
6081 int rc = 0;
6082
6083 if (ap->ops->error_handler) {
6084 __ata_port_probe(ap);
6085 ata_port_wait_eh(ap);
6086 } else {
6087 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6088 rc = ata_bus_probe(ap);
6089 DPRINTK("ata%u: bus probe end\n", ap->print_id);
6090 }
6091 return rc;
6092}
6093
6094
/* Async callback that probes one port and then scans its SCSI host. */
static void async_port_probe(void *data, async_cookie_t cookie)
{
	struct ata_port *ap = data;

	/*
	 * If we're not allowed to scan this host in parallel,
	 * we need to wait until all previous scans have completed
	 * before going further.  Port 0 is always allowed to proceed
	 * immediately.
	 */
	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
		async_synchronize_cookie(cookie);

	(void)ata_port_probe(ap);

	/* in order to keep device order, we need to synchronize at this
	 * point before starting the SCSI scan
	 */
	async_synchronize_cookie(cookie);

	ata_scsi_scan_host(ap, 1);
}
6116
6117
6118
6119
6120
6121
6122
6123
6124
6125
6126
6127
6128
6129
6130
6131
6132
/**
 *	ata_host_register - register initialized ATA host
 *	@host: ATA host to register
 *	@sht: template for SCSI host
 *
 *	Register initialized ATA host.  @host is allocated using
 *	ata_host_alloc() and fully initialized by LLD.  This function
 *	starts ports, registers @host with ATA and SCSI layers and
 *	probes registered devices.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_err(host->dev, "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* The LLD may have lowered host->n_ports after allocation;
	 * free the excess ports beyond n_ports (the ports array was
	 * allocated with one extra NULL-terminating slot).
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++) {
		host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
		host->ports[i]->local_port_no = i + 1;
	}

	/* register transport ports before SCSI hosts */
	for (i = 0; i < host->n_ports; i++) {
		rc = ata_tport_add(host->dev,host->ports[i]);
		if (rc) {
			goto err_tadd;
		}
	}

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		goto err_tadd;

	/* set cable, sata_spd_limit and report per-port settings */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned long xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);
		if (ap->slave_link)
			sata_link_init_spd(ap->slave_link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap)) {
			ata_port_info(ap, "%cATA max %s %s\n",
				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
				      ata_mode_string(xfer_mask),
				      ap->link.eh_info.desc);
			ata_ehi_clear_desc(&ap->link.eh_info);
		} else
			ata_port_info(ap, "DUMMY\n");
	}

	/* perform each probe asynchronously */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		async_schedule(async_port_probe, ap);
	}

	return 0;

 err_tadd:
	/* unwind the transport ports added so far */
	while (--i >= 0) {
		ata_tport_delete(host->ports[i]);
	}
	return rc;

}
6212
6213
6214
6215
6216
6217
6218
6219
6220
6221
6222
6223
6224
6225
6226
6227
6228
6229
6230
6231
6232
6233
6234
6235
6236int ata_host_activate(struct ata_host *host, int irq,
6237 irq_handler_t irq_handler, unsigned long irq_flags,
6238 struct scsi_host_template *sht)
6239{
6240 int i, rc;
6241
6242 rc = ata_host_start(host);
6243 if (rc)
6244 return rc;
6245
6246
6247 if (!irq) {
6248 WARN_ON(irq_handler);
6249 return ata_host_register(host, sht);
6250 }
6251
6252 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6253 dev_driver_string(host->dev), host);
6254 if (rc)
6255 return rc;
6256
6257 for (i = 0; i < host->n_ports; i++)
6258 ata_port_desc(host->ports[i], "irq %d", irq);
6259
6260 rc = ata_host_register(host, sht);
6261
6262 if (rc)
6263 devm_free_irq(host->dev, irq, host);
6264
6265 return rc;
6266}
6267
6268
6269
6270
6271
6272
6273
6274
6275
6276
6277
6278
/**
 *	ata_port_detach - Detach ATA port in preparation of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	ata_port_schedule_eh(ap);
	spin_unlock_irqrestore(ap->lock, flags);

	/* wait till EH commits suicide */
	ata_port_wait_eh(ap);

	/* it better be dead now */
	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));

	cancel_delayed_work_sync(&ap->hotplug_task);

 skip_eh:
	/* remove transport links for PMP links, if any, then the port */
	if (ap->pmp_link) {
		int i;
		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
			ata_tlink_delete(&ap->pmp_link[i]);
	}
	ata_tport_delete(ap);

	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
6311
6312
6313
6314
6315
6316
6317
6318
6319
6320
6321void ata_host_detach(struct ata_host *host)
6322{
6323 int i;
6324
6325 for (i = 0; i < host->n_ports; i++)
6326 ata_port_detach(host->ports[i]);
6327
6328
6329 ata_acpi_dissociate(host);
6330}
6331
6332#ifdef CONFIG_PCI
6333
6334
6335
6336
6337
6338
6339
6340
6341
6342
6343
6344
/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	ata_host_detach(pci_get_drvdata(pdev));
}
6351
6352
6353int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6354{
6355 unsigned long tmp = 0;
6356
6357 switch (bits->width) {
6358 case 1: {
6359 u8 tmp8 = 0;
6360 pci_read_config_byte(pdev, bits->reg, &tmp8);
6361 tmp = tmp8;
6362 break;
6363 }
6364 case 2: {
6365 u16 tmp16 = 0;
6366 pci_read_config_word(pdev, bits->reg, &tmp16);
6367 tmp = tmp16;
6368 break;
6369 }
6370 case 4: {
6371 u32 tmp32 = 0;
6372 pci_read_config_dword(pdev, bits->reg, &tmp32);
6373 tmp = tmp32;
6374 break;
6375 }
6376
6377 default:
6378 return -EINVAL;
6379 }
6380
6381 tmp &= bits->mask;
6382
6383 return (tmp == bits->val) ? 1 : 0;
6384}
6385
6386#ifdef CONFIG_PM
/* Save PCI state and disable the device; drop to D3hot only when the
 * PM message indicates actual sleep.
 */
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event & PM_EVENT_SLEEP)
		pci_set_power_state(pdev, PCI_D3hot);
}
6395
6396int ata_pci_device_do_resume(struct pci_dev *pdev)
6397{
6398 int rc;
6399
6400 pci_set_power_state(pdev, PCI_D0);
6401 pci_restore_state(pdev);
6402
6403 rc = pcim_enable_device(pdev);
6404 if (rc) {
6405 dev_err(&pdev->dev,
6406 "failed to enable device after resume (%d)\n", rc);
6407 return rc;
6408 }
6409
6410 pci_set_master(pdev);
6411 return 0;
6412}
6413
6414int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6415{
6416 struct ata_host *host = pci_get_drvdata(pdev);
6417 int rc = 0;
6418
6419 rc = ata_host_suspend(host, mesg);
6420 if (rc)
6421 return rc;
6422
6423 ata_pci_device_do_suspend(pdev, mesg);
6424
6425 return 0;
6426}
6427
/* Resume the PCI device, then the ATA host attached to it. */
int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	int rc = ata_pci_device_do_resume(pdev);

	if (!rc)
		ata_host_resume(host);
	return rc;
}
6438#endif
6439
6440#endif
6441
6442
6443
6444
6445
6446
6447
6448
6449
6450
6451
6452
/**
 *	ata_platform_remove_one - Platform layer callback for device removal
 *	@pdev: Platform device that was removed
 *
 *	LOCKING:
 *	Inherited from platform layer (may sleep).
 */
int ata_platform_remove_one(struct platform_device *pdev)
{
	ata_host_detach(platform_get_drvdata(pdev));
	return 0;
}
6461
6462static int __init ata_parse_force_one(char **cur,
6463 struct ata_force_ent *force_ent,
6464 const char **reason)
6465{
6466
6467
6468
6469
6470
6471 static struct ata_force_param force_tbl[] __initdata = {
6472 { "40c", .cbl = ATA_CBL_PATA40 },
6473 { "80c", .cbl = ATA_CBL_PATA80 },
6474 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
6475 { "unk", .cbl = ATA_CBL_PATA_UNK },
6476 { "ign", .cbl = ATA_CBL_PATA_IGN },
6477 { "sata", .cbl = ATA_CBL_SATA },
6478 { "1.5Gbps", .spd_limit = 1 },
6479 { "3.0Gbps", .spd_limit = 2 },
6480 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
6481 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
6482 { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID },
6483 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
6484 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
6485 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
6486 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
6487 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
6488 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
6489 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
6490 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
6491 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
6492 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
6493 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
6494 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
6495 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6496 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6497 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6498 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6499 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6500 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6501 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6502 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6503 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6504 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6505 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6506 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6507 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6508 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6509 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6510 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6511 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6512 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6513 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6514 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6515 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6516 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
6517 { "nohrst", .lflags = ATA_LFLAG_NO_HRST },
6518 { "nosrst", .lflags = ATA_LFLAG_NO_SRST },
6519 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6520 { "rstonce", .lflags = ATA_LFLAG_RST_ONCE },
6521 { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR },
6522 };
6523 char *start = *cur, *p = *cur;
6524 char *id, *val, *endp;
6525 const struct ata_force_param *match_fp = NULL;
6526 int nr_matches = 0, i;
6527
6528
6529 while (*p != '\0' && *p != ',')
6530 p++;
6531
6532 if (*p == '\0')
6533 *cur = p;
6534 else
6535 *cur = p + 1;
6536
6537 *p = '\0';
6538
6539
6540 p = strchr(start, ':');
6541 if (!p) {
6542 val = strstrip(start);
6543 goto parse_val;
6544 }
6545 *p = '\0';
6546
6547 id = strstrip(start);
6548 val = strstrip(p + 1);
6549
6550
6551 p = strchr(id, '.');
6552 if (p) {
6553 *p++ = '\0';
6554 force_ent->device = simple_strtoul(p, &endp, 10);
6555 if (p == endp || *endp != '\0') {
6556 *reason = "invalid device";
6557 return -EINVAL;
6558 }
6559 }
6560
6561 force_ent->port = simple_strtoul(id, &endp, 10);
6562 if (p == endp || *endp != '\0') {
6563 *reason = "invalid port/link";
6564 return -EINVAL;
6565 }
6566
6567 parse_val:
6568
6569 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6570 const struct ata_force_param *fp = &force_tbl[i];
6571
6572 if (strncasecmp(val, fp->name, strlen(val)))
6573 continue;
6574
6575 nr_matches++;
6576 match_fp = fp;
6577
6578 if (strcasecmp(val, fp->name) == 0) {
6579 nr_matches = 1;
6580 break;
6581 }
6582 }
6583
6584 if (!nr_matches) {
6585 *reason = "unknown value";
6586 return -EINVAL;
6587 }
6588 if (nr_matches > 1) {
6589 *reason = "ambigious value";
6590 return -EINVAL;
6591 }
6592
6593 force_ent->param = *match_fp;
6594
6595 return 0;
6596}
6597
6598static void __init ata_parse_force_param(void)
6599{
6600 int idx = 0, size = 1;
6601 int last_port = -1, last_device = -1;
6602 char *p, *cur, *next;
6603
6604
6605 for (p = ata_force_param_buf; *p; p++)
6606 if (*p == ',')
6607 size++;
6608
6609 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6610 if (!ata_force_tbl) {
6611 printk(KERN_WARNING "ata: failed to extend force table, "
6612 "libata.force ignored\n");
6613 return;
6614 }
6615
6616
6617 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6618 const char *reason = "";
6619 struct ata_force_ent te = { .port = -1, .device = -1 };
6620
6621 next = cur;
6622 if (ata_parse_force_one(&next, &te, &reason)) {
6623 printk(KERN_WARNING "ata: failed to parse force "
6624 "parameter \"%s\" (%s)\n",
6625 cur, reason);
6626 continue;
6627 }
6628
6629 if (te.port == -1) {
6630 te.port = last_port;
6631 te.device = last_device;
6632 }
6633
6634 ata_force_tbl[idx++] = te;
6635
6636 last_port = te.port;
6637 last_device = te.device;
6638 }
6639
6640 ata_force_tbl_size = idx;
6641}
6642
6643static int __init ata_init(void)
6644{
6645 int rc;
6646
6647 ata_parse_force_param();
6648
6649 rc = ata_sff_init();
6650 if (rc) {
6651 kfree(ata_force_tbl);
6652 return rc;
6653 }
6654
6655 libata_transport_init();
6656 ata_scsi_transport_template = ata_attach_transport();
6657 if (!ata_scsi_transport_template) {
6658 ata_sff_exit();
6659 rc = -ENOMEM;
6660 goto err_out;
6661 }
6662
6663 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6664 return 0;
6665
6666err_out:
6667 return rc;
6668}
6669
/**
 *	ata_exit - libata module unload callback
 *
 *	Tear down in reverse order of ata_init(): detach the SCSI
 *	transport template, shut down the libata transport classes and
 *	the SFF helper library, then free the libata.force table.
 */
static void __exit ata_exit(void)
{
	ata_release_transport(ata_scsi_transport_template);
	libata_transport_exit();
	ata_sff_exit();
	kfree(ata_force_tbl);
}
6677
6678subsys_initcall(ata_init);
6679module_exit(ata_exit);
6680
/* shared ratelimit state: at most 1 message per HZ/5 interval */
static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);

/**
 *	ata_ratelimit - rate limiting helper for libata messages
 *
 *	RETURNS:
 *	Non-zero if the caller may emit a message now, 0 if it should
 *	be suppressed by the shared ratelimit state above.
 */
int ata_ratelimit(void)
{
	return __ratelimit(&ratelimit);
}
6687
6688
6689
6690
6691
6692
6693
6694
6695
6696
6697
6698
6699
6700
6701
6702void ata_msleep(struct ata_port *ap, unsigned int msecs)
6703{
6704 bool owns_eh = ap && ap->host->eh_owner == current;
6705
6706 if (owns_eh)
6707 ata_eh_release(ap);
6708
6709 msleep(msecs);
6710
6711 if (owns_eh)
6712 ata_eh_acquire(ap);
6713}
6714
6715
6716
6717
6718
6719
6720
6721
6722
6723
6724
6725
6726
6727
6728
6729
6730
6731
6732
6733
6734
6735
6736
6737
6738
6739u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
6740 unsigned long interval, unsigned long timeout)
6741{
6742 unsigned long deadline;
6743 u32 tmp;
6744
6745 tmp = ioread32(reg);
6746
6747
6748
6749
6750
6751 deadline = ata_deadline(jiffies, timeout);
6752
6753 while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6754 ata_msleep(ap, interval);
6755 tmp = ioread32(reg);
6756 }
6757
6758 return tmp;
6759}
6760
6761
6762
6763
/* Dummy qc_issue: reject every command outright. */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}
6768
/* Dummy error handler: nothing to recover on a dummy port. */
static void ata_dummy_error_handler(struct ata_port *ap)
{
	/* intentionally empty */
}
6773
/*
 * Port operations for dummy ports: command issue always fails with
 * AC_ERR_SYSTEM and error handling is a no-op beyond the standard
 * EH scheduling hooks.
 */
struct ata_port_operations ata_dummy_port_ops = {
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.error_handler		= ata_dummy_error_handler,
	.sched_eh		= ata_std_sched_eh,
	.end_eh			= ata_std_end_eh,
};
6781
/* Port info wrapping the dummy port operations above. */
const struct ata_port_info ata_dummy_port_info = {
	.port_ops		= &ata_dummy_port_ops,
};
6785
6786
6787
6788
6789int ata_port_printk(const struct ata_port *ap, const char *level,
6790 const char *fmt, ...)
6791{
6792 struct va_format vaf;
6793 va_list args;
6794 int r;
6795
6796 va_start(args, fmt);
6797
6798 vaf.fmt = fmt;
6799 vaf.va = &args;
6800
6801 r = printk("%sata%u: %pV", level, ap->print_id, &vaf);
6802
6803 va_end(args);
6804
6805 return r;
6806}
6807EXPORT_SYMBOL(ata_port_printk);
6808
6809int ata_link_printk(const struct ata_link *link, const char *level,
6810 const char *fmt, ...)
6811{
6812 struct va_format vaf;
6813 va_list args;
6814 int r;
6815
6816 va_start(args, fmt);
6817
6818 vaf.fmt = fmt;
6819 vaf.va = &args;
6820
6821 if (sata_pmp_attached(link->ap) || link->ap->slave_link)
6822 r = printk("%sata%u.%02u: %pV",
6823 level, link->ap->print_id, link->pmp, &vaf);
6824 else
6825 r = printk("%sata%u: %pV",
6826 level, link->ap->print_id, &vaf);
6827
6828 va_end(args);
6829
6830 return r;
6831}
6832EXPORT_SYMBOL(ata_link_printk);
6833
6834int ata_dev_printk(const struct ata_device *dev, const char *level,
6835 const char *fmt, ...)
6836{
6837 struct va_format vaf;
6838 va_list args;
6839 int r;
6840
6841 va_start(args, fmt);
6842
6843 vaf.fmt = fmt;
6844 vaf.va = &args;
6845
6846 r = printk("%sata%u.%02u: %pV",
6847 level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
6848 &vaf);
6849
6850 va_end(args);
6851
6852 return r;
6853}
6854EXPORT_SYMBOL(ata_dev_printk);
6855
/**
 *	ata_print_version - print the driver version for a device
 *	@dev: device the version pertains to
 *	@version: version string to print
 */
void ata_print_version(const struct device *dev, const char *version)
{
	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
}
6860EXPORT_SYMBOL(ata_print_version);
6861
6862
6863
6864
6865
6866
6867
6868EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6869EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6870EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6871EXPORT_SYMBOL_GPL(ata_base_port_ops);
6872EXPORT_SYMBOL_GPL(sata_port_ops);
6873EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6874EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6875EXPORT_SYMBOL_GPL(ata_link_next);
6876EXPORT_SYMBOL_GPL(ata_dev_next);
6877EXPORT_SYMBOL_GPL(ata_std_bios_param);
6878EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
6879EXPORT_SYMBOL_GPL(ata_host_init);
6880EXPORT_SYMBOL_GPL(ata_host_alloc);
6881EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6882EXPORT_SYMBOL_GPL(ata_slave_link_init);
6883EXPORT_SYMBOL_GPL(ata_host_start);
6884EXPORT_SYMBOL_GPL(ata_host_register);
6885EXPORT_SYMBOL_GPL(ata_host_activate);
6886EXPORT_SYMBOL_GPL(ata_host_detach);
6887EXPORT_SYMBOL_GPL(ata_sg_init);
6888EXPORT_SYMBOL_GPL(ata_qc_complete);
6889EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6890EXPORT_SYMBOL_GPL(atapi_cmd_type);
6891EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6892EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6893EXPORT_SYMBOL_GPL(ata_pack_xfermask);
6894EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
6895EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
6896EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6897EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6898EXPORT_SYMBOL_GPL(ata_mode_string);
6899EXPORT_SYMBOL_GPL(ata_id_xfermask);
6900EXPORT_SYMBOL_GPL(ata_do_set_mode);
6901EXPORT_SYMBOL_GPL(ata_std_qc_defer);
6902EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6903EXPORT_SYMBOL_GPL(ata_dev_disable);
6904EXPORT_SYMBOL_GPL(sata_set_spd);
6905EXPORT_SYMBOL_GPL(ata_wait_after_reset);
6906EXPORT_SYMBOL_GPL(sata_link_debounce);
6907EXPORT_SYMBOL_GPL(sata_link_resume);
6908EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
6909EXPORT_SYMBOL_GPL(ata_std_prereset);
6910EXPORT_SYMBOL_GPL(sata_link_hardreset);
6911EXPORT_SYMBOL_GPL(sata_std_hardreset);
6912EXPORT_SYMBOL_GPL(ata_std_postreset);
6913EXPORT_SYMBOL_GPL(ata_dev_classify);
6914EXPORT_SYMBOL_GPL(ata_dev_pair);
6915EXPORT_SYMBOL_GPL(ata_ratelimit);
6916EXPORT_SYMBOL_GPL(ata_msleep);
6917EXPORT_SYMBOL_GPL(ata_wait_register);
6918EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6919EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6920EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6921EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6922EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
6923EXPORT_SYMBOL_GPL(sata_scr_valid);
6924EXPORT_SYMBOL_GPL(sata_scr_read);
6925EXPORT_SYMBOL_GPL(sata_scr_write);
6926EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6927EXPORT_SYMBOL_GPL(ata_link_online);
6928EXPORT_SYMBOL_GPL(ata_link_offline);
6929#ifdef CONFIG_PM
6930EXPORT_SYMBOL_GPL(ata_host_suspend);
6931EXPORT_SYMBOL_GPL(ata_host_resume);
6932#endif
6933EXPORT_SYMBOL_GPL(ata_id_string);
6934EXPORT_SYMBOL_GPL(ata_id_c_string);
6935EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
6936EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6937
6938EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6939EXPORT_SYMBOL_GPL(ata_timing_find_mode);
6940EXPORT_SYMBOL_GPL(ata_timing_compute);
6941EXPORT_SYMBOL_GPL(ata_timing_merge);
6942EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
6943
6944#ifdef CONFIG_PCI
6945EXPORT_SYMBOL_GPL(pci_test_config_bits);
6946EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6947#ifdef CONFIG_PM
6948EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6949EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6950EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6951EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6952#endif
6953#endif
6954
6955EXPORT_SYMBOL_GPL(ata_platform_remove_one);
6956
6957EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
6958EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
6959EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
6960EXPORT_SYMBOL_GPL(ata_port_desc);
6961#ifdef CONFIG_PCI
6962EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
6963#endif
6964EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6965EXPORT_SYMBOL_GPL(ata_link_abort);
6966EXPORT_SYMBOL_GPL(ata_port_abort);
6967EXPORT_SYMBOL_GPL(ata_port_freeze);
6968EXPORT_SYMBOL_GPL(sata_async_notification);
6969EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6970EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6971EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6972EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6973EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
6974EXPORT_SYMBOL_GPL(ata_do_eh);
6975EXPORT_SYMBOL_GPL(ata_std_error_handler);
6976
6977EXPORT_SYMBOL_GPL(ata_cable_40wire);
6978EXPORT_SYMBOL_GPL(ata_cable_80wire);
6979EXPORT_SYMBOL_GPL(ata_cable_unknown);
6980EXPORT_SYMBOL_GPL(ata_cable_ignore);
6981EXPORT_SYMBOL_GPL(ata_cable_sata);
6982