1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43#include <linux/kernel.h>
44#include <linux/module.h>
45#include <linux/pci.h>
46#include <linux/init.h>
47#include <linux/list.h>
48#include <linux/mm.h>
49#include <linux/spinlock.h>
50#include <linux/blkdev.h>
51#include <linux/delay.h>
52#include <linux/timer.h>
53#include <linux/interrupt.h>
54#include <linux/completion.h>
55#include <linux/suspend.h>
56#include <linux/workqueue.h>
57#include <linux/scatterlist.h>
58#include <linux/io.h>
59#include <linux/async.h>
60#include <linux/log2.h>
61#include <linux/slab.h>
62#include <scsi/scsi.h>
63#include <scsi/scsi_cmnd.h>
64#include <scsi/scsi_host.h>
65#include <linux/libata.h>
66#include <asm/byteorder.h>
67#include <linux/cdrom.h>
68#include <linux/ratelimit.h>
69#include <linux/pm_runtime.h>
70#include <linux/platform_device.h>
71
72#include "libata.h"
73#include "libata-transport.h"
74
75
/*
 * SATA PHY debounce timing tables.  Each entry is presumably
 * { poll interval, stable duration, timeout } in milliseconds —
 * NOTE(review): confirm against the debounce helper that consumes them.
 */
const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
79
/*
 * Base port operations inherited by all libata drivers: the standard
 * pre/post reset hooks and the standard error-handler entry points.
 */
const struct ata_port_operations ata_base_port_ops = {
	.prereset		= ata_std_prereset,
	.postreset		= ata_std_postreset,
	.error_handler		= ata_std_error_handler,
	.sched_eh		= ata_std_sched_eh,
	.end_eh			= ata_std_end_eh,
};
87
/*
 * Port operations for SATA ports: everything from ata_base_port_ops
 * plus the standard qc deferral policy and SATA hardreset.
 */
const struct ata_port_operations sata_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_defer		= ata_std_qc_defer,
	.hardreset		= sata_std_hardreset,
};
94
/* Forward declarations for helpers defined later in this file. */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

/* Monotonically increasing id used to name ports/devices in log output. */
atomic_t ata_print_id = ATOMIC_INIT(0);
102
/* One set of values that the libata.force= parameter can override. */
struct ata_force_param {
	const char	*name;		/* human-readable name for log messages */
	unsigned int	cbl;		/* forced cable type (ATA_CBL_NONE = unset) */
	int		spd_limit;	/* forced PHY speed limit (0 = unset) */
	unsigned long	xfer_mask;	/* forced transfer-mode mask (0 = unset) */
	unsigned int	horkage_on;	/* horkage bits to turn on */
	unsigned int	horkage_off;	/* horkage bits to turn off */
	unsigned int	lflags;		/* link flags to force on */
};

/* One parsed libata.force= entry; port/device of -1 act as wildcards
 * (see the "!= -1 &&" checks in the ata_force_*() consumers below). */
struct ata_force_ent {
	int			port;
	int			device;
	struct ata_force_param	param;
};

static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

/* Raw parameter text; __initdata because it is only parsed at init time. */
static char ata_force_param_buf[PAGE_SIZE] __initdata;

module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
126
127static int atapi_enabled = 1;
128module_param(atapi_enabled, int, 0444);
129MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
130
131static int atapi_dmadir = 0;
132module_param(atapi_dmadir, int, 0444);
133MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
134
135int atapi_passthru16 = 1;
136module_param(atapi_passthru16, int, 0444);
137MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
138
139int libata_fua = 0;
140module_param_named(fua, libata_fua, int, 0444);
141MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
142
143static int ata_ignore_hpa;
144module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
145MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
146
147static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
148module_param_named(dma, libata_dma_mask, int, 0444);
149MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
150
151static int ata_probe_timeout;
152module_param(ata_probe_timeout, int, 0444);
153MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
154
155int libata_noacpi = 0;
156module_param_named(noacpi, libata_noacpi, int, 0444);
157MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
158
159int libata_allow_tpm = 0;
160module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
161MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
162
163static int atapi_an;
164module_param(atapi_an, int, 0444);
165MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=0ff [default], 1=on)");
166
167MODULE_AUTHOR("Jeff Garzik");
168MODULE_DESCRIPTION("Library module for ATA devices");
169MODULE_LICENSE("GPL");
170MODULE_VERSION(DRV_VERSION);
171
172
173static bool ata_sstatus_online(u32 sstatus)
174{
175 return (sstatus & 0xf) == 0x3;
176}
177
178
179
180
181
182
183
184
185
186
187
188
189
/**
 *	ata_link_next - link iteration helper
 *	@link: the previous link, NULL to start
 *	@ap: ATA port containing links to iterate
 *	@mode: iteration mode, one of ATA_LITER_*
 *
 *	Returns the link after @link, or NULL when iteration is done.
 *	ATA_LITER_EDGE visits only edge links (PMP links if a PMP is
 *	attached, otherwise the host link); ATA_LITER_PMP_FIRST visits
 *	PMP links then the host link; ATA_LITER_HOST_FIRST the reverse.
 *	The slave link, if any, is visited right before termination.
 */
struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
			       enum ata_link_iter_mode mode)
{
	BUG_ON(mode != ATA_LITER_EDGE &&
	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);

	/* NULL link indicates start of iteration */
	if (!link)
		switch (mode) {
		case ATA_LITER_EDGE:
		case ATA_LITER_PMP_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_HOST_FIRST:
			return &ap->link;
		}

	/* we just iterated over the host link, what's next? */
	if (link == &ap->link)
		switch (mode) {
		case ATA_LITER_HOST_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_PMP_FIRST:
			if (unlikely(ap->slave_link))
				return ap->slave_link;
			/* fall through */
		case ATA_LITER_EDGE:
			return NULL;
		}

	/* slave_link excludes PMP; it is always the last link visited */
	if (unlikely(link == ap->slave_link))
		return NULL;

	/* we were over a PMP link */
	if (++link < ap->pmp_link + ap->nr_pmp_links)
		return link;

	if (mode == ATA_LITER_PMP_FIRST)
		return &ap->link;

	return NULL;
}
236
237
238
239
240
241
242
243
244
245
246
247
248
/**
 *	ata_dev_next - device iteration helper
 *	@dev: the previous device, NULL to start
 *	@link: ATA link containing devices to iterate
 *	@mode: iteration mode, one of ATA_DITER_*
 *
 *	Returns the device after @dev on @link, or NULL when done.
 *	ENABLED/ENABLED_REVERSE modes skip devices for which
 *	ata_dev_enabled() is false; REVERSE modes walk from the highest
 *	device number down.
 */
struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
				enum ata_dev_iter_mode mode)
{
	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);

	/* NULL dev indicates start of iteration */
	if (!dev)
		switch (mode) {
		case ATA_DITER_ENABLED:
		case ATA_DITER_ALL:
			dev = link->device;
			goto check;
		case ATA_DITER_ENABLED_REVERSE:
		case ATA_DITER_ALL_REVERSE:
			dev = link->device + ata_link_max_devices(link) - 1;
			goto check;
		}

 next:
	/* move to the next one */
	switch (mode) {
	case ATA_DITER_ENABLED:
	case ATA_DITER_ALL:
		if (++dev < link->device + ata_link_max_devices(link))
			goto check;
		return NULL;
	case ATA_DITER_ENABLED_REVERSE:
	case ATA_DITER_ALL_REVERSE:
		if (--dev >= link->device)
			goto check;
		return NULL;
	}

 check:
	/* in ENABLED modes, keep advancing until an enabled device is found */
	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
	    !ata_dev_enabled(dev))
		goto next;
	return dev;
}
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304struct ata_link *ata_dev_phys_link(struct ata_device *dev)
305{
306 struct ata_port *ap = dev->link->ap;
307
308 if (!ap->slave_link)
309 return dev->link;
310 if (!dev->devno)
311 return &ap->link;
312 return ap->slave_link;
313}
314
315
316
317
318
319
320
321
322
323
324
325
326
327
/**
 *	ata_force_cbl - force cable type according to libata.force
 *	@ap: ATA port of interest
 *
 *	Applies the last matching cable-type entry from the force table
 *	(hence the reverse walk: later parameters override earlier ones).
 *	A port value of -1 in an entry matches any port.
 */
void ata_force_cbl(struct ata_port *ap)
{
	int i;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != ap->print_id)
			continue;

		if (fe->param.cbl == ATA_CBL_NONE)
			continue;

		ap->cbl = fe->param.cbl;
		ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
		return;
	}
}
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
/**
 *	ata_force_link_limits - force link limits according to libata.force
 *	@link: ATA link of interest
 *
 *	Forces the PHY speed limit (first match wins, walking backwards so
 *	the last-specified parameter takes effect) and ORs in any forced
 *	link flags from every matching entry.  Host links are addressed
 *	with device number 15 + pmp so they can be distinguished from
 *	PMP fan-out links.
 */
static void ata_force_link_limits(struct ata_link *link)
{
	bool did_spd = false;
	int linkno = link->pmp;
	int i;

	if (ata_is_host_link(link))
		linkno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != linkno)
			continue;

		/* only honor the first spd limit found (= last specified) */
		if (!did_spd && fe->param.spd_limit) {
			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
					fe->param.name);
			did_spd = true;
		}

		/* link flags accumulate across all matching entries */
		if (fe->param.lflags) {
			link->flags |= fe->param.lflags;
			ata_link_notice(link,
					"FORCE: link flag 0x%x forced -> 0x%x\n",
					fe->param.lflags, link->flags);
		}
	}
}
398
399
400
401
402
403
404
405
406
407
408
409
/**
 *	ata_force_xfermask - force xfermask according to libata.force
 *	@dev: ATA device of interest
 *
 *	Applies the last matching xfer_mask entry (reverse walk) and
 *	returns after the first hit.  Forcing a faster class clears the
 *	slower classes' masks so mode selection picks the forced class.
 *	@alt_devno handles the 15 + pmp host-link addressing convention.
 */
static void ata_force_xfermask(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];
		unsigned long pio_mask, mwdma_mask, udma_mask;

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!fe->param.xfer_mask)
			continue;

		ata_unpack_xfermask(fe->param.xfer_mask,
				    &pio_mask, &mwdma_mask, &udma_mask);
		if (udma_mask)
			dev->udma_mask = udma_mask;
		else if (mwdma_mask) {
			dev->udma_mask = 0;
			dev->mwdma_mask = mwdma_mask;
		} else {
			dev->udma_mask = 0;
			dev->mwdma_mask = 0;
			dev->pio_mask = pio_mask;
		}

		ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
			       fe->param.name);
		return;
	}
}
452
453
454
455
456
457
458
459
460
461
462
463
/**
 *	ata_force_horkage - force horkage according to libata.force
 *	@dev: ATA device of interest
 *
 *	Unlike the other ata_force_*() helpers, this walks the table
 *	FORWARD and applies every matching entry, so horkage settings
 *	accumulate and later entries can undo earlier ones.
 */
static void ata_force_horkage(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = 0; i < ata_force_tbl_size; i++) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		/* skip entries that would not change anything */
		if (!(~dev->horkage & fe->param.horkage_on) &&
		    !(dev->horkage & fe->param.horkage_off))
			continue;

		dev->horkage |= fe->param.horkage_on;
		dev->horkage &= ~fe->param.horkage_off;

		ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
			       fe->param.name);
	}
}
495
496
497
498
499
500
501
502
503
504
505
506
507
/**
 *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
 *	@opcode: SCSI opcode
 *
 *	Determine ATAPI command type from @opcode: ATAPI_{READ,WRITE,
 *	READ_CD,PASS_THRU,MISC}.  ATA_12/ATA_16 fall through to
 *	ATAPI_MISC when passthru is disabled via the module parameter.
 */
int atapi_cmd_type(u8 opcode)
{
	switch (opcode) {
	case GPCMD_READ_10:
	case GPCMD_READ_12:
		return ATAPI_READ;

	case GPCMD_WRITE_10:
	case GPCMD_WRITE_12:
	case GPCMD_WRITE_AND_VERIFY_10:
		return ATAPI_WRITE;

	case GPCMD_READ_CD:
	case GPCMD_READ_CD_MSF:
		return ATAPI_READ_CD;

	case ATA_16:
	case ATA_12:
		if (atapi_passthru16)
			return ATAPI_PASS_THRU;
		/* fall through */
	default:
		return ATAPI_MISC;
	}
}
533
534
535
536
537
538
539
540
541
542
543
544
545
546
/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@pmp: Port multiplier port
 *	@is_cmd: This FIS is for command
 *	@fis: Buffer into which data will output
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	Register - Host to Device FIS (type 0x27) of up to 20 bytes.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = pmp & 0xf;		/* port multiplier number */
	if (is_cmd)
		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */

	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}
577
578
579
580
581
582
583
584
585
586
587
588
/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *	The inverse of ata_tf_to_fis(); note that bytes 0/1 (FIS type and
 *	pmp), 11 (hob_feature) and 15 (ctl) are intentionally not read back.
 */
void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}
606
/*
 * R/W command lookup table, indexed by
 *	index (0 = multi, 8 = pio, 16 = dma) + fua*4 + lba48*2 + write.
 * Zero entries mean the combination is invalid (e.g. FUA without LBA48).
 */
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
636
637
638
639
640
641
642
643
644
645
646
647
/**
 *	ata_rwcmd_protocol - set taskfile r/w command and protocol
 *	@tf: command to examine and configure
 *	@dev: device the command targets
 *
 *	Examine the device configuration and tf->flags to select
 *	the proper read/write command and protocol to use.  Returns 0
 *	on success, -1 when no valid command exists for the combination
 *	(zero entry in ata_rw_cmds[]).
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	Decode the block address the taskfile addresses: LBA48 or LBA28
 *	when ATA_TFLAG_LBA is set, otherwise a CHS-to-LBA conversion
 *	using the device geometry.  A reported CHS sector of 0 is
 *	invalid and clamped to 1 with a warning.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= (u64)tf->hob_lbal << 24;
		} else
			/* LBA28: bits 27:24 live in the device register */
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		if (!sect) {
			ata_dev_warn(dev,
				     "device reported invalid CHS sector 0\n");
			sect = 1; /* oh well */
		}

		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
	}

	return block;
}
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag (used only when NCQ is selected)
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.  Selects NCQ,
 *	LBA48, LBA28 or CHS addressing in that preference order.
 *
 *	RETURNS:
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		/* NCQ: tag goes into nsect bits 7:3, count into feature */
		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = ATA_LBA;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28: bits 27:24 go into the device register */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl = track / dev->heads;
		head = track % dev->heads;
		sect = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit:
		 *   cylinder 0-65535, head 0-15, sector 1-255
		 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865unsigned long ata_pack_xfermask(unsigned long pio_mask,
866 unsigned long mwdma_mask,
867 unsigned long udma_mask)
868{
869 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
870 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
871 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
872}
873
874
875
876
877
878
879
880
881
882
883
884void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
885 unsigned long *mwdma_mask, unsigned long *udma_mask)
886{
887 if (pio_mask)
888 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
889 if (mwdma_mask)
890 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
891 if (udma_mask)
892 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
893}
894
/* Maps each transfer class's bit range in an xfer_mask (shift, width)
 * to its base XFER_* mode value; terminated by shift == -1. */
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};
904
905
906
907
908
909
910
911
912
913
914
915
916
917
/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return the matching XFER_* value for the highest bit set in
 *	@xfer_mask, or 0xff if no match is found.
 */
u8 ata_xfer_mask2mode(unsigned long xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0xff;
}
928
929
930
931
932
933
934
935
936
937
938
939
940
/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return the mask of all modes in @xfer_mode's transfer class up to
 *	and including @xfer_mode, or 0 if @xfer_mode is invalid.
 */
unsigned long ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
				& ~((1 << ent->shift) - 1);
	return 0;
}
951
952
953
954
955
956
957
958
959
960
961
962
963
964int ata_xfer_mode2shift(unsigned long xfer_mode)
965{
966 const struct ata_xfer_ent *ent;
967
968 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
969 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
970 return ent->shift;
971 return -1;
972}
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only the highest bit counts
 *
 *	Return a constant C string describing the highest speed in
 *	@xfer_mask, or "<n/a>" when the mask is empty or out of range.
 *	The table is laid out to match the packed xfer_mask bit order
 *	(PIO, then MWDMA, then UDMA).
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}
1019
/* Map a 1-based SATA link-speed value to its name; "<unknown>" for
 * zero or out-of-range values. */
const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
		"6.0 Gbps",
	};
	unsigned int idx = spd - 1;

	if (spd == 0 || idx >= ARRAY_SIZE(spd_str))
		return "<unknown>";

	return spd_str[idx];
}
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section of the
 *	ATA/PI specification, plus the SATA signatures for port
 *	multipliers (0x9669) and SEMB (0xc33c) devices, keyed on the
 *	lbam/lbah (cyl-lo/cyl-hi) pair.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
 *	%ATA_DEV_SEMB or %ATA_DEV_UNKNOWN.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* signature: lbam == 0x00, lbah == 0x00 -> ATA */
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	/* signature: lbam == 0x14, lbah == 0xeb -> ATAPI */
	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	/* signature: lbam == 0x69, lbah == 0x96 -> port multiplier */
	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");
		return ATA_DEV_PMP;
	}

	/* signature: lbam == 0x3c, lbah == 0xc3 -> SEMB */
	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		DPRINTK("found SEMB device by sig (could be ATA device)\n");
		return ATA_DEV_SEMB;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110void ata_id_string(const u16 *id, unsigned char *s,
1111 unsigned int ofs, unsigned int len)
1112{
1113 unsigned int c;
1114
1115 BUG_ON(len & 1);
1116
1117 while (len > 0) {
1118 c = id[ofs] >> 8;
1119 *s = c;
1120 s++;
1121
1122 c = id[ofs] & 0xff;
1123 *s = c;
1124 s++;
1125
1126 ofs++;
1127 len -= 2;
1128 }
1129}
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	ata_id_string(id, s, ofs, len - 1);

	/* strip trailing padding spaces and NUL-terminate */
	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}
1157
/* Compute the device capacity in sectors from IDENTIFY data: LBA48,
 * LBA28, current CHS or default CHS fields, in that preference order. */
static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
		else
			return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
	} else {
		if (ata_id_current_chs_valid(id))
			return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
			       id[ATA_ID_CUR_SECTORS];
		else
			return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
			       id[ATA_ID_SECTORS];
	}
}
1174
1175u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1176{
1177 u64 sectors = 0;
1178
1179 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1180 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1181 sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1182 sectors |= (tf->lbah & 0xff) << 16;
1183 sectors |= (tf->lbam & 0xff) << 8;
1184 sectors |= (tf->lbal & 0xff);
1185
1186 return sectors;
1187}
1188
1189u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1190{
1191 u64 sectors = 0;
1192
1193 sectors |= (tf->device & 0x0f) << 24;
1194 sectors |= (tf->lbah & 0xff) << 16;
1195 sectors |= (tf->lbam & 0xff) << 8;
1196 sectors |= (tf->lbal & 0xff);
1197
1198 return sectors;
1199}
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
/**
 *	ata_read_native_max_address - Read native max address
 *	@dev: target device
 *	@max_sectors: out parameter for the result
 *
 *	Perform an LBA48 or LBA28 native size query upon the device in
 *	question.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted by the drive.
 *	-EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to read native max address (err_mask=0x%x)\n",
			     err_mask);
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	/* the result registers hold the last addressable sector */
	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf) + 1;
	else
		*max_sectors = ata_tf_to_lba(&tf) + 1;
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors via SET MAX (EXT).
 *	The command takes the last addressable sector, hence the
 *	new_sectors-- up front.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to set max address (err_mask=0x%x)\n",
			     err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
/**
 *	ata_hpa_resize - Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and
 *	resize it if required to the full size of the media.  The caller
 *	must check the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA handling from now on.
		 */
		if (rc == -EACCES || !unlock_hpa) {
			ata_dev_warn(dev,
				     "HPA support seems broken, skipping HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}
	dev->n_native_sectors = native_sectors;

	/* nothing to do? */
	if (native_sectors <= sectors || !unlock_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_info(dev,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_warn(dev,
				"native sectors (%llu) is smaller than sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_warn(dev,
			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
			     (unsigned long long)sectors,
			     (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_err(dev,
			    "failed to re-read IDENTIFY data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_info(dev,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page for debugging (capability, mode and validity words).
 */
static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device.  This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	RETURNS:
 *	Computed xfermask (pio | mwdma | udma, packed).
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;	/* PIO0-2 are always supported */
	} else {
		/* If word 64 isn't valid then word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* NOTE(review): no attempt here to consult the devices'
		 * IORDY capability for anything beyond PIO2 — confirm
		 * whether older-device IORDY filtering happens elsewhere.
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 *	Process compact flash extended modes
		 */
		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
1514
1515static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1516{
1517 struct completion *waiting = qc->private_data;
1518
1519 complete(waiting);
1520}
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sgl: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	the command on entry and the result on return.  Timeout and
 *	error conditions are reported via the return value.  No
 *	recovery action is taken after a command times out; the
 *	caller's responsibility.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure.
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	int auto_timeout = 0;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while the port is frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* Initialize internal qc.  With EH, the internal tag is
	 * reserved; without EH, tag 0 is used (no other command can
	 * be in flight).
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* Save and clear the active command state so the internal
	 * command runs alone; restored before returning.
	 */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);

	/* set DMADIR bit on ATAPI DMA reads for DMADIR-bridged devices */
	if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
	    dma_dir == DMA_FROM_DEVICE)
		qc->tf.feature |= ATAPI_DMADIR;

	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout) {
		if (ata_probe_timeout)
			timeout = ata_probe_timeout * 1000;
		else {
			timeout = ata_internal_cmd_timeout(dev, command);
			auto_timeout = 1;
		}
	}

	/* release EH exclusion while sleeping so EH can run */
	if (ap->ops->error_handler)
		ata_eh_release(ap);

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	if (ap->ops->error_handler)
		ata_eh_acquire(ap);

	ata_sff_flush_pio_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* The command may have completed between the timeout
		 * and re-acquiring the lock; only treat it as timed
		 * out if it is still active.
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
					     command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up: copy out the result and restore preempted state */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	spin_unlock_irqrestore(ap->lock, flags);

	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
		ata_internal_cmd_timed_out(dev, command);

	return err_mask;
}
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729unsigned ata_exec_internal(struct ata_device *dev,
1730 struct ata_taskfile *tf, const u8 *cdb,
1731 int dma_dir, void *buf, unsigned int buflen,
1732 unsigned long timeout)
1733{
1734 struct scatterlist *psg = NULL, sg;
1735 unsigned int n_elem = 0;
1736
1737 if (dma_dir != DMA_NONE) {
1738 WARN_ON(!buf);
1739 sg_init_one(&sg, buf, buflen);
1740 psg = &sg;
1741 n_elem++;
1742 }
1743
1744 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1745 timeout);
1746}
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1763{
1764 struct ata_taskfile tf;
1765
1766 ata_tf_init(dev, &tf);
1767
1768 tf.command = cmd;
1769 tf.flags |= ATA_TFLAG_DEVICE;
1770 tf.protocol = ATA_PROT_NODATA;
1771
1772 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1773}
1774
1775
1776
1777
1778
1779
1780
1781
1782unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1783{
1784
1785
1786
1787
1788 if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1789 return 0;
1790
1791
1792
1793 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1794 return 0;
1795
1796 if (ata_id_is_cfa(adev->id)
1797 && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1798 return 0;
1799
1800 if (adev->pio_mode > XFER_PIO_2)
1801 return 1;
1802
1803 if (ata_id_has_iordy(adev->id))
1804 return 1;
1805 return 0;
1806}
1807
1808
1809
1810
1811
1812
1813
1814
1815static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1816{
1817
1818 if (adev->id[ATA_ID_FIELD_VALID] & 2) {
1819 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1820
1821 if (pio) {
1822
1823 if (pio > 240)
1824 return 3 << ATA_SHIFT_PIO;
1825 return 7 << ATA_SHIFT_PIO;
1826 }
1827 }
1828 return 3 << ATA_SHIFT_PIO;
1829}
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841unsigned int ata_do_dev_read_id(struct ata_device *dev,
1842 struct ata_taskfile *tf, u16 *id)
1843{
1844 return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1845 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1846}
1847
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	bool is_semb = class == ATA_DEV_SEMB;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_dbg(dev, "%s: ENTER\n", __func__);

retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_SEMB:
		/* some hard drives report a SEMB signature; treat as ATA */
		class = ATA_DEV_ATA;
		/* fall through */
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	if (ap->ops->read_id)
		err_mask = ap->ops->read_id(dev, &tf, id);
	else
		err_mask = ata_do_dev_read_id(dev, &tf, id);

	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			ata_dev_dbg(dev, "NODEV after polling detection\n");
			return -ENOENT;
		}

		if (is_semb) {
			ata_dev_info(dev,
			"IDENTIFY failed on device w/ SEMB sig, disabled\n");
			/* SEMB is not supported yet */
			*p_class = ATA_DEV_SEMB_UNSUP;
			return 0;
		}

		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			/* Device or controller might have reported
			 * the wrong device class.  Give a shot at the
			 * other IDENTIFY if the current one is
			 * aborted by the device.
			 */
			if (may_fallback) {
				may_fallback = 0;

				if (class == ATA_DEV_ATA)
					class = ATA_DEV_ATAPI;
				else
					class = ATA_DEV_ATA;
				goto retry;
			}

			/* Control reaches here iff the device aborted
			 * both flavors of IDENTIFYs which happens
			 * sometimes with phantom devices.
			 */
			ata_dev_dbg(dev,
				    "both IDENTIFYs aborted, assuming NODEV\n");
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
		ata_dev_dbg(dev, "dumping IDENTIFY data, "
			    "class=%d may_fallback=%d tried_spinup=%d\n",
			    class, may_fallback, tried_spinup);
		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
		if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
							ata_id_is_ata(id)) {
			ata_dev_dbg(dev,
				"host indicates ignore ATA devices, ignored\n");
			return -ENOENT;
		}
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed.  reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
			     reason, err_mask);
	return rc;
}
2055
2056static int ata_do_link_spd_horkage(struct ata_device *dev)
2057{
2058 struct ata_link *plink = ata_dev_phys_link(dev);
2059 u32 target, target_limit;
2060
2061 if (!sata_scr_valid(plink))
2062 return 0;
2063
2064 if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2065 target = 1;
2066 else
2067 return 0;
2068
2069 target_limit = (1 << target) - 1;
2070
2071
2072 if (plink->sata_spd_limit <= target_limit)
2073 return 0;
2074
2075 plink->sata_spd_limit = target_limit;
2076
2077
2078
2079
2080
2081 if (plink->sata_spd > target) {
2082 ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2083 sata_spd_string(target));
2084 return -EAGAIN;
2085 }
2086 return 0;
2087}
2088
2089static inline u8 ata_dev_knobble(struct ata_device *dev)
2090{
2091 struct ata_port *ap = dev->link->ap;
2092
2093 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2094 return 0;
2095
2096 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2097}
2098
/*
 * Configure Native Command Queueing for @dev and describe the result in
 * @desc, a human-readable string of at most @desc_sz bytes.
 *
 * RETURNS:
 * 0 on success (including NCQ unsupported/disabled), -EIO if enabling
 * FPDMA auto-activation failed with a non-device error.
 */
static int ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->link->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
	unsigned int err_mask;
	char *aa_desc = "";

	if (!ata_id_has_ncq(dev->id)) {
		/* Device doesn't support NCQ at all. */
		desc[0] = '\0';
		return 0;
	}
	if (dev->horkage & ATA_HORKAGE_NONCQ) {
		/* Device claims NCQ but is known broken - don't use it. */
		snprintf(desc, desc_sz, "NCQ (not used)");
		return 0;
	}
	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	/* Enable FPDMA auto-activation if host and device both support it
	 * and the device isn't known broken in that regard.
	 */
	if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
		(ap->flags & ATA_FLAG_FPDMA_AA) &&
		ata_id_has_fpdma_aa(dev->id)) {
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
			SATA_FPDMA_AA);
		if (err_mask) {
			ata_dev_err(dev,
				    "failed to enable AA (error_mask=0x%x)\n",
				    err_mask);
			if (err_mask != AC_ERR_DEV) {
				/* Host-side failure: remember and bail. */
				dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
				return -EIO;
			}
			/* Device rejected AA - carry on without it. */
		} else
			aa_desc = ", AA";
	}

	/* Report device depth, or host/device depth if host is smaller. */
	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
			ddepth, aa_desc);
	return 0;
}
2144
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned long xfer_mask;
	unsigned int err_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_dbg(dev, "%s: ENTER\n", __func__);

	/* set horkage */
	dev->horkage |= ata_dev_blacklisted(dev);
	ata_force_horkage(dev);

	if (dev->horkage & ATA_HORKAGE_DISABLE) {
		ata_dev_info(dev, "unsupported device, disabling\n");
		ata_dev_disable(dev);
		return 0;
	}

	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
	    dev->class == ATA_DEV_ATAPI) {
		ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
			     atapi_enabled ? "not supported with this driver"
			     : "disabled");
		ata_dev_disable(dev);
		return 0;
	}

	rc = ata_do_link_spd_horkage(dev);
	if (rc)
		return rc;

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_dbg(dev,
			    "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			    "85:%04x 86:%04x 87:%04x 88:%04x\n",
			    __func__,
			    id[49], id[82], id[83], id[84],
			    id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;
	dev->multi_count = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			/* CPRM may make this media unusable */
			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
				ata_dev_warn(dev,
	"supports DRM functions and may not be fully accessible\n");
			snprintf(revbuf, 7, "CFA");
		} else {
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
			/* Warn the user if the device has TPM extensions */
			if (ata_id_has_tpm(id))
				ata_dev_warn(dev,
	"supports DRM functions and may not be fully accessible\n");
		}

		dev->n_sectors = ata_id_n_sectors(id);

		/* get current R/W Multiple count setting */
		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
			unsigned int max = dev->id[47] & 0xff;
			unsigned int cnt = dev->id[59] & 0xff;
			/* only recognize/allow powers of two here */
			if (is_power_of_2(max) && is_power_of_2(cnt))
				if (cnt <= max)
					dev->multi_count = cnt;
		}

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[24];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
			if (rc)
				return rc;

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_info(dev, "%s: %s, %s, max %s\n",
					     revbuf, modelbuf, fwrevbuf,
					     ata_mode_string(xfer_mask));
				ata_dev_info(dev,
					     "%llu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders = id[1];
			dev->heads = id[3];
			dev->sectors = id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads = id[55];
				dev->sectors = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_info(dev, "%s: %s, %s, max %s\n",
					     revbuf, modelbuf, fwrevbuf,
					     ata_mode_string(xfer_mask));
				ata_dev_info(dev,
					     "%llu sectors, multi %u, CHS %u/%u/%u\n",
					     (unsigned long long)dev->n_sectors,
					     dev->multi_count, dev->cylinders,
					     dev->heads, dev->sectors);
			}
		}

		/* Check and mark DevSlp capability. Get DevSlp timing variables
		 * from SATA Settings page of Identify Device Data Log.
		 */
		if (ata_id_has_devslp(dev->id)) {
			u8 *sata_setting = ap->sector_buf;
			int i, j;

			dev->flags |= ATA_DFLAG_DEVSLP;
			err_mask = ata_read_log_page(dev,
						     ATA_LOG_SATA_ID_DEV_DATA,
						     ATA_LOG_SATA_SETTINGS,
						     sata_setting,
						     1);
			if (err_mask)
				ata_dev_dbg(dev,
					    "failed to get Identify Device Data, Emask 0x%x\n",
					    err_mask);
			else
				for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
					j = ATA_LOG_DEVSLP_OFFSET + i;
					dev->devslp_timing[i] = sata_setting[j];
				}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		const char *dma_dir_string = "";
		u32 sntf;

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_warn(dev, "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support.  If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if (atapi_an &&
		    (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!sata_pmp_attached(ap) ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {

			/* issue SET feature command to turn this on */
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_AN);
			if (err_mask)
				ata_dev_err(dev,
					    "failed to enable ATAPI AN (err_mask=0x%x)\n",
					    err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
			dev->flags |= ATA_DFLAG_DMADIR;
			dma_dir_string = ", DMADIR";
		}

		if (ata_id_has_da(dev->id)) {
			dev->flags |= ATA_DFLAG_DA;
			zpodd_init(dev);
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_info(dev,
				     "ATAPI: %s, %s, max %s%s%s%s\n",
				     modelbuf, fwrevbuf,
				     ata_mode_string(xfer_mask),
				     cdb_intr_string, atapi_an_string,
				     dma_dir_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	/* Limit PATA drive on SATA cable bridge transfers to udma5,
	   200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_info(dev, "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if ((dev->class == ATA_DEV_ATAPI) &&
	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
	}

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot. Do this after the dev_config call as some controllers
		   with buggy firmware may want to avoid reporting false device
		   bugs */

		if (print_info) {
			ata_dev_warn(dev,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_warn(dev,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
		ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
		ata_dev_warn(dev, "         contact the vendor or visit http://ata.wiki.kernel.org\n");
	}

	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
	return rc;
}
2481
/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 */
int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}
2494
/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 */
int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}
2507
/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 */
int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}
2519
/**
 *	ata_cable_ignore	-	return ignored PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which don't use cable type to limit
 *	transfer mode.
 */
int ata_cable_ignore(struct ata_port *ap)
{
	return ATA_CBL_PATA_IGN;
}
2531
/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables.
 */
int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
2543
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */
int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	ata_for_each_dev(dev, &ap->link, ALL)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	ata_for_each_dev(dev, &ap->link, ALL) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;
		dev->dma_mode = 0xff;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	ata_for_each_dev(dev, &ap->link, ALL) {
		if (dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of
	 * the reported cable types and sensed types.  When SATA
	 * drives indicate we have a bridge, we don't know which end
	 * of the link the bridge is which is a problem.
	 */
	ata_for_each_dev(dev, &ap->link, ENABLED)
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	ata_for_each_dev(dev, &ap->link, ENABLED) {
		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	ata_for_each_dev(dev, &ap->link, ENABLED)
		return 0;

	return -ENODEV;

 fail:
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fall through */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link, 0);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
2680
2681
2682
2683
2684
2685
2686
2687
2688
2689
2690static void sata_print_link_status(struct ata_link *link)
2691{
2692 u32 sstatus, scontrol, tmp;
2693
2694 if (sata_scr_read(link, SCR_STATUS, &sstatus))
2695 return;
2696 sata_scr_read(link, SCR_CONTROL, &scontrol);
2697
2698 if (ata_phys_link_online(link)) {
2699 tmp = (sstatus >> 4) & 0xf;
2700 ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
2701 sata_spd_string(tmp), sstatus, scontrol);
2702 } else {
2703 ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
2704 sstatus, scontrol);
2705 }
2706}
2707
2708
2709
2710
2711
2712
2713
2714
2715
2716struct ata_device *ata_dev_pair(struct ata_device *adev)
2717{
2718 struct ata_link *link = adev->link;
2719 struct ata_device *pair = &link->device[1 - adev->devno];
2720 if (!ata_dev_enabled(pair))
2721 return NULL;
2722 return pair;
2723}
2724
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *	@spd_limit: Additional limit
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	If @spd_limit is non-zero, the speed is limited to equal to or
 *	lower than @spd_limit if such speed is supported.  If
 *	@spd_limit is slower than any supported speed, only the lowest
 *	supported speed is allowed.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
{
	u32 sstatus, spd, mask;
	int rc, bit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0 && ata_sstatus_online(sstatus))
		spd = (sstatus >> 4) & 0xf;
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;

	/* unconditionally mask off the highest bit */
	bit = fls(mask) - 1;
	mask &= ~(1 << bit);

	/* Mask off all speeds higher than or equal to the current
	 * one.  Force 1.5Gbps if current SPD is not available.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		mask &= 1;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	if (spd_limit) {
		if (mask & ((1 << spd_limit) - 1))
			mask &= (1 << spd_limit) - 1;
		else {
			/* @spd_limit is below every supported speed:
			 * keep only the lowest supported one.
			 */
			bit = ffs(mask) - 1;
			mask = 1 << bit;
		}
	}

	link->sata_spd_limit = mask;

	ata_link_warn(link, "limiting SATA link speed to %s\n",
		      sata_spd_string(fls(mask)));

	return 0;
}
2798
2799static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2800{
2801 struct ata_link *host_link = &link->ap->link;
2802 u32 limit, target, spd;
2803
2804 limit = link->sata_spd_limit;
2805
2806
2807
2808
2809
2810 if (!ata_is_host_link(link) && host_link->sata_spd)
2811 limit &= (1 << host_link->sata_spd) - 1;
2812
2813 if (limit == UINT_MAX)
2814 target = 0;
2815 else
2816 target = fls(limit);
2817
2818 spd = (*scontrol >> 4) & 0xf;
2819 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2820
2821 return spd != target;
2822}
2823
/**
 *	sata_set_spd_needed - is SATA spd configuration needed
 *	@link: Link in question
 *
 *	Test whether the spd limit in SControl matches
 *	@link->sata_spd_limit.  This function is used to determine
 *	whether hardreset is necessary to apply SATA spd
 *	configuration.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	1 if SATA spd configuration is needed, 0 otherwise.
 */
static int sata_set_spd_needed(struct ata_link *link)
{
	u32 scontrol;

	/* If SControl cannot be read, assume configuration is needed. */
	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
		return 1;

	return __sata_set_spd_needed(link, &scontrol);
}
2848
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860
2861
2862int sata_set_spd(struct ata_link *link)
2863{
2864 u32 scontrol;
2865 int rc;
2866
2867 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2868 return rc;
2869
2870 if (!__sata_set_spd_needed(link, &scontrol))
2871 return 0;
2872
2873 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2874 return rc;
2875
2876 return 1;
2877}
2878
/*
 * PIO, MWDMA and UDMA timing parameters in nanoseconds.  Column order
 * follows the struct ata_timing fields as consumed by
 * ata_timing_quantize(): mode, setup, act8b, rec8b, cyc8b, active,
 * recover, dmack_hold, cycle, udma.  (Field order assumed from the
 * quantize/merge helpers below -- confirm against struct ata_timing.)
 * UDMA rows carry only the udma cycle time.  The 0xFF entry is the
 * table terminator used by ata_timing_find_mode().
 */
static const struct ata_timing ata_timing[] = {
/*	{ XFER,          setup act8b rec8b cyc8b activ recov dmack cycle udma } */
	{ XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 },
	{ XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 },
	{ XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 },
	{ XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 },
	{ XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 },
	{ XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 },
	{ XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 },

	{ XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 },
	{ XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 },
	{ XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 },

	{ XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 },
	{ XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 },
	{ XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 },
	{ XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 },
	{ XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 },

/*	{ XFER_UDMA_SLOW, ..., udma cycle only } */
	{ XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 },
	{ XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 },
	{ XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 },
	{ XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 },
	{ XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 },
	{ XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 },
	{ XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 },

	{ 0xFF }	/* terminator */
};
2922
/* Round @v up to a whole number of @unit ticks; ENOUGH requires v > 0,
 * EZ additionally maps 0 -> 0.  NOTE: EZ evaluates @v twice - callers
 * must not pass expressions with side effects.
 */
#define ENOUGH(v, unit) (((v)-1)/(unit)+1)
#define EZ(v, unit) ((v)?ENOUGH(v, unit):0)
2925
/*
 * Convert nanosecond timings in @t to bus clock counts in @q, rounding
 * up.  @T is the PIO/DMA clock period and @UT the UDMA clock period,
 * both presumably in the same picosecond-scale unit the *1000 factor
 * implies -- confirm against callers.
 */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup = EZ(t->setup * 1000, T);
	q->act8b = EZ(t->act8b * 1000, T);
	q->rec8b = EZ(t->rec8b * 1000, T);
	q->cyc8b = EZ(t->cyc8b * 1000, T);
	q->active = EZ(t->active * 1000, T);
	q->recover = EZ(t->recover * 1000, T);
	q->dmack_hold = EZ(t->dmack_hold * 1000, T);
	q->cycle = EZ(t->cycle * 1000, T);
	q->udma = EZ(t->udma * 1000, UT);	/* UDMA uses its own clock */
}
2938
/*
 * Merge timings @a and @b into @m, taking the larger (slower) value of
 * each field selected by the ATA_TIMING_* bits in @what.
 */
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
	if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
	if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
	if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
	if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
	if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
}
2952
2953const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
2954{
2955 const struct ata_timing *t = ata_timing;
2956
2957 while (xfer_mode > t->mode)
2958 t++;
2959
2960 if (xfer_mode == t->mode)
2961 return t;
2962
2963 WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
2964 __func__, xfer_mode);
2965
2966 return NULL;
2967}
2968
/*
 * Compute the quantized timing for @adev at mode @speed into @t.
 * @T and @UT are the bus clock periods used for quantization.
 * Returns 0 on success, -EINVAL if @speed has no timing table entry.
 */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const u16 *id = adev->id;
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */
	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */
	if (id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));

		if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2)
				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
			else if ((speed <= XFER_PIO_4) ||
				 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
			p.cycle = id[ATA_ID_EIDE_DMA_MIN];

		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */
	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the current PIO timing.
	 */
	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */
	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/*
	 * In a few cases quantisation may produce enough errors to
	 * leave t->cycle too low for the sum of active and recovery
	 * if so we must correct this.
	 */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
3044
/**
 *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
 *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
 *	@cycle: cycle duration in ns
 *
 *	Return matching xfer mode for @cycle.  The returned mode is of
 *	the transfer type specified by @xfer_shift.  If @cycle is too
 *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
 *	than the fastest known mode, the fastest mode is returned.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mode, 0xff if no match found.
 */
u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
{
	u8 base_mode = 0xff, last_mode = 0xff;
	const struct ata_xfer_ent *ent;
	const struct ata_timing *t;

	/* find the base mode (slowest mode) of the given transfer type */
	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (ent->shift == xfer_shift)
			base_mode = ent->base;

	/* walk faster modes while their cycle still fits within @cycle */
	for (t = ata_timing_find_mode(base_mode);
	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
		unsigned short this_cycle;

		switch (xfer_shift) {
		case ATA_SHIFT_PIO:
		case ATA_SHIFT_MWDMA:
			this_cycle = t->cycle;
			break;
		case ATA_SHIFT_UDMA:
			this_cycle = t->udma;
			break;
		default:
			return 0xff;
		}

		if (cycle > this_cycle)
			break;

		last_mode = t->mode;
	}

	return last_mode;
}
3095
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned long orig_mask, xfer_mask;
	unsigned long pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* drop the highest remaining PIO mode */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* drop the highest remaining DMA mode, UDMA first */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		/* cap UDMA to modes safe on a 40-wire cable */
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fall through - also disable DMA as for FORCE_PIO */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* at least one PIO mode must survive; no-op changes are errors */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_warn(dev, "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
3183
/*
 * Program @dev's transfer mode via SET_XFERMODE (unless quirked off),
 * revalidate the device and decide whether any device-side error can
 * be safely ignored.  Returns 0 on success, -EIO on unignorable
 * failure, or the revalidation error code.
 */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
	const char *dev_err_whine = "";
	int ign_dev_err = 0;
	unsigned int err_mask = 0;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	/* NOSETXFER only makes sense for real SATA devices; a PATA
	 * device behind a bridge still needs the command.
	 */
	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
		dev_err_whine = " (SET_XFERMODE skipped)";
	else {
		if (nosetxfer)
			ata_dev_warn(dev,
				     "NOSETXFER but PATA detected - can't "
				     "skip SETXFER, might malfunction\n");
		err_mask = ata_dev_set_xfermode(dev);
	}

	if (err_mask & ~AC_ERR_DEV)
		goto fail;

	/* revalidate */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	if (dev->xfer_shift == ATA_SHIFT_PIO) {
		/* Old CFA may refuse this command, which is just fine */
		if (ata_id_is_cfa(dev->id))
			ign_dev_err = 1;
		/* Catch several broken garbage emulations plus some pre
		   ATA devices */
		if (ata_id_major_version(dev->id) == 0 &&
					dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;
		/* Some very old devices and some bad newer ones fail
		   any kind of SET_XFERMODE request but support PIO0-2
		   timings and no IORDY */
		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;
	}

	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
	   Don't fail an MWDMA0 set IFF the bit is in the ID block. */
	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
	    dev->dma_mode == XFER_MW_DMA_0 &&
	    (dev->id[63] >> 8) & 1)
		ign_dev_err = 1;

	/* if the device is actually configured correctly, ignore dev err */
	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
		ign_dev_err = 1;

	if (err_mask & AC_ERR_DEV) {
		if (!ign_dev_err)
			goto fail;
		else
			dev_err_whine = " (device error ignored)";
	}

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_info(dev, "configured for %s%s\n",
		     ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
		     dev_err_whine);

	return 0;

 fail:
	ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
	return -EIO;
}
3264
3265
3266
3267
3268
3269
3270
3271
3272
3273
3274
3275
3276
3277
3278
3279
3280
3281
/**
 *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
 *	@link: link on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Standard implementation of the function used to tune and set
 *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_dev_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;
	int rc = 0, used_dma = 0, found = 0;

	/* step 1: calculate xfer_mask for each enabled device */
	ata_for_each_dev(dev, link, ENABLED) {
		unsigned long pio_mask, dma_mask;
		unsigned int mode_mask;

		mode_mask = ATA_DMA_MASK_ATA;
		if (dev->class == ATA_DEV_ATAPI)
			mode_mask = ATA_DMA_MASK_ATAPI;
		else if (ata_id_is_cfa(dev->id))
			mode_mask = ATA_DMA_MASK_CFA;

		ata_dev_xfermask(dev);
		ata_force_xfermask(dev);

		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);

		/* libata_dma_mask is a module-level DMA-class enable mask */
		if (libata_dma_mask & mode_mask)
			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
						     dev->udma_mask);
		else
			dma_mask = 0;

		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (ata_dma_enabled(dev))
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	ata_for_each_dev(dev, link, ENABLED) {
		if (dev->pio_mode == 0xff) {
			ata_dev_warn(dev, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	ata_for_each_dev(dev, link, ENABLED) {
		if (!ata_dma_enabled(dev))
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	ata_for_each_dev(dev, link, ENABLED) {
		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status.  If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = ap;

 out:
	if (rc)
		*r_failed_dev = dev;
	return rc;
}
3363
3364
3365
3366
3367
3368
3369
3370
3371
3372
3373
3374
3375
3376
3377
3378
3379
3380
3381
3382
3383
/**
 *	ata_wait_ready - wait for link to become ready
 *	@link: link to be waited on
 *	@deadline: deadline jiffies for the operation
 *	@check_ready: callback to check link readiness
 *
 *	Wait for @link to become ready.  @check_ready should return
 *	positive number if @link is ready, 0 if it isn't, -ENODEV if
 *	link doesn't seem to be occupied, other errno for other error
 *	conditions.
 *
 *	Transient -ENODEV conditions are allowed for a grace period
 *	(nodev_deadline below).
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_ready(struct ata_link *link, unsigned long deadline,
		   int (*check_ready)(struct ata_link *link))
{
	unsigned long start = jiffies;
	unsigned long nodev_deadline;
	int warned = 0;

	/* choose which 0xff timeout to use; parallel scans can afford
	 * to wait longer for devices that are slow to clear 0xff
	 */
	if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
	else
		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);

	/* Slave readiness can't be tested separately from master.  On
	 * M/S emulation configuration, this function should be called
	 * only on the master and it will handle both master and slave.
	 */
	WARN_ON(link == link->ap->slave_link);

	if (time_after(nodev_deadline, deadline))
		nodev_deadline = deadline;

	while (1) {
		unsigned long now = jiffies;
		int ready, tmp;

		ready = tmp = check_ready(link);
		if (ready > 0)
			return 0;

		/* -ENODEV could be transient.  Ignore -ENODEV if link
		 * is online.  Also, some SATA devices take a long
		 * time to clear 0xff after reset.  Wait for
		 * nodev_deadline if the link isn't offline yet.
		 */
		if (ready == -ENODEV) {
			if (ata_link_online(link))
				ready = 0;
			else if ((link->ap->flags & ATA_FLAG_SATA) &&
				 !ata_link_offline(link) &&
				 time_before(now, nodev_deadline))
				ready = 0;
		}

		if (ready)
			return ready;
		if (time_after(now, deadline))
			return -EBUSY;

		/* whine once if we've been waiting >5s and have >3s to go */
		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_link_warn(link,
				"link is slow to respond, please be patient "
				"(ready=%d)\n", tmp);
			warned = 1;
		}

		ata_msleep(link->ap, 50);
	}
}
3450
3451
3452
3453
3454
3455
3456
3457
3458
3459
3460
3461
3462
3463
3464
3465int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3466 int (*check_ready)(struct ata_link *link))
3467{
3468 ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
3469
3470 return ata_wait_ready(link, deadline, check_ready);
3471}
3472
3473
3474
3475
3476
3477
3478
3479
3480
3481
3482
3483
3484
3485
3486
3487
3488
3489
3490
3491
3492
3493
3494
/**
 *	sata_link_debounce - debounce SATA phy status
 *	@link: ATA link to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Make sure SStatus of @link reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constraints the
 *	beginning of the stable state.  Because DET gets stuck at 1 on
 *	some controllers after hot unplugging, this function waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	@timeout is further limited by @deadline.  The sooner of the
 *	two is used.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval = params[0];
	unsigned long duration = params[1];
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	t = ata_deadline(jiffies, params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* only the DET field of SStatus matters here */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		ata_msleep(link->ap, interval);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies,
				       ata_deadline(last_jiffies, duration)))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE to tell upper layer to lower link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}
3542
3543
3544
3545
3546
3547
3548
3549
3550
3551
3552
3553
3554
3555
3556
/**
 *	sata_link_resume - resume SATA link
 *	@link: ATA link to resume SATA
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Resume SATA phy @link and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_resume(struct ata_link *link, const unsigned long *params,
		     unsigned long deadline)
{
	int tries = ATA_LINK_RESUME_TRIES;
	u32 scontrol, serror;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/*
	 * Writes to SControl sometimes get ignored under certain
	 * conditions on some controllers.  Retry until DET actually
	 * reads back as cleared.
	 */
	do {
		/* clear DET, keep SPD, disable LPM transitions */
		scontrol = (scontrol & 0x0f0) | 0x300;
		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			return rc;
		/*
		 * Some PHYs react badly if SStatus is pounded
		 * immediately after resuming.  Delay 200ms before
		 * debouncing.
		 */
		ata_msleep(link->ap, 200);

		/* is SControl restored correctly? */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			return rc;
	} while ((scontrol & 0xf0f) != 0x300 && --tries);

	if ((scontrol & 0xf0f) != 0x300) {
		ata_link_warn(link, "failed to resume link (SControl %X)\n",
			      scontrol);
		return 0;
	}

	if (tries < ATA_LINK_RESUME_TRIES)
		ata_link_warn(link, "link resume succeeded after %d retries\n",
			      ATA_LINK_RESUME_TRIES - tries);

	if ((rc = sata_link_debounce(link, params, deadline)))
		return rc;

	/* clear SError; some PHYs require this even for SRST to work */
	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
		rc = sata_scr_write(link, SCR_ERROR, serror);

	/* -EINVAL here means SCR access isn't supported; treat as OK */
	return rc != -EINVAL ? rc : 0;
}
3607
3608
3609
3610
3611
3612
3613
3614
3615
3616
3617
3618
3619
3620
3621
3622
3623
3624
3625
/**
 *	sata_link_scr_lpm - manipulate SControl IPM and SPM fields
 *	@link: ATA link to manipulate SControl for
 *	@policy: LPM policy to configure
 *	@spm_wakeup: initiate LPM transition to active state
 *
 *	Manipulate the IPM field of the link's SControl and, if
 *	@spm_wakeup, request a transition to the active state via the
 *	SPM field.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
		      bool spm_wakeup)
{
	struct ata_eh_context *ehc = &link->eh_context;
	bool woken_up = false;
	u32 scontrol;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	switch (policy) {
	case ATA_LPM_MAX_POWER:
		/* disable all LPM transitions */
		scontrol |= (0x7 << 8);
		/* initiate transition to active state */
		if (spm_wakeup) {
			scontrol |= (0x4 << 12);
			woken_up = true;
		}
		break;
	case ATA_LPM_MED_POWER:
		/* allow LPM to PARTIAL */
		scontrol &= ~(0x1 << 8);
		scontrol |= (0x6 << 8);
		break;
	case ATA_LPM_MIN_POWER:
		if (ata_link_nr_enabled(link) > 0)
			/* no restrictions on LPM transitions */
			scontrol &= ~(0x7 << 8);
		else {
			/* empty port, power off */
			scontrol &= ~0xf;
			scontrol |= (0x1 << 2);
		}
		break;
	default:
		WARN_ON(1);
	}

	rc = sata_scr_write(link, SCR_CONTROL, scontrol);
	if (rc)
		return rc;

	/* give the link time to transit out of LPM state */
	if (woken_up)
		msleep(10);

	/* clear PHYRDY_CHG from SError */
	ehc->i.serror &= ~SERR_PHYRDY_CHG;
	return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
}
3679
3680
3681
3682
3683
3684
3685
3686
3687
3688
3689
3690
3691
3692
3693
3694
3695
3696
/**
 *	ata_std_prereset - prepare for reset
 *	@link: ATA link to be reset
 *	@deadline: deadline jiffies for the operation
 *
 *	@link is about to be reset.  Initialize it.  Failure from
 *	prereset makes libata abort the whole reset sequence and give
 *	up that port, so prereset should be best-effort: whine about
 *	problems but do not fail.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume link */
	if (ap->flags & ATA_FLAG_SATA) {
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_warn(link,
				"failed to resume link for reset (errno=%d)\n",
				rc);
	}

	/* no point in trying softreset on an offline link */
	if (ata_phys_link_offline(link))
		ehc->i.action &= ~ATA_EH_SOFTRESET;

	return 0;
}
3724
3725
3726
3727
3728
3729
3730
3731
3732
3733
3734
3735
3736
3737
3738
3739
3740
3741
3742
3743
3744
3745
3746
3747
3748
/**
 *	sata_link_hardreset - reset link via SATA phy reset
 *	@link: link to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *	@online: optional out parameter indicating link onlineness
 *	@check_ready: optional callback to check link readiness
 *
 *	SATA phy-reset @link using DET bits of the SControl register.
 *	After hardreset, link readiness is waited upon using
 *	ata_wait_ready() if @check_ready is specified.  LLDs are
 *	allowed to omit @check_ready and wait themselves after this
 *	function returns.  Device classification is the LLD's
 *	responsibility.
 *
 *	*@online is set iff reset succeeded and @link is online after
 *	the reset.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
			unsigned long deadline,
			bool *online, int (*check_ready)(struct ata_link *))
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (online)
		*online = false;

	if (sata_set_spd_needed(link)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works on at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			goto out;

		/* DET = 4: disable phy */
		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(link);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		goto out;

	/* DET = 1: perform interface communication initialization */
	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	ata_msleep(link->ap, 1);

	/* bring link back */
	rc = sata_link_resume(link, timing, deadline);
	if (rc)
		goto out;
	/* if link is offline, nothing more to do */
	if (ata_phys_link_offline(link))
		goto out;

	/* Link is online.  From this point, -ENODEV too is an error. */
	if (online)
		*online = true;

	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
		/* If PMP is supported, we have to do follow-up SRST.
		 * Some PMPs don't send D2H Reg FIS after hardreset if
		 * the first port is empty.  Wait only up to
		 * ATA_TMOUT_PMP_SRST_WAIT for readiness.
		 */
		if (check_ready) {
			unsigned long pmp_deadline;

			pmp_deadline = ata_deadline(jiffies,
						    ATA_TMOUT_PMP_SRST_WAIT);
			if (time_after(pmp_deadline, deadline))
				pmp_deadline = deadline;
			ata_wait_ready(link, pmp_deadline, check_ready);
		}
		rc = -EAGAIN;
		goto out;
	}

	rc = 0;
	if (check_ready)
		rc = ata_wait_ready(link, deadline, check_ready);
 out:
	if (rc && rc != -EAGAIN) {
		/* online is set iff link is online && reset succeeded */
		if (online)
			*online = false;
		ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
	}
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
3836
3837
3838
3839
3840
3841
3842
3843
3844
3845
3846
3847
3848
3849
3850
3851int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3852 unsigned long deadline)
3853{
3854 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3855 bool online;
3856 int rc;
3857
3858
3859 rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3860 return online ? -EAGAIN : rc;
3861}
3862
3863
3864
3865
3866
3867
3868
3869
3870
3871
3872
3873
3874
3875void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3876{
3877 u32 serror;
3878
3879 DPRINTK("ENTER\n");
3880
3881
3882 if (!sata_scr_read(link, SCR_ERROR, &serror))
3883 sata_scr_write(link, SCR_ERROR, serror);
3884
3885
3886 sata_print_link_status(link);
3887
3888 DPRINTK("EXIT\n");
3889}
3890
3891
3892
3893
3894
3895
3896
3897
3898
3899
3900
3901
3902
3903
3904
3905
3906
3907static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3908 const u16 *new_id)
3909{
3910 const u16 *old_id = dev->id;
3911 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3912 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3913
3914 if (dev->class != new_class) {
3915 ata_dev_info(dev, "class mismatch %d != %d\n",
3916 dev->class, new_class);
3917 return 0;
3918 }
3919
3920 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3921 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3922 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3923 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3924
3925 if (strcmp(model[0], model[1])) {
3926 ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
3927 model[0], model[1]);
3928 return 0;
3929 }
3930
3931 if (strcmp(serial[0], serial[1])) {
3932 ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
3933 serial[0], serial[1]);
3934 return 0;
3935 }
3936
3937 return 1;
3938}
3939
3940
3941
3942
3943
3944
3945
3946
3947
3948
3949
3950
3951
3952
3953
3954int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3955{
3956 unsigned int class = dev->class;
3957 u16 *id = (void *)dev->link->ap->sector_buf;
3958 int rc;
3959
3960
3961 rc = ata_dev_read_id(dev, &class, readid_flags, id);
3962 if (rc)
3963 return rc;
3964
3965
3966 if (!ata_dev_same_device(dev, class, id))
3967 return -ENODEV;
3968
3969 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3970 return 0;
3971}
3972
3973
3974
3975
3976
3977
3978
3979
3980
3981
3982
3983
3984
3985
3986
3987
/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@new_class: new class code
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page, make sure @dev is still attached to the
 *	port and reconfigure it according to the new IDENTIFY page.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
		       unsigned int readid_flags)
{
	u64 n_sectors = dev->n_sectors;
	u64 n_native_sectors = dev->n_native_sectors;
	int rc;

	if (!ata_dev_enabled(dev))
		return -ENODEV;

	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
	if (ata_class_enabled(new_class) &&
	    new_class != ATA_DEV_ATA &&
	    new_class != ATA_DEV_ATAPI &&
	    new_class != ATA_DEV_SEMB) {
		ata_dev_info(dev, "class mismatch %u != %u\n",
			     dev->class, new_class);
		rc = -ENODEV;
		goto fail;
	}

	/* re-read ID */
	rc = ata_dev_reread_id(dev, readid_flags);
	if (rc)
		goto fail;

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc)
		goto fail;

	/* verify n_sectors hasn't changed */
	if (dev->class != ATA_DEV_ATA || !n_sectors ||
	    dev->n_sectors == n_sectors)
		return 0;

	/* n_sectors has changed */
	ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
		     (unsigned long long)n_sectors,
		     (unsigned long long)dev->n_sectors);

	/*
	 * Something could have caused HPA to be unlocked
	 * involuntarily.  If n_native_sectors hasn't changed and the
	 * new size matches it, keep the device.
	 */
	if (dev->n_native_sectors == n_native_sectors &&
	    dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
		ata_dev_warn(dev,
			     "new n_sectors matches native, probably "
			     "late HPA unlock, n_sectors updated\n");
		/* use the larger n_sectors */
		return 0;
	}

	/*
	 * Some BIOSes boot w/o HPA but resume w/ HPA locked.  Try
	 * unlocking HPA in those cases.
	 */
	if (dev->n_native_sectors == n_native_sectors &&
	    dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
	    !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
		ata_dev_warn(dev,
			     "old n_sectors matches native, probably "
			     "late HPA lock, will try to unlock HPA\n");
		/* try unlocking HPA */
		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
		rc = -EIO;
	} else
		rc = -ENODEV;

	/* restore original n_[native_]sectors and fail */
	dev->n_native_sectors = n_native_sectors;
	dev->n_sectors = n_sectors;
 fail:
	ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
4068
/* One entry in the device quirk table below. */
struct ata_blacklist_entry {
	const char *model_num;	/* glob matched against the ATA model string */
	const char *model_rev;	/* optional firmware-revision glob; NULL = any */
	unsigned long horkage;	/* ATA_HORKAGE_* flags applied on a match */
};
4074
/* Device quirk table; terminated by an all-NULL entry.  NOTE(review):
 * several model strings in this table normally contain runs of spaces
 * that extraction may have collapsed -- confirm against the device's
 * actual IDENTIFY model string before editing any entry.
 */
static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
	{ "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
	{ "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
	{ "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
	{ "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
	{ "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
	{ "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
	{ "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
	{ "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA },
	{ "CRD-84", NULL, ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
	{ "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
	{ "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
	{ "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
	{ " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA },
	/* Odd clown on sil3726/4726 PMPs */
	{ "Config Disk", NULL, ATA_HORKAGE_DISABLE },

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
	{ "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
	{ "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },

	/* Devices where NCQ should be avoided */
	/* NCQ is slow */
	{ "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
	{ "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
	/* NCQ times out */
	{ "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
	/* NCQ is broken */
	{ "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
	{ "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
	{ "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
	{ "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
	{ "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ },

	/* Seagate NCQ + FLUSH CACHE firmware bug */
	{ "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	/* drives which fail NCQ on this particular firmware */
	{ "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
	{ "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
	{ "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },

	/* NCQ broken on this firmware */
	{ "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },

	/* devices which puke on READ_NATIVE_MAX */
	{ "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
	{ "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },

	/* this one allows HPA unlocking but fails IOs on the area */
	{ "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA },

	/* Devices which report 1 sector over size HPA */
	{ "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
	{ "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
	{ "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, },

	/* Devices which get the IVB wrong */
	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
	/* Maybe we should just blacklist TSSTcorp... */
	{ "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB, },

	/* Devices that do not need bridging limits applied */
	{ "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, },
	{ "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK, },

	/* Devices which aren't very happy with higher link speeds */
	{ "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, },
	{ "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS, },

	/*
	 * Devices which choke on SETXFER.  Applies only if both the
	 * device and controller are SATA.
	 */
	{ "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },

	/* End Marker */
	{ }
};
4191
4192
4193
4194
4195
4196
4197
4198
4199
4200
4201
4202
4203
4204
4205
4206
4207
4208
4209
4210
4211
4212
4213
4214
4215
4216
4217
4218
/**
 *	glob_match - match a text string against a glob-style pattern
 *	@text: the string to be examined
 *	@pattern: the glob used to match it
 *
 *	A very simple glob matcher: '?' matches any single character,
 *	'[set]' matches any single character in the set ('-' ranges
 *	are supported inside the set), and '*' matches any run of
 *	characters.
 *
 *	NOTE(review): '*' is only considered after the literal/set
 *	loop below exits, so semantics for '*' appearing mid-pattern
 *	depend on the recursion at the bottom -- confirm against
 *	callers (all current patterns in this file use '*' only in
 *	simple positions) before relying on more complex globs.
 *
 *	RETURNS:
 *	0 on a match, 1 otherwise.
 */
static int glob_match (const char *text, const char *pattern)
{
	do {
		/* Match single character or a '?' wildcard */
		if (*text == *pattern || *pattern == '?') {
			if (!*pattern++)
				return 0;  /* End of both strings: match */
		} else {
			/* Match single char against a '[' bracketed ']' set */
			if (!*text || *pattern != '[')
				break;  /* Not a pattern set */
			while (*++pattern && *pattern != ']' && *text != *pattern) {
				if (*pattern == '-' && *(pattern - 1) != '[')
					if (*text > *(pattern - 1) && *text < *(pattern + 1)) {
						++pattern;
						break;
					}
			}
			if (!*pattern || *pattern == ']')
				return 1;  /* No match */
			/* skip the rest of the set */
			while (*pattern && *pattern++ != ']');
		}
	} while (*++text && *pattern);

	/* Match any run of chars against a '*' wildcard */
	if (*pattern == '*') {
		if (!*++pattern)
			return 0;  /* Match: avoid recursion at end of pattern */
		/* Loop: given a prefix of text, match the suffix of pattern */
		while (*text) {
			if (glob_match(text, pattern) == 0)
				return 0;  /* Remainder matched */
			++text;  /* retry with a shorter text suffix */
		}
	}
	if (!*text && !*pattern)
		return 0;  /* End of both strings: match */
	return 1;  /* No match */
}
4258
4259static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4260{
4261 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4262 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4263 const struct ata_blacklist_entry *ad = ata_device_blacklist;
4264
4265 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4266 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4267
4268 while (ad->model_num) {
4269 if (!glob_match(model_num, ad->model_num)) {
4270 if (ad->model_rev == NULL)
4271 return ad->horkage;
4272 if (!glob_match(model_rev, ad->model_rev))
4273 return ad->horkage;
4274 }
4275 ad++;
4276 }
4277 return 0;
4278}
4279
4280static int ata_dma_blacklisted(const struct ata_device *dev)
4281{
4282
4283
4284
4285
4286 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4287 (dev->flags & ATA_DFLAG_CDB_INTR))
4288 return 1;
4289 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4290}
4291
4292
4293
4294
4295
4296
4297
4298
4299
4300static int ata_is_40wire(struct ata_device *dev)
4301{
4302 if (dev->horkage & ATA_HORKAGE_IVB)
4303 return ata_drive_40wire_relaxed(dev->id);
4304 return ata_drive_40wire(dev->id);
4305}
4306
4307
4308
4309
4310
4311
4312
4313
4314
4315
4316
4317
4318
4319
4320static int cable_is_40wire(struct ata_port *ap)
4321{
4322 struct ata_link *link;
4323 struct ata_device *dev;
4324
4325
4326 if (ap->cbl == ATA_CBL_PATA40)
4327 return 1;
4328
4329
4330 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4331 return 0;
4332
4333
4334
4335
4336
4337 if (ap->cbl == ATA_CBL_PATA40_SHORT)
4338 return 0;
4339
4340
4341
4342
4343
4344
4345
4346
4347
4348
4349 ata_for_each_link(link, ap, EDGE) {
4350 ata_for_each_dev(dev, link, ENABLED) {
4351 if (!ata_is_40wire(dev))
4352 return 0;
4353 }
4354 }
4355 return 1;
4356}
4357
4358
4359
4360
4361
4362
4363
4364
4365
4366
4367
4368
4369
/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, cable detection, etc...
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 * CFA Advanced TrueIDE timings are not allowed on a shared
	 * cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_warn(dev,
			     "device is on DMA blacklist, disabling DMA\n");
	}

	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_warn(dev,
			     "simplex DMA is claimed by other device, disabling DMA\n");
	}

	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * an unknown device is attached, it might have an 80c cable
	 * and we don't know about it yet.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if (cable_is_40wire(ap)) {
			ata_dev_warn(dev,
				     "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
4435
4436
4437
4438
4439
4440
4441
4442
4443
4444
4445
4446
4447
4448
4449
/**
 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 *	@dev: Device to which command will be sent
 *
 *	Issue SET FEATURES - XFER MODE command to device @dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	/* Some controllers and ATAPI devices show flaky interrupt
	 * behavior after setting xfer mode.  Use polling instead.
	 */
	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
	tf.protocol = ATA_PROT_NODATA;
	/* If we are using IORDY we must send the mode setting command */
	if (ata_pio_need_iordy(dev))
		tf.nsect = dev->xfer_mode;
	/* If the device has IORDY and the controller does not - turn it off */
	else if (ata_id_has_iordy(dev->id))
		tf.nsect = 0x01;
	else /* In the ancient relic department - skip all of this */
		return 0;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4480
4481
4482
4483
4484
4485
4486
4487
4488
4489
4490
4491
4492
4493
4494
4495
4496unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
4497{
4498 struct ata_taskfile tf;
4499 unsigned int err_mask;
4500
4501
4502 DPRINTK("set features - SATA features\n");
4503
4504 ata_tf_init(dev, &tf);
4505 tf.command = ATA_CMD_SET_FEATURES;
4506 tf.feature = enable;
4507 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4508 tf.protocol = ATA_PROT_NODATA;
4509 tf.nsect = feature;
4510
4511 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4512
4513 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4514 return err_mask;
4515}
4516EXPORT_SYMBOL_GPL(ata_dev_set_feature);
4517
4518
4519
4520
4521
4522
4523
4524
4525
4526
4527
4528
4529
/**
 *	ata_dev_init_params - Issue INIT DEV PARAMS command
 *	@dev: Device to which command will be sent
 *	@heads: Number of heads (taskfile parameter)
 *	@sectors: Number of sectors (taskfile parameter)
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	/* A clean abort indicates an original or just out of spec drive
	 * and we should continue as we issue the setup based on the
	 * drive reported working geometry */
	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
		err_mask = 0;

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4560
4561
4562
4563
4564
4565
4566
4567
4568
4569
4570void ata_sg_clean(struct ata_queued_cmd *qc)
4571{
4572 struct ata_port *ap = qc->ap;
4573 struct scatterlist *sg = qc->sg;
4574 int dir = qc->dma_dir;
4575
4576 WARN_ON_ONCE(sg == NULL);
4577
4578 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4579
4580 if (qc->n_elem)
4581 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
4582
4583 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4584 qc->sg = NULL;
4585}
4586
4587
4588
4589
4590
4591
4592
4593
4594
4595
4596
4597
4598
4599
4600
4601int atapi_check_dma(struct ata_queued_cmd *qc)
4602{
4603 struct ata_port *ap = qc->ap;
4604
4605
4606
4607
4608 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4609 unlikely(qc->nbytes & 15))
4610 return 1;
4611
4612 if (ap->ops->check_atapi_dma)
4613 return ap->ops->check_atapi_dma(qc);
4614
4615 return 0;
4616}
4617
4618
4619
4620
4621
4622
4623
4624
4625
4626
4627
4628
4629
4630
4631
4632
4633int ata_std_qc_defer(struct ata_queued_cmd *qc)
4634{
4635 struct ata_link *link = qc->dev->link;
4636
4637 if (qc->tf.protocol == ATA_PROT_NCQ) {
4638 if (!ata_tag_valid(link->active_tag))
4639 return 0;
4640 } else {
4641 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4642 return 0;
4643 }
4644
4645 return ATA_DEFER_LINK;
4646}
4647
/* qc_prep stub for drivers that need no per-command preparation. */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4649
4650
4651
4652
4653
4654
4655
4656
4657
4658
4659
4660
4661
4662
4663void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4664 unsigned int n_elem)
4665{
4666 qc->sg = sg;
4667 qc->n_elem = n_elem;
4668 qc->cursg = qc->sg;
4669}
4670
4671
4672
4673
4674
4675
4676
4677
4678
4679
4680
4681
4682
4683
4684static int ata_sg_setup(struct ata_queued_cmd *qc)
4685{
4686 struct ata_port *ap = qc->ap;
4687 unsigned int n_elem;
4688
4689 VPRINTK("ENTER, ata%u\n", ap->print_id);
4690
4691 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4692 if (n_elem < 1)
4693 return -1;
4694
4695 DPRINTK("%d sg elements mapped\n", n_elem);
4696 qc->orig_n_elem = qc->n_elem;
4697 qc->n_elem = n_elem;
4698 qc->flags |= ATA_QCFLAG_DMAMAP;
4699
4700 return 0;
4701}
4702
4703
4704
4705
4706
4707
4708
4709
4710
4711
4712
4713
4714
4715void swap_buf_le16(u16 *buf, unsigned int buf_words)
4716{
4717#ifdef __BIG_ENDIAN
4718 unsigned int i;
4719
4720 for (i = 0; i < buf_words; i++)
4721 buf[i] = le16_to_cpu(buf[i]);
4722#endif
4723}
4724
4725
4726
4727
4728
4729
4730
4731
4732
/**
 *	ata_qc_new - Request an available ATA command, for queueing
 *	@ap: target port
 *
 *	Allocate a free command tag atomically via the qc_allocated
 *	bitmap and return the corresponding queued command.
 *
 *	LOCKING:
 *	None.
 */
static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
{
	struct ata_queued_cmd *qc = NULL;
	unsigned int i;

	/* no command while frozen */
	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
		return NULL;

	/* the last tag is reserved for internal command */
	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
		if (!test_and_set_bit(i, &ap->qc_allocated)) {
			qc = __ata_qc_from_tag(ap, i);
			break;
		}

	if (qc)
		qc->tag = i;

	return qc;
}
4754
4755
4756
4757
4758
4759
4760
4761
4762
4763struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4764{
4765 struct ata_port *ap = dev->link->ap;
4766 struct ata_queued_cmd *qc;
4767
4768 qc = ata_qc_new(ap);
4769 if (qc) {
4770 qc->scsicmd = NULL;
4771 qc->ap = ap;
4772 qc->dev = dev;
4773
4774 ata_qc_reinit(qc);
4775 }
4776
4777 return qc;
4778}
4779
4780
4781
4782
4783
4784
4785
4786
4787
4788
4789
4790void ata_qc_free(struct ata_queued_cmd *qc)
4791{
4792 struct ata_port *ap;
4793 unsigned int tag;
4794
4795 WARN_ON_ONCE(qc == NULL);
4796 ap = qc->ap;
4797
4798 qc->flags = 0;
4799 tag = qc->tag;
4800 if (likely(ata_tag_valid(tag))) {
4801 qc->tag = ATA_TAG_POISON;
4802 clear_bit(tag, &ap->qc_allocated);
4803 }
4804}
4805
/**
 *	__ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA command has
 *	completed, with either an ok or not-ok status.  Releases the
 *	command's DMA mapping, updates the link's active-tag tracking
 *	and invokes the completion callback.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap;
	struct ata_link *link;

	WARN_ON_ONCE(qc == NULL);
	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
	ap = qc->ap;
	link = qc->dev->link;

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		link->sactive &= ~(1 << qc->tag);
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* mark qc as inactive before calling the completion callback
	 * so it can't be completed twice
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
4844
4845static void fill_result_tf(struct ata_queued_cmd *qc)
4846{
4847 struct ata_port *ap = qc->ap;
4848
4849 qc->result_tf.flags = qc->tf.flags;
4850 ap->ops->qc_fill_rtf(qc);
4851}
4852
4853static void ata_verify_xfer(struct ata_queued_cmd *qc)
4854{
4855 struct ata_device *dev = qc->dev;
4856
4857 if (ata_is_nodata(qc->tf.protocol))
4858 return;
4859
4860 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4861 return;
4862
4863 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4864}
4865
4866
4867
4868
4869
4870
4871
4872
4873
4874
4875
4876
4877
4878
4879
4880
/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA command has
 *	completed.  With new-style EH (ap->ops->error_handler set),
 *	failed or internal commands get special handling; with
 *	old-style EH the qc is completed directly unless EH has
 *	already been scheduled for it.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* New-style and old-style EH take different paths below.  New
	 * EH marks failed qcs and hands them to the error handler;
	 * old EH relies on ATA_QCFLAG_EH_SCHEDULED to nullify
	 * completion requests for commands it has taken over.
	 */
	if (ap->ops->error_handler) {
		struct ata_device *dev = qc->dev;
		struct ata_eh_info *ehi = &dev->link->eh_info;

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		/* Internal commands are completed directly, with the
		 * result taskfile always filled in.
		 */
		if (unlikely(ata_tag_internal(qc->tag))) {
			fill_result_tf(qc);
			__ata_qc_complete(qc);
			return;
		}

		/* Non-internal qc has failed.  Fill the result TF and
		 * summon EH.
		 */
		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			fill_result_tf(qc);
			ata_qc_schedule_eh(qc);
			return;
		}

		WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		/* Some successful commands change device state enough
		 * that EH must revalidate the device afterwards.
		 */
		switch (qc->tf.command) {
		case ATA_CMD_SET_FEATURES:
			if (qc->tf.feature != SETFEATURES_WC_ON &&
			    qc->tf.feature != SETFEATURES_WC_OFF)
				break;
			/* fall through - write cache toggles need revalidation */
		case ATA_CMD_INIT_DEV_PARAMS:
		case ATA_CMD_SET_MULTI:
			/* revalidate device */
			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
			ata_port_schedule_eh(ap);
			break;

		case ATA_CMD_SLEEP:
			dev->flags |= ATA_DFLAG_SLEEPING;
			break;
		}

		/* successful transfer may confirm a dubious transfer mode */
		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
			ata_verify_xfer(qc);

		__ata_qc_complete(qc);
	} else {
		/* old EH has taken over this command; drop the request */
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
4967
4968
4969
4970
4971
4972
4973
4974
4975
4976
4977
4978
4979
4980
4981
4982
4983
4984
4985
4986
4987
4988int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
4989{
4990 int nr_done = 0;
4991 u32 done_mask;
4992
4993 done_mask = ap->qc_active ^ qc_active;
4994
4995 if (unlikely(done_mask & qc_active)) {
4996 ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n",
4997 ap->qc_active, qc_active);
4998 return -EINVAL;
4999 }
5000
5001 while (done_mask) {
5002 struct ata_queued_cmd *qc;
5003 unsigned int tag = __ffs(done_mask);
5004
5005 qc = ata_qc_from_tag(ap, tag);
5006 if (qc) {
5007 ata_qc_complete(qc);
5008 nr_done++;
5009 }
5010 done_mask &= ~(1 << tag);
5011 }
5012
5013 return nr_done;
5014}
5015
5016
5017
5018
5019
5020
5021
5022
5023
5024
5025
5026
5027
5028void ata_qc_issue(struct ata_queued_cmd *qc)
5029{
5030 struct ata_port *ap = qc->ap;
5031 struct ata_link *link = qc->dev->link;
5032 u8 prot = qc->tf.protocol;
5033
5034
5035
5036
5037
5038 WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5039
5040 if (ata_is_ncq(prot)) {
5041 WARN_ON_ONCE(link->sactive & (1 << qc->tag));
5042
5043 if (!link->sactive)
5044 ap->nr_active_links++;
5045 link->sactive |= 1 << qc->tag;
5046 } else {
5047 WARN_ON_ONCE(link->sactive);
5048
5049 ap->nr_active_links++;
5050 link->active_tag = qc->tag;
5051 }
5052
5053 qc->flags |= ATA_QCFLAG_ACTIVE;
5054 ap->qc_active |= 1 << qc->tag;
5055
5056
5057
5058
5059
5060 if (WARN_ON_ONCE(ata_is_data(prot) &&
5061 (!qc->sg || !qc->n_elem || !qc->nbytes)))
5062 goto sys_err;
5063
5064 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5065 (ap->flags & ATA_FLAG_PIO_DMA)))
5066 if (ata_sg_setup(qc))
5067 goto sys_err;
5068
5069
5070 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5071 link->eh_info.action |= ATA_EH_RESET;
5072 ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5073 ata_link_abort(link);
5074 return;
5075 }
5076
5077 ap->ops->qc_prep(qc);
5078
5079 qc->err_mask |= ap->ops->qc_issue(qc);
5080 if (unlikely(qc->err_mask))
5081 goto err;
5082 return;
5083
5084sys_err:
5085 qc->err_mask |= AC_ERR_SYSTEM;
5086err:
5087 ata_qc_complete(qc);
5088}
5089
5090
5091
5092
5093
5094
5095
5096
5097
5098
5099
5100
5101
5102int sata_scr_valid(struct ata_link *link)
5103{
5104 struct ata_port *ap = link->ap;
5105
5106 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5107}
5108
5109
5110
5111
5112
5113
5114
5115
5116
5117
5118
5119
5120
5121
5122
5123
5124
5125int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5126{
5127 if (ata_is_host_link(link)) {
5128 if (sata_scr_valid(link))
5129 return link->ap->ops->scr_read(link, reg, val);
5130 return -EOPNOTSUPP;
5131 }
5132
5133 return sata_pmp_scr_read(link, reg, val);
5134}
5135
5136
5137
5138
5139
5140
5141
5142
5143
5144
5145
5146
5147
5148
5149
5150
5151
5152int sata_scr_write(struct ata_link *link, int reg, u32 val)
5153{
5154 if (ata_is_host_link(link)) {
5155 if (sata_scr_valid(link))
5156 return link->ap->ops->scr_write(link, reg, val);
5157 return -EOPNOTSUPP;
5158 }
5159
5160 return sata_pmp_scr_write(link, reg, val);
5161}
5162
5163
5164
5165
5166
5167
5168
5169
5170
5171
5172
5173
5174
5175
5176
5177
5178int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5179{
5180 if (ata_is_host_link(link)) {
5181 int rc;
5182
5183 if (sata_scr_valid(link)) {
5184 rc = link->ap->ops->scr_write(link, reg, val);
5185 if (rc == 0)
5186 rc = link->ap->ops->scr_read(link, reg, &val);
5187 return rc;
5188 }
5189 return -EOPNOTSUPP;
5190 }
5191
5192 return sata_pmp_scr_write(link, reg, val);
5193}
5194
5195
5196
5197
5198
5199
5200
5201
5202
5203
5204
5205
5206
5207
5208
5209bool ata_phys_link_online(struct ata_link *link)
5210{
5211 u32 sstatus;
5212
5213 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5214 ata_sstatus_online(sstatus))
5215 return true;
5216 return false;
5217}
5218
5219
5220
5221
5222
5223
5224
5225
5226
5227
5228
5229
5230
5231
5232
5233bool ata_phys_link_offline(struct ata_link *link)
5234{
5235 u32 sstatus;
5236
5237 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5238 !ata_sstatus_online(sstatus))
5239 return true;
5240 return false;
5241}
5242
5243
5244
5245
5246
5247
5248
5249
5250
5251
5252
5253
5254
5255
5256
5257
5258
5259bool ata_link_online(struct ata_link *link)
5260{
5261 struct ata_link *slave = link->ap->slave_link;
5262
5263 WARN_ON(link == slave);
5264
5265 return ata_phys_link_online(link) ||
5266 (slave && ata_phys_link_online(slave));
5267}
5268
5269
5270
5271
5272
5273
5274
5275
5276
5277
5278
5279
5280
5281
5282
5283
5284
5285bool ata_link_offline(struct ata_link *link)
5286{
5287 struct ata_link *slave = link->ap->slave_link;
5288
5289 WARN_ON(link == slave);
5290
5291 return ata_phys_link_offline(link) &&
5292 (!slave || ata_phys_link_offline(slave));
5293}
5294
5295#ifdef CONFIG_PM
/**
 *	ata_port_request_pm - request a PM operation from EH
 *	@ap: port to operate on
 *	@mesg: PM message to deliver
 *	@action: extra EH action(s) to schedule on every link
 *	@ehi_flags: EH info flags to set on every link
 *	@async: if non-NULL the request is asynchronous and the result
 *		is eventually stored through this pointer; if NULL the
 *		caller sleeps until EH finishes the operation
 *
 *	Records the PM request in @ap and schedules EH, which performs
 *	the actual suspend/resume work and reports back via ap->pm_result.
 *
 *	RETURNS:
 *	0 for async requests; for sync requests, the result EH stored
 *	through ap->pm_result.
 */
static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int *async)
{
	struct ata_link *link;
	unsigned long flags;
	int rc = 0;

	/* A previous PM operation may still be in progress.  Async
	 * callers bail with -EAGAIN; sync callers wait it out.
	 */
	if (ap->pflags & ATA_PFLAG_PM_PENDING) {
		if (async) {
			*async = -EAGAIN;
			return 0;
		}
		ata_port_wait_eh(ap);
		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
	}

	/* record the request and kick EH, all under host lock */
	spin_lock_irqsave(ap->lock, flags);

	ap->pm_mesg = mesg;
	if (async)
		ap->pm_result = async;
	else
		ap->pm_result = &rc;	/* EH writes the sync result here */

	ap->pflags |= ATA_PFLAG_PM_PENDING;
	ata_for_each_link(link, ap, HOST_FIRST) {
		link->eh_info.action |= action;
		link->eh_info.flags |= ehi_flags;
	}

	ata_port_schedule_eh(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	/* sync request: wait for EH to finish and pick up rc */
	if (!async) {
		ata_port_wait_eh(ap);
		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
	}

	return rc;
}
5343
5344static int __ata_port_suspend_common(struct ata_port *ap, pm_message_t mesg, int *async)
5345{
5346
5347
5348
5349
5350
5351
5352
5353
5354 unsigned int ehi_flags = ATA_EHI_QUIET | ATA_EHI_NO_AUTOPSY |
5355 ATA_EHI_NO_RECOVERY;
5356 return ata_port_request_pm(ap, mesg, 0, ehi_flags, async);
5357}
5358
/* Synchronous suspend for a device-model ATA port device. */
static int ata_port_suspend_common(struct device *dev, pm_message_t mesg)
{
	struct ata_port *ap = to_ata_port(dev);

	return __ata_port_suspend_common(ap, mesg, NULL);
}
5365
5366static int ata_port_suspend(struct device *dev)
5367{
5368 if (pm_runtime_suspended(dev))
5369 return 0;
5370
5371 return ata_port_suspend_common(dev, PMSG_SUSPEND);
5372}
5373
5374static int ata_port_do_freeze(struct device *dev)
5375{
5376 if (pm_runtime_suspended(dev))
5377 return 0;
5378
5379 return ata_port_suspend_common(dev, PMSG_FREEZE);
5380}
5381
/* Hibernation poweroff callback: always suspend, even if runtime-suspended. */
static int ata_port_poweroff(struct device *dev)
{
	return ata_port_suspend_common(dev, PMSG_HIBERNATE);
}
5386
5387static int __ata_port_resume_common(struct ata_port *ap, pm_message_t mesg,
5388 int *async)
5389{
5390 int rc;
5391
5392 rc = ata_port_request_pm(ap, mesg, ATA_EH_RESET,
5393 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, async);
5394 return rc;
5395}
5396
/* Synchronous resume for a device-model ATA port device. */
static int ata_port_resume_common(struct device *dev, pm_message_t mesg)
{
	struct ata_port *ap = to_ata_port(dev);

	return __ata_port_resume_common(ap, mesg, NULL);
}
5403
5404static int ata_port_resume(struct device *dev)
5405{
5406 int rc;
5407
5408 rc = ata_port_resume_common(dev, PMSG_RESUME);
5409 if (!rc) {
5410 pm_runtime_disable(dev);
5411 pm_runtime_set_active(dev);
5412 pm_runtime_enable(dev);
5413 }
5414
5415 return rc;
5416}
5417
5418
5419
5420
5421
5422
5423
5424
5425
5426static int ata_port_runtime_idle(struct device *dev)
5427{
5428 struct ata_port *ap = to_ata_port(dev);
5429 struct ata_link *link;
5430 struct ata_device *adev;
5431
5432 ata_for_each_link(link, ap, HOST_FIRST) {
5433 ata_for_each_dev(adev, link, ENABLED)
5434 if (adev->class == ATA_DEV_ATAPI &&
5435 !zpodd_dev_enabled(adev))
5436 return -EBUSY;
5437 }
5438
5439 return 0;
5440}
5441
/* Runtime PM suspend callback. */
static int ata_port_runtime_suspend(struct device *dev)
{
	return ata_port_suspend_common(dev, PMSG_AUTO_SUSPEND);
}
5446
/* Runtime PM resume callback. */
static int ata_port_runtime_resume(struct device *dev)
{
	return ata_port_resume_common(dev, PMSG_AUTO_RESUME);
}
5451
/*
 * PM operations for ATA port devices.  System sleep entries funnel
 * into the common suspend/resume helpers with the matching PM
 * message; runtime PM uses the AUTO_* messages.
 */
static const struct dev_pm_ops ata_port_pm_ops = {
	.suspend = ata_port_suspend,
	.resume = ata_port_resume,
	.freeze = ata_port_do_freeze,
	.thaw = ata_port_resume,
	.poweroff = ata_port_poweroff,
	.restore = ata_port_resume,

	.runtime_suspend = ata_port_runtime_suspend,
	.runtime_resume = ata_port_runtime_resume,
	.runtime_idle = ata_port_runtime_idle,
};
5464
5465
5466
5467
5468
5469
/**
 *	ata_sas_port_async_suspend - asynchronously suspend a SAS-attached port
 *	@ap: port to suspend
 *	@async: location where EH eventually stores the result
 *
 *	For SAS/libsas users which manage ATA ports themselves;
 *	returns immediately, result delivered through @async.
 */
int ata_sas_port_async_suspend(struct ata_port *ap, int *async)
{
	return __ata_port_suspend_common(ap, PMSG_SUSPEND, async);
}
EXPORT_SYMBOL_GPL(ata_sas_port_async_suspend);
5475
/**
 *	ata_sas_port_async_resume - asynchronously resume a SAS-attached port
 *	@ap: port to resume
 *	@async: location where EH eventually stores the result
 */
int ata_sas_port_async_resume(struct ata_port *ap, int *async)
{
	return __ata_port_resume_common(ap, PMSG_RESUME, async);
}
EXPORT_SYMBOL_GPL(ata_sas_port_async_resume);
5481
5482
5483
5484
5485
5486
5487
5488
5489
/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Only records the PM state on the host's device; the real work
 *	is done by the per-port PM callbacks.  Always succeeds.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	host->dev->power.power_state = mesg;
	return 0;
}
5495
5496
5497
5498
5499
5500
5501
/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Counterpart of ata_host_suspend(): just resets the recorded
 *	PM state to "on".
 */
void ata_host_resume(struct ata_host *host)
{
	host->dev->power.power_state = PMSG_ON;
}
5506#endif
5507
/* Device type for ATA ports; hooks the port PM ops into the driver core. */
struct device_type ata_port_type = {
	.name = "ata_port",
#ifdef CONFIG_PM
	.pm = &ata_port_pm_ops,
#endif
};
5514
5515
5516
5517
5518
5519
5520
5521
5522
5523
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = ata_dev_phys_link(dev);
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the attached device, reset it too */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/* Clear per-probe flags under host lock - dev->flags is also
	 * modified asynchronously (NOTE(review): presumably by hotplug/EH
	 * paths - confirm), so the update must be serialized.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	/* zero only the window between CLEAR_BEGIN and CLEAR_END;
	 * fields outside it (e.g. link, devno) must survive re-init */
	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
5549
5550
5551
5552
5553
5554
5555
5556
5557
5558
5559
5560
/**
 *	ata_link_init - Initialize an ata_link structure
 *	@ap: ATA port link is attached to
 *	@link: Link structure to initialize
 *	@pmp: Port multiplier port number
 *
 *	Initialize @link and its devices.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
{
	int i;

	/* clear only the window between CLEAR_BEGIN and CLEAR_END so
	 * that fields outside it survive re-initialization */
	memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
	       ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);

	link->ap = ap;
	link->pmp = pmp;
	link->active_tag = ATA_TAG_POISON;
	link->hw_sata_spd_limit = UINT_MAX;

	/* initialize each device on the link */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &link->device[i];

		dev->link = link;
		dev->devno = dev - link->device;
#ifdef CONFIG_ATA_ACPI
		dev->gtf_filter = ata_acpi_gtf_filter;
#endif
		ata_dev_init(dev);
	}
}
5586
5587
5588
5589
5590
5591
5592
5593
5594
5595
5596
5597
5598
5599
5600int sata_link_init_spd(struct ata_link *link)
5601{
5602 u8 spd;
5603 int rc;
5604
5605 rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5606 if (rc)
5607 return rc;
5608
5609 spd = (link->saved_scontrol >> 4) & 0xf;
5610 if (spd)
5611 link->hw_sata_spd_limit &= (1 << spd) - 1;
5612
5613 ata_force_link_limits(link);
5614
5615 link->sata_spd_limit = link->hw_sata_spd_limit;
5616
5617 return 0;
5618}
5619
5620
5621
5622
5623
5624
5625
5626
5627
5628
5629
5630
5631
/**
 *	ata_port_alloc - allocate and initialize basic ATA port resources
 *	@ap: returned port belongs to @host
 *	@host: ATA host this allocated port belongs to
 *
 *	Allocate and initialize basic ATA port resources.
 *
 *	RETURNS:
 *	Allocated ATA port on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	/* start frozen; EH thaws the port once it is ready */
	ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
	ap->lock = &host->lock;
	/* real IDs are assigned later, in ata_host_register() */
	ap->print_id = -1;
	ap->local_port_no = -1;
	ap->host = host;
	ap->dev = host->dev;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	mutex_init(&ap->scsi_scan_mutex);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);
	init_completion(&ap->park_req_pending);
	init_timer_deferrable(&ap->fastdrain_timer);
	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
	ap->fastdrain_timer.data = (unsigned long)ap;

	ap->cbl = ATA_CBL_NONE;

	ata_link_init(ap, &ap->link, 0);

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	ata_sff_port_init(ap);

	return ap;
}
5680
5681static void ata_host_release(struct device *gendev, void *res)
5682{
5683 struct ata_host *host = dev_get_drvdata(gendev);
5684 int i;
5685
5686 for (i = 0; i < host->n_ports; i++) {
5687 struct ata_port *ap = host->ports[i];
5688
5689 if (!ap)
5690 continue;
5691
5692 if (ap->scsi_host)
5693 scsi_host_put(ap->scsi_host);
5694
5695 kfree(ap->pmp_link);
5696 kfree(ap->slave_link);
5697 kfree(ap);
5698 host->ports[i] = NULL;
5699 }
5700
5701 dev_set_drvdata(gendev, NULL);
5702}
5703
5704
5705
5706
5707
5708
5709
5710
5711
5712
5713
5714
5715
5716
5717
5718
5719
5720
5721
5722
5723
/**
 *	ata_host_alloc - allocate and init basic ATA host resources
 *	@dev: generic device this host is associated with
 *	@max_ports: maximum number of ATA ports associated with this host
 *
 *	Allocate and initialize basic ATA host resources.  LLD calls
 *	this function to allocate a host, initializes it fully and
 *	attaches it using ata_host_register().
 *
 *	RETURNS:
 *	Allocated ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;

	DPRINTK("ENTER\n");

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return NULL;

	/* alloc host struct plus port pointer array; the extra slot
	 * stays zeroed and NULL-terminates the array (relied upon by
	 * ata_host_register() when freeing unused ports) */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);

	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
	if (!host)
		goto err_out;

	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	mutex_init(&host->eh_mutex);
	host->dev = dev;
	host->n_ports = max_ports;

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

 err_out:
	devres_release_group(dev, NULL);
	return NULL;
}
5769
5770
5771
5772
5773
5774
5775
5776
5777
5778
5779
5780
5781
5782
5783
5784
5785
/**
 *	ata_host_alloc_pinfo - alloc host and init with port_info array
 *	@dev: generic device this host is associated with
 *	@ppi: array of ATA port_info to initialize host with
 *	@n_ports: number of ATA ports attached to this host
 *
 *	Allocate ATA host and initialize its ports with @ppi.  If a
 *	NULL terminated array is reached early, the last non-NULL
 *	entry is used for the remaining ports (@pi carries over between
 *	iterations).  NOTE(review): assumes ppi[0] is non-NULL - a NULL
 *	first entry would dereference a NULL @pi; callers must ensure this.
 *
 *	RETURNS:
 *	Allocated ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
struct ata_host *ata_host_alloc_pinfo(struct device *dev,
				      const struct ata_port_info * const * ppi,
				      int n_ports)
{
	const struct ata_port_info *pi;
	struct ata_host *host;
	int i, j;

	host = ata_host_alloc(dev, n_ports);
	if (!host)
		return NULL;

	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ppi[j])
			pi = ppi[j++];

		ap->pio_mask = pi->pio_mask;
		ap->mwdma_mask = pi->mwdma_mask;
		ap->udma_mask = pi->udma_mask;
		ap->flags |= pi->flags;
		ap->link.flags |= pi->link_flags;
		ap->ops = pi->port_ops;

		/* first non-dummy port ops become the host's default ops */
		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
			host->ops = pi->port_ops;
	}

	return host;
}
5817
5818
5819
5820
5821
5822
5823
5824
5825
5826
5827
5828
5829
5830
5831
5832
5833
5834
5835
5836
5837
5838
5839
5840
5841
5842
5843
5844
5845
5846
5847
5848
5849
5850
5851
5852
5853
5854
5855
5856
5857
5858
5859
5860
5861
5862
5863
5864int ata_slave_link_init(struct ata_port *ap)
5865{
5866 struct ata_link *link;
5867
5868 WARN_ON(ap->slave_link);
5869 WARN_ON(ap->flags & ATA_FLAG_PMP);
5870
5871 link = kzalloc(sizeof(*link), GFP_KERNEL);
5872 if (!link)
5873 return -ENOMEM;
5874
5875 ata_link_init(ap, link, 1);
5876 ap->slave_link = link;
5877 return 0;
5878}
5879
/* devres release callback registered by ata_host_start(): stop each
 * port first, then the host itself.
 */
static void ata_host_stop(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	WARN_ON(!(host->flags & ATA_HOST_STARTED));

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}

	if (host->ops->host_stop)
		host->ops->host_stop(host);
}
5897
5898
5899
5900
5901
5902
5903
5904
5905
5906
5907
5908
5909
5910
5911
5912
5913
5914
5915
5916
5917
/**
 *	ata_finalize_port_ops - finalize ata_port_operations
 *	@ops: ata_port_operations to finalize
 *
 *	An ata_port_operations can inherit from another ops table via
 *	->inherits.  This function flattens the inheritance chain: the
 *	struct is treated as an array of pointers up to the ->inherits
 *	member, and every slot the child left NULL is filled from the
 *	nearest ancestor.  Slots stubbed with an ERR_PTR sentinel are
 *	turned back into NULL.  ->inherits is cleared afterwards so
 *	finalization runs at most once per table; the static lock
 *	serializes concurrent finalization of shared tables.
 */
static void ata_finalize_port_ops(struct ata_port_operations *ops)
{
	static DEFINE_SPINLOCK(lock);
	const struct ata_port_operations *cur;
	void **begin = (void **)ops;
	void **end = (void **)&ops->inherits;
	void **pp;

	if (!ops || !ops->inherits)
		return;

	spin_lock(&lock);

	/* fill NULL slots from each ancestor, nearest first */
	for (cur = ops->inherits; cur; cur = cur->inherits) {
		void **inherit = (void **)cur;

		for (pp = begin; pp < end; pp++, inherit++)
			if (!*pp)
				*pp = *inherit;
	}

	/* ERR_PTR sentinels mean "explicitly no op" - reset to NULL */
	for (pp = begin; pp < end; pp++)
		if (IS_ERR(*pp))
			*pp = NULL;

	ops->inherits = NULL;

	spin_unlock(&lock);
}
5947
5948
5949
5950
5951
5952
5953
5954
5955
5956
5957
5958
5959
5960
5961
5962
5963
/**
 *	ata_host_start - start and freeze ports of an ATA host
 *	@host: ATA host to start ports for
 *
 *	Start and then freeze ports of @host.  Started status is
 *	recorded in host->flags, so this function can be called
 *	multiple times.  Ports are guaranteed to get started only
 *	once.  If any port fails to start, the already-started ports
 *	are stopped again.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if all ports are started successfully, -errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int have_stop = 0;
	void *start_dr = NULL;
	int i, rc;

	if (host->flags & ATA_HOST_STARTED)
		return 0;

	ata_finalize_port_ops(host->ops);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_finalize_port_ops(ap->ops);

		/* adopt the first non-dummy port's ops as host ops */
		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_stop)
			have_stop = 1;
	}

	if (host->ops->host_stop)
		have_stop = 1;

	/* if anything needs stopping later, register a devres action
	 * for it before starting anything */
	if (have_stop) {
		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
		if (!start_dr)
			return -ENOMEM;
	}

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				if (rc != -ENODEV)
					dev_err(host->dev,
						"failed to start port %d (errno=%d)\n",
						i, rc);
				goto err_out;
			}
		}
		/* freeze the port until it is registered and probed */
		ata_eh_freeze_port(ap);
	}

	if (start_dr)
		devres_add(host->dev, start_dr);
	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	/* unwind: stop the ports started so far */
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	devres_free(start_dr);
	return rc;
}
6027
6028
6029
6030
6031
6032
6033
6034
/**
 *	ata_host_init - initialize a pre-allocated ATA host
 *	@host: host to initialize
 *	@dev: device host is attached to
 *	@ops: port_ops to use for this host
 */
void ata_host_init(struct ata_host *host, struct device *dev,
		   struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	mutex_init(&host->eh_mutex);
	host->dev = dev;
	host->ops = ops;
}
6043
/* Kick EH to probe all devices on @ap: request reset of everything
 * quietly and without autopsy, flip the port from INITIALIZING to
 * LOADING, and schedule EH.  Does not wait for the probe to finish.
 */
void __ata_port_probe(struct ata_port *ap)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	unsigned long flags;

	/* probe request must be set up under host lock */
	spin_lock_irqsave(ap->lock, flags);

	ehi->probe_mask |= ATA_ALL_DEVICES;
	ehi->action |= ATA_EH_RESET;
	ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

	ap->pflags &= ~ATA_PFLAG_INITIALIZING;
	ap->pflags |= ATA_PFLAG_LOADING;
	ata_port_schedule_eh(ap);

	spin_unlock_irqrestore(ap->lock, flags);
}
6062
6063int ata_port_probe(struct ata_port *ap)
6064{
6065 int rc = 0;
6066
6067 if (ap->ops->error_handler) {
6068 __ata_port_probe(ap);
6069 ata_port_wait_eh(ap);
6070 } else {
6071 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6072 rc = ata_bus_probe(ap);
6073 DPRINTK("ata%u: bus probe end\n", ap->print_id);
6074 }
6075 return rc;
6076}
6077
6078
/* async_schedule() callback: probe one port and then scan it into SCSI. */
static void async_port_probe(void *data, async_cookie_t cookie)
{
	struct ata_port *ap = data;

	/* Hosts without ATA_HOST_PARALLEL_SCAN probe sequentially:
	 * every port but the first waits for all earlier cookies.
	 * (NOTE(review): presumably to keep device enumeration order
	 * stable - confirm.)
	 */
	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
		async_synchronize_cookie(cookie);

	(void)ata_port_probe(ap);

	/* synchronize before the SCSI scan so device order is stable */
	async_synchronize_cookie(cookie);

	ata_scsi_scan_host(ap, 1);
}
6100
6101
6102
6103
6104
6105
6106
6107
6108
6109
6110
6111
6112
6113
6114
6115
6116
/**
 *	ata_host_register - register an ATA host with the SCSI and
 *	transport layers
 *	@host: started ATA host to register
 *	@sht: SCSI host template to use for the host's SCSI hosts
 *
 *	Must be called after ata_host_start().  Assigns print IDs,
 *	adds transport ports, attaches SCSI hosts, reports each port
 *	and kicks off asynchronous probing.
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_err(host->dev, "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* The port array was allocated with one extra NULL slot; if
	 * the LLD reduced n_ports after allocation, free the ports
	 * beyond n_ports here (loop ends at the NULL terminator).
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++) {
		host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
		host->ports[i]->local_port_no = i + 1;
	}

	/* create transport objects for each port */
	for (i = 0; i < host->n_ports; i++) {
		rc = ata_tport_add(host->dev,host->ports[i]);
		if (rc) {
			goto err_tadd;
		}
	}

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		goto err_tadd;

	ata_acpi_hotplug_init(host);

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned long xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);
		if (ap->slave_link)
			sata_link_init_spd(ap->slave_link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap)) {
			ata_port_info(ap, "%cATA max %s %s\n",
				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
				      ata_mode_string(xfer_mask),
				      ap->link.eh_info.desc);
			ata_ehi_clear_desc(&ap->link.eh_info);
		} else
			ata_port_info(ap, "DUMMY\n");
	}

	/* perform each probe asynchronously */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		async_schedule(async_port_probe, ap);
	}

	return 0;

 err_tadd:
	/* unwind: delete the transport ports added so far */
	while (--i >= 0) {
		ata_tport_delete(host->ports[i]);
	}
	return rc;

}
6198
6199
6200
6201
6202
6203
6204
6205
6206
6207
6208
6209
6210
6211
6212
6213
6214
6215
6216
6217
6218
6219
6220
6221
/**
 *	ata_host_activate - start host, request IRQ and register it
 *	@host: target ATA host
 *	@irq: IRQ to request (0 for polling mode, no handler allowed)
 *	@irq_handler: irq_handler used when requesting IRQ
 *	@irq_flags: irq_flags used when requesting IRQ
 *	@sht: scsi_host_template to use when registering the host
 *
 *	Convenience wrapper: ata_host_start() + devm_request_irq() +
 *	ata_host_register().  The IRQ is released again if
 *	registration fails.
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_activate(struct ata_host *host, int irq,
		      irq_handler_t irq_handler, unsigned long irq_flags,
		      struct scsi_host_template *sht)
{
	int i, rc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	/* special case: polling mode - no IRQ, no handler */
	if (!irq) {
		WARN_ON(irq_handler);
		return ata_host_register(host, sht);
	}

	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
			      dev_driver_string(host->dev), host);
	if (rc)
		return rc;

	for (i = 0; i < host->n_ports; i++)
		ata_port_desc(host->ports[i], "irq %d", irq);

	rc = ata_host_register(host, sht);
	/* if failed, just free the IRQ and leave ports alone */
	if (rc)
		devm_free_irq(host->dev, irq, host);

	return rc;
}
6253
6254
6255
6256
6257
6258
6259
6260
6261
6262
6263
6264
/**
 *	ata_port_detach - Detach ATA port in preparation of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	ata_port_schedule_eh(ap);
	spin_unlock_irqrestore(ap->lock, flags);

	/* wait till EH commits suicide */
	ata_port_wait_eh(ap);

	/* it better be dead now */
	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));

	cancel_delayed_work_sync(&ap->hotplug_task);

 skip_eh:
	/* remove transport objects: PMP links first, then the port */
	if (ap->pmp_link) {
		int i;
		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
			ata_tlink_delete(&ap->pmp_link[i]);
	}
	ata_tport_delete(ap);

	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
6297
6298
6299
6300
6301
6302
6303
6304
6305
6306
/**
 *	ata_host_detach - Detach all ports of an ATA host
 *	@host: Host to detach
 *
 *	Detach all ports of @host, then dissociate ACPI handles.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_detach(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++)
		ata_port_detach(host->ports[i]);

	/* the host is dead now, dissociate ACPI */
	ata_acpi_dissociate(host);
}
6317
6318#ifdef CONFIG_PCI
6319
6320
6321
6322
6323
6324
6325
6326
6327
6328
6329
6330
/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	Detach the ATA host; remaining resources are released through
 *	devres when the PCI device goes away.
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);

	ata_host_detach(host);
}
6337
6338
6339int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6340{
6341 unsigned long tmp = 0;
6342
6343 switch (bits->width) {
6344 case 1: {
6345 u8 tmp8 = 0;
6346 pci_read_config_byte(pdev, bits->reg, &tmp8);
6347 tmp = tmp8;
6348 break;
6349 }
6350 case 2: {
6351 u16 tmp16 = 0;
6352 pci_read_config_word(pdev, bits->reg, &tmp16);
6353 tmp = tmp16;
6354 break;
6355 }
6356 case 4: {
6357 u32 tmp32 = 0;
6358 pci_read_config_dword(pdev, bits->reg, &tmp32);
6359 tmp = tmp32;
6360 break;
6361 }
6362
6363 default:
6364 return -EINVAL;
6365 }
6366
6367 tmp &= bits->mask;
6368
6369 return (tmp == bits->val) ? 1 : 0;
6370}
6371
6372#ifdef CONFIG_PM
/* Save PCI state and disable the device; drop to D3hot only for PM
 * events that actually put the system to sleep.
 */
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event & PM_EVENT_SLEEP)
		pci_set_power_state(pdev, PCI_D3hot);
}
6381
/* Bring the PCI device back to D0, restore its config space, re-enable
 * it and restore bus mastering.  Returns 0 or -errno from enable.
 */
int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev,
			"failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}
6399
6400int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6401{
6402 struct ata_host *host = pci_get_drvdata(pdev);
6403 int rc = 0;
6404
6405 rc = ata_host_suspend(host, mesg);
6406 if (rc)
6407 return rc;
6408
6409 ata_pci_device_do_suspend(pdev, mesg);
6410
6411 return 0;
6412}
6413
/* PCI resume helper: resume the PCI device, then the ATA host. */
int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	int rc = ata_pci_device_do_resume(pdev);

	if (!rc)
		ata_host_resume(host);

	return rc;
}
6424#endif
6425
6426#endif
6427
6428
6429
6430
6431
6432
6433
6434
6435
6436
6437
6438
/**
 *	ata_platform_remove_one - Platform layer callback for device removal
 *	@pdev: Platform device that was removed
 *
 *	Detach the ATA host; remaining resources are released through
 *	devres when the platform device goes away.
 */
int ata_platform_remove_one(struct platform_device *pdev)
{
	struct ata_host *host = platform_get_drvdata(pdev);

	ata_host_detach(host);

	return 0;
}
6447
6448static int __init ata_parse_force_one(char **cur,
6449 struct ata_force_ent *force_ent,
6450 const char **reason)
6451{
6452
6453
6454
6455
6456
6457 static struct ata_force_param force_tbl[] __initdata = {
6458 { "40c", .cbl = ATA_CBL_PATA40 },
6459 { "80c", .cbl = ATA_CBL_PATA80 },
6460 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
6461 { "unk", .cbl = ATA_CBL_PATA_UNK },
6462 { "ign", .cbl = ATA_CBL_PATA_IGN },
6463 { "sata", .cbl = ATA_CBL_SATA },
6464 { "1.5Gbps", .spd_limit = 1 },
6465 { "3.0Gbps", .spd_limit = 2 },
6466 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
6467 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
6468 { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID },
6469 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
6470 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
6471 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
6472 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
6473 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
6474 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
6475 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
6476 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
6477 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
6478 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
6479 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
6480 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
6481 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6482 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6483 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6484 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6485 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6486 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6487 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6488 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6489 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6490 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6491 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6492 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6493 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6494 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6495 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6496 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6497 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6498 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6499 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6500 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6501 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6502 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
6503 { "nohrst", .lflags = ATA_LFLAG_NO_HRST },
6504 { "nosrst", .lflags = ATA_LFLAG_NO_SRST },
6505 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6506 { "rstonce", .lflags = ATA_LFLAG_RST_ONCE },
6507 { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR },
6508 };
6509 char *start = *cur, *p = *cur;
6510 char *id, *val, *endp;
6511 const struct ata_force_param *match_fp = NULL;
6512 int nr_matches = 0, i;
6513
6514
6515 while (*p != '\0' && *p != ',')
6516 p++;
6517
6518 if (*p == '\0')
6519 *cur = p;
6520 else
6521 *cur = p + 1;
6522
6523 *p = '\0';
6524
6525
6526 p = strchr(start, ':');
6527 if (!p) {
6528 val = strstrip(start);
6529 goto parse_val;
6530 }
6531 *p = '\0';
6532
6533 id = strstrip(start);
6534 val = strstrip(p + 1);
6535
6536
6537 p = strchr(id, '.');
6538 if (p) {
6539 *p++ = '\0';
6540 force_ent->device = simple_strtoul(p, &endp, 10);
6541 if (p == endp || *endp != '\0') {
6542 *reason = "invalid device";
6543 return -EINVAL;
6544 }
6545 }
6546
6547 force_ent->port = simple_strtoul(id, &endp, 10);
6548 if (p == endp || *endp != '\0') {
6549 *reason = "invalid port/link";
6550 return -EINVAL;
6551 }
6552
6553 parse_val:
6554
6555 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6556 const struct ata_force_param *fp = &force_tbl[i];
6557
6558 if (strncasecmp(val, fp->name, strlen(val)))
6559 continue;
6560
6561 nr_matches++;
6562 match_fp = fp;
6563
6564 if (strcasecmp(val, fp->name) == 0) {
6565 nr_matches = 1;
6566 break;
6567 }
6568 }
6569
6570 if (!nr_matches) {
6571 *reason = "unknown value";
6572 return -EINVAL;
6573 }
6574 if (nr_matches > 1) {
6575 *reason = "ambigious value";
6576 return -EINVAL;
6577 }
6578
6579 force_ent->param = *match_fp;
6580
6581 return 0;
6582}
6583
6584static void __init ata_parse_force_param(void)
6585{
6586 int idx = 0, size = 1;
6587 int last_port = -1, last_device = -1;
6588 char *p, *cur, *next;
6589
6590
6591 for (p = ata_force_param_buf; *p; p++)
6592 if (*p == ',')
6593 size++;
6594
6595 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6596 if (!ata_force_tbl) {
6597 printk(KERN_WARNING "ata: failed to extend force table, "
6598 "libata.force ignored\n");
6599 return;
6600 }
6601
6602
6603 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6604 const char *reason = "";
6605 struct ata_force_ent te = { .port = -1, .device = -1 };
6606
6607 next = cur;
6608 if (ata_parse_force_one(&next, &te, &reason)) {
6609 printk(KERN_WARNING "ata: failed to parse force "
6610 "parameter \"%s\" (%s)\n",
6611 cur, reason);
6612 continue;
6613 }
6614
6615 if (te.port == -1) {
6616 te.port = last_port;
6617 te.device = last_device;
6618 }
6619
6620 ata_force_tbl[idx++] = te;
6621
6622 last_port = te.port;
6623 last_device = te.device;
6624 }
6625
6626 ata_force_tbl_size = idx;
6627}
6628
6629static int __init ata_init(void)
6630{
6631 int rc;
6632
6633 ata_parse_force_param();
6634
6635 ata_acpi_register();
6636
6637 rc = ata_sff_init();
6638 if (rc) {
6639 kfree(ata_force_tbl);
6640 return rc;
6641 }
6642
6643 libata_transport_init();
6644 ata_scsi_transport_template = ata_attach_transport();
6645 if (!ata_scsi_transport_template) {
6646 ata_sff_exit();
6647 rc = -ENOMEM;
6648 goto err_out;
6649 }
6650
6651 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6652 return 0;
6653
6654err_out:
6655 return rc;
6656}
6657
/*
 * ata_exit - libata module unload teardown
 *
 * Undoes ata_init() in reverse order: release the SCSI transport
 * template, tear down the libata transport classes, shut down the
 * SFF helper library, drop the ACPI notifier and free the
 * libata.force parameter table allocated by ata_parse_force_param().
 */
static void __exit ata_exit(void)
{
	ata_release_transport(ata_scsi_transport_template);
	libata_transport_exit();
	ata_sff_exit();
	ata_acpi_unregister();
	kfree(ata_force_tbl);
}

/* subsys_initcall: libata must be up before the drivers that depend on it */
subsys_initcall(ata_init);
module_exit(ata_exit);
6669
/* shared ratelimit state: bursts of 1 message per HZ/5 interval */
static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);

/**
 *	ata_ratelimit - check whether a printout should be rate limited
 *
 *	RETURNS:
 *	Non-zero if the caller may print, 0 if it is being rate limited.
 */
int ata_ratelimit(void)
{
	return __ratelimit(&ratelimit);
}
6676
6677
6678
6679
6680
6681
6682
6683
6684
6685
6686
6687
6688
6689
6690
6691void ata_msleep(struct ata_port *ap, unsigned int msecs)
6692{
6693 bool owns_eh = ap && ap->host->eh_owner == current;
6694
6695 if (owns_eh)
6696 ata_eh_release(ap);
6697
6698 msleep(msecs);
6699
6700 if (owns_eh)
6701 ata_eh_acquire(ap);
6702}
6703
6704
6705
6706
6707
6708
6709
6710
6711
6712
6713
6714
6715
6716
6717
6718
6719
6720
6721
6722
6723
6724
6725
6726
6727
6728u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
6729 unsigned long interval, unsigned long timeout)
6730{
6731 unsigned long deadline;
6732 u32 tmp;
6733
6734 tmp = ioread32(reg);
6735
6736
6737
6738
6739
6740 deadline = ata_deadline(jiffies, timeout);
6741
6742 while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6743 ata_msleep(ap, interval);
6744 tmp = ioread32(reg);
6745 }
6746
6747 return tmp;
6748}
6749
6750
6751
6752
/* qc_issue for the dummy port: rejects every command with AC_ERR_SYSTEM */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}
6757
/* error handler for the dummy port: nothing to recover, intentionally empty */
static void ata_dummy_error_handler(struct ata_port *ap)
{
	/* truly dummy */
}
6762
/*
 * Port operations for a port with no attached device: commands are
 * never prepared or issued, and EH scheduling uses the standard hooks.
 */
struct ata_port_operations ata_dummy_port_ops = {
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.error_handler		= ata_dummy_error_handler,
	.sched_eh		= ata_std_sched_eh,
	.end_eh			= ata_std_end_eh,
};
6770
/* port_info wrapper drivers can use for ports that should stay inert */
const struct ata_port_info ata_dummy_port_info = {
	.port_ops		= &ata_dummy_port_ops,
};
6774
6775
6776
6777
6778int ata_port_printk(const struct ata_port *ap, const char *level,
6779 const char *fmt, ...)
6780{
6781 struct va_format vaf;
6782 va_list args;
6783 int r;
6784
6785 va_start(args, fmt);
6786
6787 vaf.fmt = fmt;
6788 vaf.va = &args;
6789
6790 r = printk("%sata%u: %pV", level, ap->print_id, &vaf);
6791
6792 va_end(args);
6793
6794 return r;
6795}
6796EXPORT_SYMBOL(ata_port_printk);
6797
6798int ata_link_printk(const struct ata_link *link, const char *level,
6799 const char *fmt, ...)
6800{
6801 struct va_format vaf;
6802 va_list args;
6803 int r;
6804
6805 va_start(args, fmt);
6806
6807 vaf.fmt = fmt;
6808 vaf.va = &args;
6809
6810 if (sata_pmp_attached(link->ap) || link->ap->slave_link)
6811 r = printk("%sata%u.%02u: %pV",
6812 level, link->ap->print_id, link->pmp, &vaf);
6813 else
6814 r = printk("%sata%u: %pV",
6815 level, link->ap->print_id, &vaf);
6816
6817 va_end(args);
6818
6819 return r;
6820}
6821EXPORT_SYMBOL(ata_link_printk);
6822
6823int ata_dev_printk(const struct ata_device *dev, const char *level,
6824 const char *fmt, ...)
6825{
6826 struct va_format vaf;
6827 va_list args;
6828 int r;
6829
6830 va_start(args, fmt);
6831
6832 vaf.fmt = fmt;
6833 vaf.va = &args;
6834
6835 r = printk("%sata%u.%02u: %pV",
6836 level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
6837 &vaf);
6838
6839 va_end(args);
6840
6841 return r;
6842}
6843EXPORT_SYMBOL(ata_dev_printk);
6844
/**
 *	ata_print_version - print a driver's version string at KERN_DEBUG
 *	@dev: device the message is attached to
 *	@version: version string to print
 */
void ata_print_version(const struct device *dev, const char *version)
{
	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
}
EXPORT_SYMBOL(ata_print_version);
6850
6851
6852
6853
6854
6855
6856
/* debounce timing tables, base port_ops and host/port lifecycle */
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_base_port_ops);
EXPORT_SYMBOL_GPL(sata_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
EXPORT_SYMBOL_GPL(ata_link_next);
EXPORT_SYMBOL_GPL(ata_dev_next);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_host_alloc);
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
EXPORT_SYMBOL_GPL(ata_slave_link_init);
EXPORT_SYMBOL_GPL(ata_host_start);
EXPORT_SYMBOL_GPL(ata_host_register);
EXPORT_SYMBOL_GPL(ata_host_activate);
EXPORT_SYMBOL_GPL(ata_host_detach);
/* command/taskfile handling and transfer mode helpers */
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(atapi_cmd_type);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_pack_xfermask);
EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
EXPORT_SYMBOL_GPL(ata_mode_string);
EXPORT_SYMBOL_GPL(ata_id_xfermask);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_std_qc_defer);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
/* link reset, SCR access and SCSI glue */
EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(ata_wait_after_reset);
EXPORT_SYMBOL_GPL(sata_link_debounce);
EXPORT_SYMBOL_GPL(sata_link_resume);
EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(sata_link_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_msleep);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_link_online);
EXPORT_SYMBOL_GPL(ata_link_offline);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

/* PIO/DMA timing computation */
EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_find_mode);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);
EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);

/* PCI and platform bus helpers */
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif
#endif

EXPORT_SYMBOL_GPL(ata_platform_remove_one);

/* error handling entry points */
EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
EXPORT_SYMBOL_GPL(ata_port_desc);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
#endif
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_link_abort);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(sata_async_notification);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
EXPORT_SYMBOL_GPL(ata_do_eh);
EXPORT_SYMBOL_GPL(ata_std_error_handler);

/* cable type detection callbacks */
EXPORT_SYMBOL_GPL(ata_cable_40wire);
EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_ignore);
EXPORT_SYMBOL_GPL(ata_cable_sata);
6971