1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43#include <linux/kernel.h>
44#include <linux/module.h>
45#include <linux/pci.h>
46#include <linux/init.h>
47#include <linux/list.h>
48#include <linux/mm.h>
49#include <linux/spinlock.h>
50#include <linux/blkdev.h>
51#include <linux/delay.h>
52#include <linux/timer.h>
53#include <linux/interrupt.h>
54#include <linux/completion.h>
55#include <linux/suspend.h>
56#include <linux/workqueue.h>
57#include <linux/scatterlist.h>
58#include <linux/io.h>
59#include <linux/async.h>
60#include <linux/log2.h>
61#include <linux/slab.h>
62#include <scsi/scsi.h>
63#include <scsi/scsi_cmnd.h>
64#include <scsi/scsi_host.h>
65#include <linux/libata.h>
66#include <asm/byteorder.h>
67#include <linux/cdrom.h>
68#include <linux/ratelimit.h>
69#include <linux/pm_runtime.h>
70
71#include "libata.h"
72#include "libata-transport.h"
73
74
/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
78
/* base port operations inherited by all libata drivers: standard
 * prereset/postreset hooks and the standard error handler */
const struct ata_port_operations ata_base_port_ops = {
	.prereset		= ata_std_prereset,
	.postreset		= ata_std_postreset,
	.error_handler		= ata_std_error_handler,
};
84
/* port operations for SATA links: base ops plus NCQ-aware qc deferral
 * and the standard SATA hardreset */
const struct ata_port_operations sata_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_defer		= ata_std_qc_defer,
	.hardreset		= sata_std_hardreset,
};
91
/* forward declarations for probe-time helpers defined later in this file */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

/* id counter used for port print ids; presumably incremented per
 * registered port — the increment site is outside this chunk */
unsigned int ata_print_id = 1;
99
/* one set of settings parsed from the "force" module parameter */
struct ata_force_param {
	const char	*name;		/* human-readable name for log messages */
	unsigned int	cbl;		/* forced cable type (ATA_CBL_*), ATA_CBL_NONE = unset */
	int		spd_limit;	/* forced SATA PHY speed limit, 0 = unset */
	unsigned long	xfer_mask;	/* forced transfer mode mask, 0 = unset */
	unsigned int	horkage_on;	/* horkage flags to turn on */
	unsigned int	horkage_off;	/* horkage flags to turn off */
	unsigned int	lflags;		/* link flags to force on */
};
109
/* a force entry scoped to a specific port/device; -1 acts as a wildcard */
struct ata_force_ent {
	int			port;	/* port print id, -1 = any port */
	int			device;	/* device/link number, -1 = any device */
	struct ata_force_param	param;	/* settings to apply */
};
115
/* table of parsed "force" entries and its length */
static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

/* raw "force" parameter string; __initdata — parsed and discarded at init */
static char ata_force_param_buf[PAGE_SIZE] __initdata;

module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
123
/* module parameters controlling optional libata features */
static int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");

static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
159
160static int atapi_an;
161module_param(atapi_an, int, 0444);
162MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=0ff [default], 1=on)");
163
/* module metadata */
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
168
169
170static bool ata_sstatus_online(u32 sstatus)
171{
172 return (sstatus & 0xf) == 0x3;
173}
174
175
176
177
178
179
180
181
182
183
184
185
186
/**
 *	ata_link_next - link iteration helper
 *	@link: the previous link, NULL to start
 *	@ap: ATA port containing links to iterate
 *	@mode: iteration mode, one of ATA_LITER_*
 *
 *	Returns the next link after @link, or the first link when
 *	@link is NULL.  @mode controls whether PMP links, the host
 *	link and the slave link are visited and in which order.
 *
 *	RETURNS:
 *	Pointer to the next link, or NULL when iteration is complete.
 */
struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
			       enum ata_link_iter_mode mode)
{
	BUG_ON(mode != ATA_LITER_EDGE &&
	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);

	/* NULL link indicates start of iteration */
	if (!link)
		switch (mode) {
		case ATA_LITER_EDGE:
		case ATA_LITER_PMP_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_HOST_FIRST:
			return &ap->link;
		}

	/* we just iterated over the host link, what's next? */
	if (link == &ap->link)
		switch (mode) {
		case ATA_LITER_HOST_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_PMP_FIRST:
			if (unlikely(ap->slave_link))
				return ap->slave_link;
			/* fall through */
		case ATA_LITER_EDGE:
			return NULL;
		}

	/* slave_link is the end of iteration for its modes */
	if (unlikely(link == ap->slave_link))
		return NULL;

	/* we were over a PMP link; advance within the PMP array */
	if (++link < ap->pmp_link + ap->nr_pmp_links)
		return link;

	/* PMP links exhausted; host link comes last in PMP_FIRST mode */
	if (mode == ATA_LITER_PMP_FIRST)
		return &ap->link;

	return NULL;
}
233
234
235
236
237
238
239
240
241
242
243
244
245
/**
 *	ata_dev_next - device iteration helper
 *	@dev: the previous device, NULL to start
 *	@link: ATA link containing devices to iterate
 *	@mode: iteration mode, one of ATA_DITER_*
 *
 *	Returns the next device after @dev on @link, or the first
 *	device when @dev is NULL.  The *_REVERSE modes walk the
 *	device array backwards; the ENABLED modes skip devices that
 *	are not enabled.
 *
 *	RETURNS:
 *	Pointer to the next device, or NULL when iteration is complete.
 */
struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
				enum ata_dev_iter_mode mode)
{
	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);

	/* NULL dev indicates start of iteration */
	if (!dev)
		switch (mode) {
		case ATA_DITER_ENABLED:
		case ATA_DITER_ALL:
			dev = link->device;
			goto check;
		case ATA_DITER_ENABLED_REVERSE:
		case ATA_DITER_ALL_REVERSE:
			dev = link->device + ata_link_max_devices(link) - 1;
			goto check;
		}

 next:
	/* move to the next one */
	switch (mode) {
	case ATA_DITER_ENABLED:
	case ATA_DITER_ALL:
		if (++dev < link->device + ata_link_max_devices(link))
			goto check;
		return NULL;
	case ATA_DITER_ENABLED_REVERSE:
	case ATA_DITER_ALL_REVERSE:
		if (--dev >= link->device)
			goto check;
		return NULL;
	}

 check:
	/* in ENABLED modes, skip devices that are not enabled */
	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
	    !ata_dev_enabled(dev))
		goto next;
	return dev;
}
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301struct ata_link *ata_dev_phys_link(struct ata_device *dev)
302{
303 struct ata_port *ap = dev->link->ap;
304
305 if (!ap->slave_link)
306 return dev->link;
307 if (!dev->devno)
308 return &ap->link;
309 return ap->slave_link;
310}
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325void ata_force_cbl(struct ata_port *ap)
326{
327 int i;
328
329 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
330 const struct ata_force_ent *fe = &ata_force_tbl[i];
331
332 if (fe->port != -1 && fe->port != ap->print_id)
333 continue;
334
335 if (fe->param.cbl == ATA_CBL_NONE)
336 continue;
337
338 ap->cbl = fe->param.cbl;
339 ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
340 return;
341 }
342}
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
/**
 *	ata_force_link_limits - force link limits according to libata.force
 *	@link: ATA link of interest
 *
 *	Force link limits (PHY speed limit and link flags) according to
 *	the matching force-table entries.  The table is scanned from the
 *	last entry to the first; only the last speed limit takes effect
 *	but all matching link flags accumulate.
 */
static void ata_force_link_limits(struct ata_link *link)
{
	bool did_spd = false;
	int linkno = link->pmp;
	int i;

	/* host links use device numbers >= 15 in the force syntax */
	if (ata_is_host_link(link))
		linkno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != linkno)
			continue;

		/* only honor the first (i.e. last on cmdline) spd limit */
		if (!did_spd && fe->param.spd_limit) {
			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
					fe->param.name);
			did_spd = true;
		}

		/* let lflags accumulate across matching entries */
		if (fe->param.lflags) {
			link->flags |= fe->param.lflags;
			ata_link_notice(link,
					"FORCE: link flag 0x%x forced -> 0x%x\n",
					fe->param.lflags, link->flags);
		}
	}
}
395
396
397
398
399
400
401
402
403
404
405
406
/**
 *	ata_force_xfermask - force xfermask according to libata.force
 *	@dev: ATA device of interest
 *
 *	Force the transfer mode masks of @dev according to the last
 *	matching force-table entry (table scanned in reverse).  The
 *	forced mode clears all faster mode classes: forcing MWDMA
 *	clears UDMA, forcing PIO clears both DMA classes.
 */
static void ata_force_xfermask(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow the alternate, host-link device numbering (>= 15) */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];
		unsigned long pio_mask, mwdma_mask, udma_mask;

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!fe->param.xfer_mask)
			continue;

		ata_unpack_xfermask(fe->param.xfer_mask,
				    &pio_mask, &mwdma_mask, &udma_mask);
		if (udma_mask)
			dev->udma_mask = udma_mask;
		else if (mwdma_mask) {
			dev->udma_mask = 0;
			dev->mwdma_mask = mwdma_mask;
		} else {
			dev->udma_mask = 0;
			dev->mwdma_mask = 0;
			dev->pio_mask = pio_mask;
		}

		ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
			       fe->param.name);
		return;
	}
}
449
450
451
452
453
454
455
456
457
458
459
460
/**
 *	ata_force_horkage - force horkage according to libata.force
 *	@dev: ATA device of interest
 *
 *	Apply horkage on/off bits from every matching force-table
 *	entry.  Unlike the other force helpers this scans forward and
 *	applies all matches, so first entries on the command line take
 *	effect first and later ones can override them.
 */
static void ata_force_horkage(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow the alternate, host-link device numbering (>= 15) */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = 0; i < ata_force_tbl_size; i++) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		/* skip entries that would not change the current horkage */
		if (!(~dev->horkage & fe->param.horkage_on) &&
		    !(dev->horkage & fe->param.horkage_off))
			continue;

		dev->horkage |= fe->param.horkage_on;
		dev->horkage &= ~fe->param.horkage_off;

		ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
			       fe->param.name);
	}
}
492
493
494
495
496
497
498
499
500
501
502
503
504
/**
 *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
 *	@opcode: SCSI opcode
 *
 *	Determine ATAPI command type from @opcode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
 */
int atapi_cmd_type(u8 opcode)
{
	switch (opcode) {
	case GPCMD_READ_10:
	case GPCMD_READ_12:
		return ATAPI_READ;

	case GPCMD_WRITE_10:
	case GPCMD_WRITE_12:
	case GPCMD_WRITE_AND_VERIFY_10:
		return ATAPI_WRITE;

	case GPCMD_READ_CD:
	case GPCMD_READ_CD_MSF:
		return ATAPI_READ_CD;

	case ATA_16:
	case ATA_12:
		if (atapi_passthru16)
			return ATAPI_PASS_THRU;
		/* fall through: passthru disabled, treat as MISC */
	default:
		return ATAPI_MISC;
	}
}
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
545{
546 fis[0] = 0x27;
547 fis[1] = pmp & 0xf;
548 if (is_cmd)
549 fis[1] |= (1 << 7);
550
551 fis[2] = tf->command;
552 fis[3] = tf->feature;
553
554 fis[4] = tf->lbal;
555 fis[5] = tf->lbam;
556 fis[6] = tf->lbah;
557 fis[7] = tf->device;
558
559 fis[8] = tf->hob_lbal;
560 fis[9] = tf->hob_lbam;
561 fis[10] = tf->hob_lbah;
562 fis[11] = tf->hob_feature;
563
564 fis[12] = tf->nsect;
565 fis[13] = tf->hob_nsect;
566 fis[14] = 0;
567 fis[15] = tf->ctl;
568
569 fis[16] = 0;
570 fis[17] = 0;
571 fis[18] = 0;
572 fis[19] = 0;
573}
574
575
576
577
578
579
580
581
582
583
584
585
586void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
587{
588 tf->command = fis[2];
589 tf->feature = fis[3];
590
591 tf->lbal = fis[4];
592 tf->lbam = fis[5];
593 tf->lbah = fis[6];
594 tf->device = fis[7];
595
596 tf->hob_lbal = fis[8];
597 tf->hob_lbam = fis[9];
598 tf->hob_lbah = fis[10];
599
600 tf->nsect = fis[12];
601 tf->hob_nsect = fis[13];
602}
603
/* R/W command opcodes indexed by protocol-group (0/8/16) + fua*4 +
 * lba48*2 + write; a zero entry means no such command exists */
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
633
634
635
636
637
638
639
640
641
642
643
644
/**
 *	ata_rwcmd_protocol - set taskfile r/w command and protocol
 *	@tf: command to examine and configure
 *	@dev: device tf belongs to
 *
 *	Examine the device configuration and tf->flags to select
 *	the proper read/write command and protocol to use.
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	0 on success, -1 if no suitable command exists in ata_rw_cmds.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	/* offsets into ata_rw_cmds: fua +4, lba48 +2, write +1 */
	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* controller can only do LBA48 via PIO */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	Read the block address from @tf.  This function can handle all
 *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
 *	flags select the address format to use.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= (u64)tf->hob_lbal << 24;
		} else
			/* LBA28: bits 27:24 live in the device register */
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		/* CHS sectors are 1-based; 0 is invalid, clamp and warn */
		if (!sect) {
			ata_dev_warn(dev,
				     "device reported invalid CHS sector 0\n");
			sect = 1;
		}

		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
	}

	return block;
}
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		/* NCQ: tag goes in nsect bits 7:3, count in feature regs */
		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;	/* bit 6 is the LBA bit (ATA_LBA) */
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28: bits 27:24 go into the device register */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl = track / dev->heads;
		head = track % dev->heads;
		sect = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862unsigned long ata_pack_xfermask(unsigned long pio_mask,
863 unsigned long mwdma_mask,
864 unsigned long udma_mask)
865{
866 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
867 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
868 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
869}
870
871
872
873
874
875
876
877
878
879
880
/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination mask will be ignored.
 */
void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
			 unsigned long *mwdma_mask, unsigned long *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
891
/* maps each xfer_mask bit range (shift, width) to its XFER_* base
 * value; the table is terminated by an entry with shift == -1 */
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};
901
902
903
904
905
906
907
908
909
910
911
912
913
914
/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0xff if no match found.
 */
u8 ata_xfer_mask2mode(unsigned long xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0xff;
}
925
926
927
928
929
930
931
932
933
934
935
936
937
/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode.  The mask includes
 *	@xfer_mode and all slower modes in the same class.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
unsigned long ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			/* all bits up to and including the mode's bit,
			 * restricted to this class's bit range */
			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
				& ~((1 << ent->shift) - 1);
	return 0;
}
948
949
950
951
952
953
954
955
956
957
958
959
960
961int ata_xfer_mode2shift(unsigned long xfer_mode)
962{
963 const struct ata_xfer_ent *ent;
964
965 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
966 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
967 return ent->shift;
968 return -1;
969}
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	/* entry order must match the xfer_mask bit layout */
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}
1016
/* map a 1-based SATA link speed number (SStatus SPD value) to a
 * human-readable string; 0 and out-of-range values are unknown */
const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
		"6.0 Gbps",
	};
	unsigned int idx = spd - 1;

	if (spd == 0 || idx >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[idx];
}
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section of
 *	ATA/PI spec (volume 1, sect 5.14).  PMP and SEMB signatures
 *	are also recognized from the lbam/lbah values.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
 *	%ATA_DEV_SEMB or %ATA_DEV_UNKNOWN.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* signature is carried in the lbam/lbah register pair */

	/* lbam/lbah == 0x00/0x00: ATA device */
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	/* lbam/lbah == 0x14/0xeb: ATAPI device */
	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	/* lbam/lbah == 0x69/0x96: port multiplier */
	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");
		return ATA_DEV_PMP;
	}

	/* lbam/lbah == 0x3c/0xc3: SEMB (may actually be an ATA device) */
	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		DPRINTK("found SEMB device by sig (could be ATA device)\n");
		return ATA_DEV_SEMB;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107void ata_id_string(const u16 *id, unsigned char *s,
1108 unsigned int ofs, unsigned int len)
1109{
1110 unsigned int c;
1111
1112 BUG_ON(len & 1);
1113
1114 while (len > 0) {
1115 c = id[ofs] >> 8;
1116 *s = c;
1117 s++;
1118
1119 c = id[ofs] & 0xff;
1120 *s = c;
1121 s++;
1122
1123 ofs++;
1124 len -= 2;
1125 }
1126}
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	/* leave one byte for the NUL terminator */
	ata_id_string(id, s, ofs, len - 1);

	/* trim trailing spaces and terminate */
	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}
1154
/* total addressable sectors advertised by the IDENTIFY data: LBA48
 * capacity, LBA28 capacity, or the (current) CHS geometry product */
static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
		else
			return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
	} else {
		/* prefer the current geometry when the device marks it valid */
		if (ata_id_current_chs_valid(id))
			return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
			       id[ATA_ID_CUR_SECTORS];
		else
			return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
			       id[ATA_ID_SECTORS];
	}
}
1171
1172u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1173{
1174 u64 sectors = 0;
1175
1176 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1177 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1178 sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1179 sectors |= (tf->lbah & 0xff) << 16;
1180 sectors |= (tf->lbam & 0xff) << 8;
1181 sectors |= (tf->lbal & 0xff);
1182
1183 return sectors;
1184}
1185
1186u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1187{
1188 u64 sectors = 0;
1189
1190 sectors |= (tf->device & 0x0f) << 24;
1191 sectors |= (tf->lbah & 0xff) << 16;
1192 sectors |= (tf->lbam & 0xff) << 8;
1193 sectors |= (tf->lbal & 0xff);
1194
1195 return sectors;
1196}
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1211{
1212 unsigned int err_mask;
1213 struct ata_taskfile tf;
1214 int lba48 = ata_id_has_lba48(dev->id);
1215
1216 ata_tf_init(dev, &tf);
1217
1218
1219 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1220
1221 if (lba48) {
1222 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1223 tf.flags |= ATA_TFLAG_LBA48;
1224 } else
1225 tf.command = ATA_CMD_READ_NATIVE_MAX;
1226
1227 tf.protocol |= ATA_PROT_NODATA;
1228 tf.device |= ATA_LBA;
1229
1230 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1231 if (err_mask) {
1232 ata_dev_warn(dev,
1233 "failed to read native max address (err_mask=0x%x)\n",
1234 err_mask);
1235 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1236 return -EACCES;
1237 return -EIO;
1238 }
1239
1240 if (lba48)
1241 *max_sectors = ata_tf_to_lba48(&tf) + 1;
1242 else
1243 *max_sectors = ata_tf_to_lba(&tf) + 1;
1244 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1245 (*max_sectors)--;
1246 return 0;
1247}
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1262{
1263 unsigned int err_mask;
1264 struct ata_taskfile tf;
1265 int lba48 = ata_id_has_lba48(dev->id);
1266
1267 new_sectors--;
1268
1269 ata_tf_init(dev, &tf);
1270
1271 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1272
1273 if (lba48) {
1274 tf.command = ATA_CMD_SET_MAX_EXT;
1275 tf.flags |= ATA_TFLAG_LBA48;
1276
1277 tf.hob_lbal = (new_sectors >> 24) & 0xff;
1278 tf.hob_lbam = (new_sectors >> 32) & 0xff;
1279 tf.hob_lbah = (new_sectors >> 40) & 0xff;
1280 } else {
1281 tf.command = ATA_CMD_SET_MAX;
1282
1283 tf.device |= (new_sectors >> 24) & 0xf;
1284 }
1285
1286 tf.protocol |= ATA_PROT_NODATA;
1287 tf.device |= ATA_LBA;
1288
1289 tf.lbal = (new_sectors >> 0) & 0xff;
1290 tf.lbam = (new_sectors >> 8) & 0xff;
1291 tf.lbah = (new_sectors >> 16) & 0xff;
1292
1293 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1294 if (err_mask) {
1295 ata_dev_warn(dev,
1296 "failed to set max address (err_mask=0x%x)\n",
1297 err_mask);
1298 if (err_mask == AC_ERR_DEV &&
1299 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1300 return -EACCES;
1301 return -EIO;
1302 }
1303
1304 return 0;
1305}
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
/**
 *	ata_hpa_resize - Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and
 *	resize it if required to the full size of the media.  The caller
 *	must check the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA resizing.
		 */
		if (rc == -EACCES || !unlock_hpa) {
			ata_dev_warn(dev,
				     "HPA support seems broken, skipping HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}
	dev->n_native_sectors = native_sectors;

	/* nothing to do? */
	if (native_sectors <= sectors || !unlock_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_info(dev,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_warn(dev,
				"native sectors (%llu) is smaller than sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_warn(dev,
			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
			     (unsigned long long)sectors,
			     (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_err(dev,
			    "failed to re-read IDENTIFY data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_info(dev,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page (capability and transfer-mode related words).
 *
 *	LOCKING:
 *	caller.
 */
static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device.  This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* word 64 (advanced PIO modes) valid? */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;	/* PIO0-2 are always supported */
	} else {
		/* old format: the single highest supported mode is
		 * encoded in the high byte of word 51 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/* CompactFlash advertises its advanced PIO/DMA modes
		 * separately in the CFA modes word */
		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;

		if (pio)
			pio_mask |= (1 << 5);	/* PIO5 */
		if (pio > 1)
			pio_mask |= (1 << 6);	/* PIO6 */
		if (dma)
			mwdma_mask |= (1 << 3);	/* MWDMA3 */
		if (dma > 1)
			mwdma_mask |= (1 << 4);	/* MWDMA4 */
	}

	/* word 88 (UDMA modes) valid? */
	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
1511
/* completion callback for internal commands: wake up the waiter
 * sleeping in ata_exec_internal_sg() */
static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sgl: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	int auto_timeout = 0;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* save and clear the active-command state so the internal
	 * command runs alone; restored before returning */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout) {
		if (ata_probe_timeout)
			timeout = ata_probe_timeout * 1000;
		else {
			timeout = ata_internal_cmd_timeout(dev, command);
			auto_timeout = 1;
		}
	}

	if (ap->ops->error_handler)
		ata_eh_release(ap);

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	if (ap->ops->error_handler)
		ata_eh_acquire(ap);

	ata_sff_flush_pio_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
					     command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		/* drop AC_ERR_OTHER when a more specific error is set */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	spin_unlock_irqrestore(ap->lock, flags);

	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
		ata_internal_cmd_timed_out(dev, command);

	return err_mask;
}
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720unsigned ata_exec_internal(struct ata_device *dev,
1721 struct ata_taskfile *tf, const u8 *cdb,
1722 int dma_dir, void *buf, unsigned int buflen,
1723 unsigned long timeout)
1724{
1725 struct scatterlist *psg = NULL, sg;
1726 unsigned int n_elem = 0;
1727
1728 if (dma_dir != DMA_NONE) {
1729 WARN_ON(!buf);
1730 sg_init_one(&sg, buf, buflen);
1731 psg = &sg;
1732 n_elem++;
1733 }
1734
1735 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1736 timeout);
1737}
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1754{
1755 struct ata_taskfile tf;
1756
1757 ata_tf_init(dev, &tf);
1758
1759 tf.command = cmd;
1760 tf.flags |= ATA_TFLAG_DEVICE;
1761 tf.protocol = ATA_PROT_NODATA;
1762
1763 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1764}
1765
1766
1767
1768
1769
1770
1771
1772
1773unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1774{
1775
1776
1777
1778
1779 if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1780 return 0;
1781
1782
1783
1784 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1785 return 0;
1786
1787 if (ata_id_is_cfa(adev->id)
1788 && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1789 return 0;
1790
1791 if (adev->pio_mode > XFER_PIO_2)
1792 return 1;
1793
1794 if (ata_id_has_iordy(adev->id))
1795 return 1;
1796 return 0;
1797}
1798
1799
1800
1801
1802
1803
1804
1805
1806static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1807{
1808
1809 if (adev->id[ATA_ID_FIELD_VALID] & 2) {
1810 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1811
1812 if (pio) {
1813
1814 if (pio > 240)
1815 return 3 << ATA_SHIFT_PIO;
1816 return 7 << ATA_SHIFT_PIO;
1817 }
1818 }
1819 return 3 << ATA_SHIFT_PIO;
1820}
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832unsigned int ata_do_dev_read_id(struct ata_device *dev,
1833 struct ata_taskfile *tf, u16 *id)
1834{
1835 return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1836 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1837}
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives (drives reporting major version < 4
 *	without LBA support).
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	bool is_semb = class == ATA_DEV_SEMB;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_dbg(dev, "%s: ENTER\n", __func__);

retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_SEMB:
		/* Some devices report a SEMB signature but are really
		 * ATA - treat them as ATA and fall through. */
		class = ATA_DEV_ATA;
		/* fall through */
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized. */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available. */
	tf.flags |= ATA_TFLAG_POLLING;

	/* Low-level drivers may supply their own ID-read method. */
	if (ap->ops->read_id)
		err_mask = ap->ops->read_id(dev, &tf, id);
	else
		err_mask = ata_do_dev_read_id(dev, &tf, id);

	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			ata_dev_dbg(dev, "NODEV after polling detection\n");
			return -ENOENT;
		}

		if (is_semb) {
			ata_dev_info(dev,
		     "IDENTIFY failed on device w/ SEMB sig, disabled\n");
			/* SEMB is not supported yet */
			*p_class = ATA_DEV_SEMB_UNSUP;
			return 0;
		}

		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			/* Device or controller might have reported
			 * the wrong device class.  Give a shot at the
			 * other IDENTIFY if the current one is
			 * aborted by the device.
			 */
			if (may_fallback) {
				may_fallback = 0;

				if (class == ATA_DEV_ATA)
					class = ATA_DEV_ATAPI;
				else
					class = ATA_DEV_ATA;
				goto retry;
			}

			/* Control reaches here iff the device aborted
			 * both flavors of IDENTIFYs which happens
			 * sometimes with phantom devices.
			 */
			ata_dev_dbg(dev,
				    "both IDENTIFYs aborted, assuming NODEV\n");
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
		/* Debug aid: dump the raw IDENTIFY page. */
		ata_dev_dbg(dev, "dumping IDENTIFY data, "
			    "class=%d may_fallback=%d tried_spinup=%d\n",
			    class, may_fallback, tried_spinup);
		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
			     reason, err_mask);
	return rc;
}
2040
2041static int ata_do_link_spd_horkage(struct ata_device *dev)
2042{
2043 struct ata_link *plink = ata_dev_phys_link(dev);
2044 u32 target, target_limit;
2045
2046 if (!sata_scr_valid(plink))
2047 return 0;
2048
2049 if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2050 target = 1;
2051 else
2052 return 0;
2053
2054 target_limit = (1 << target) - 1;
2055
2056
2057 if (plink->sata_spd_limit <= target_limit)
2058 return 0;
2059
2060 plink->sata_spd_limit = target_limit;
2061
2062
2063
2064
2065
2066 if (plink->sata_spd > target) {
2067 ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2068 sata_spd_string(target));
2069 return -EAGAIN;
2070 }
2071 return 0;
2072}
2073
2074static inline u8 ata_dev_knobble(struct ata_device *dev)
2075{
2076 struct ata_port *ap = dev->link->ap;
2077
2078 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2079 return 0;
2080
2081 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2082}
2083
/*
 * Configure NCQ for @dev and format a human-readable description of the
 * result into @desc (at most @desc_sz bytes).  Enables FPDMA
 * auto-activation when both host and device support it.  Returns 0 on
 * success, -EIO if enabling AA failed with a non-device error.
 */
static int ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->link->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
	unsigned int err_mask;
	char *aa_desc = "";

	if (!ata_id_has_ncq(dev->id)) {
		/* Device doesn't do NCQ at all - empty description. */
		desc[0] = '\0';
		return 0;
	}
	if (dev->horkage & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return 0;
	}
	if (ap->flags & ATA_FLAG_NCQ) {
		/* Host-side queue depth; one tag is reserved. */
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	/* Enable FPDMA auto-activate if both sides support it and the
	 * device isn't known-broken for it. */
	if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
		(ap->flags & ATA_FLAG_FPDMA_AA) &&
		ata_id_has_fpdma_aa(dev->id)) {
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
			SATA_FPDMA_AA);
		if (err_mask) {
			ata_dev_err(dev,
				    "failed to enable AA (error_mask=0x%x)\n",
				    err_mask);
			if (err_mask != AC_ERR_DEV) {
				/* Transport-level failure: remember it and
				 * retry without AA via EH. */
				dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
				return -EIO;
			}
		} else
			aa_desc = ", AA";
	}

	/* Report the effective depth (host-limited if smaller). */
	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
			ddepth, aa_desc);
	return 0;
}
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned long xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_dbg(dev, "%s: ENTER\n", __func__);

	/* set horkage */
	dev->horkage |= ata_dev_blacklisted(dev);
	ata_force_horkage(dev);

	if (dev->horkage & ATA_HORKAGE_DISABLE) {
		ata_dev_info(dev, "unsupported device, disabling\n");
		ata_dev_disable(dev);
		return 0;
	}

	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
	    dev->class == ATA_DEV_ATAPI) {
		ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
			     atapi_enabled ? "not supported with this driver"
			     : "disabled");
		ata_dev_disable(dev);
		return 0;
	}

	rc = ata_do_link_spd_horkage(dev);
	if (rc)
		return rc;

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_dbg(dev,
			    "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			    "85:%04x 86:%04x 87:%04x 88:%04x\n",
			    __func__,
			    id[49], id[82], id[83], id[84],
			    id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;
	dev->multi_count = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			/* CPRM may make this media unusable */
			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
				ata_dev_warn(dev,
	"supports DRM functions and may not be fully accessible\n");
			snprintf(revbuf, 7, "CFA");
		} else {
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
			/* Warn the user if the device has TPM extensions */
			if (ata_id_has_tpm(id))
				ata_dev_warn(dev,
	"supports DRM functions and may not be fully accessible\n");
		}

		dev->n_sectors = ata_id_n_sectors(id);

		/* get current R/W Multiple count setting */
		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
			unsigned int max = dev->id[47] & 0xff;
			unsigned int cnt = dev->id[59] & 0xff;
			/* only recognize/allow powers of two here */
			if (is_power_of_2(max) && is_power_of_2(cnt))
				if (cnt <= max)
					dev->multi_count = cnt;
		}

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[24];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
			if (rc)
				return rc;

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_info(dev, "%s: %s, %s, max %s\n",
					     revbuf, modelbuf, fwrevbuf,
					     ata_mode_string(xfer_mask));
				ata_dev_info(dev,
					     "%llu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_info(dev, "%s: %s, %s, max %s\n",
					     revbuf, modelbuf, fwrevbuf,
					     ata_mode_string(xfer_mask));
				ata_dev_info(dev,
					     "%llu sectors, multi %u, CHS %u/%u/%u\n",
					     (unsigned long long)dev->n_sectors,
					     dev->multi_count, dev->cylinders,
					     dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		const char *dma_dir_string = "";
		u32 sntf;

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_warn(dev, "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support.  If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if (atapi_an &&
		    (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!sata_pmp_attached(ap) ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
			unsigned int err_mask;

			/* issue SET feature command to turn this on */
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_AN);
			if (err_mask)
				ata_dev_err(dev,
					    "failed to enable ATAPI AN (err_mask=0x%x)\n",
					    err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
			dev->flags |= ATA_DFLAG_DMADIR;
			dma_dir_string = ", DMADIR";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_info(dev,
				     "ATAPI: %s, %s, max %s%s%s%s\n",
				     modelbuf, fwrevbuf,
				     ata_mode_string(xfer_mask),
				     cdb_intr_string, atapi_an_string,
				     dma_dir_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	/* Limit PATA drive on SATA cable bridge transfers to udma5,
	   200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_info(dev, "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if ((dev->class == ATA_DEV_ATAPI) &&
	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
	}

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot. Do this after the dev_config call as some controllers
		   with buggy firmware may want to avoid reporting false device
		   bugs */

		if (print_info) {
			ata_dev_warn(dev,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_warn(dev,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
		ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
		ata_dev_warn(dev, "         contact the vendor or visit http://ata.wiki.kernel.org\n");
	}

	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
	return rc;
}
2435
2436
2437
2438
2439
2440
2441
2442
2443
/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 */
int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}
2448
2449
2450
2451
2452
2453
2454
2455
2456
/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 */
int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}
2461
2462
2463
2464
2465
2466
2467
2468
/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 */
int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}
2473
2474
2475
2476
2477
2478
2479
2480
/**
 *	ata_cable_ignore	-	return ignored PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which don't use cable type to limit
 *	transfer mode.
 */
int ata_cable_ignore(struct ata_port *ap)
{
	return ATA_CBL_PATA_IGN;
}
2485
2486
2487
2488
2489
2490
2491
2492
/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables.
 */
int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
2497
2498
2499
2500
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */
int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	ata_for_each_dev(dev, &ap->link, ALL)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	ata_for_each_dev(dev, &ap->link, ALL) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	ata_for_each_dev(dev, &ap->link, ALL) {
		if (dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of
	 * the reported cable types and sensed types.  When SATA
	 * drives indicate we have a bridge, we don't know which end
	 * of the link the bridge is which is a problem.
	 */
	ata_for_each_dev(dev, &ap->link, ENABLED)
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	ata_for_each_dev(dev, &ap->link, ENABLED) {
		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	ata_for_each_dev(dev, &ap->link, ENABLED)
		return 0;

	return -ENODEV;

 fail:
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fall through */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link, 0);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
2633
2634
2635
2636
2637
2638
2639
2640
2641
2642
2643static void sata_print_link_status(struct ata_link *link)
2644{
2645 u32 sstatus, scontrol, tmp;
2646
2647 if (sata_scr_read(link, SCR_STATUS, &sstatus))
2648 return;
2649 sata_scr_read(link, SCR_CONTROL, &scontrol);
2650
2651 if (ata_phys_link_online(link)) {
2652 tmp = (sstatus >> 4) & 0xf;
2653 ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
2654 sata_spd_string(tmp), sstatus, scontrol);
2655 } else {
2656 ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
2657 sstatus, scontrol);
2658 }
2659}
2660
2661
2662
2663
2664
2665
2666
2667
2668
2669struct ata_device *ata_dev_pair(struct ata_device *adev)
2670{
2671 struct ata_link *link = adev->link;
2672 struct ata_device *pair = &link->device[1 - adev->devno];
2673 if (!ata_dev_enabled(pair))
2674 return NULL;
2675 return pair;
2676}
2677
2678
2679
2680
2681
2682
2683
2684
2685
2686
2687
2688
2689
2690
2691
2692
2693
2694
2695
2696
2697
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *	@spd_limit: Additional limit
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	If @spd_limit is non-zero, the speed is limited to equal to or
 *	lower than @spd_limit if such speed is supported.  If
 *	@spd_limit is slower than any supported speed, only the lowest
 *	supported speed is allowed.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
{
	u32 sstatus, spd, mask;
	int rc, bit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0 && ata_sstatus_online(sstatus))
		spd = (sstatus >> 4) & 0xf;
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;

	/* unconditionally mask off the highest bit */
	bit = fls(mask) - 1;
	mask &= ~(1 << bit);

	/* Mask off all speeds higher than or equal to the current
	 * one.  Force 1.5Gbps if current SPD is not available.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		mask &= 1;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	if (spd_limit) {
		if (mask & ((1 << spd_limit) - 1))
			mask &= (1 << spd_limit) - 1;
		else {
			/* nothing at or below the requested limit -
			 * keep only the lowest supported speed */
			bit = ffs(mask) - 1;
			mask = 1 << bit;
		}
	}

	link->sata_spd_limit = mask;

	ata_link_warn(link, "limiting SATA link speed to %s\n",
		      sata_spd_string(fls(mask)));

	return 0;
}
2751
2752static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2753{
2754 struct ata_link *host_link = &link->ap->link;
2755 u32 limit, target, spd;
2756
2757 limit = link->sata_spd_limit;
2758
2759
2760
2761
2762
2763 if (!ata_is_host_link(link) && host_link->sata_spd)
2764 limit &= (1 << host_link->sata_spd) - 1;
2765
2766 if (limit == UINT_MAX)
2767 target = 0;
2768 else
2769 target = fls(limit);
2770
2771 spd = (*scontrol >> 4) & 0xf;
2772 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2773
2774 return spd != target;
2775}
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790
2791
2792static int sata_set_spd_needed(struct ata_link *link)
2793{
2794 u32 scontrol;
2795
2796 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2797 return 1;
2798
2799 return __sata_set_spd_needed(link, &scontrol);
2800}
2801
2802
2803
2804
2805
2806
2807
2808
2809
2810
2811
2812
2813
2814
2815int sata_set_spd(struct ata_link *link)
2816{
2817 u32 scontrol;
2818 int rc;
2819
2820 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2821 return rc;
2822
2823 if (!__sata_set_spd_needed(link, &scontrol))
2824 return 0;
2825
2826 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2827 return rc;
2828
2829 return 1;
2830}
2831
2832
2833
2834
2835
2836
2837
2838
2839
2840
2841
2842
2843
/*
 * Standard ATA timing tables, all times in nanoseconds.
 * Columns: { mode, setup, act8b, rec8b, cyc8b, active, recover,
 *            dmack_hold, cycle, udma }
 * The table is sorted by mode and terminated by the 0xFF sentinel,
 * which ata_timing_find_mode() relies on.
 */
static const struct ata_timing ata_timing[] = {
/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0,  960,   0 }, */
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 0,  600,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 0,  383,   0 },
	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 0,  240,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 0,  180,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 0,  120,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 0,  100,   0 },
	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20, 0,   80,   0 },

	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 50, 960,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 30, 480,   0 },
	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 20, 240,   0 },

	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 20, 480,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 5,  150,   0 },
	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 5,  120,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 5,  100,   0 },
	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20, 5,   80,   0 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0, 0,    0, 150 }, */
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0, 0,    0, 120 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0, 0,    0,  80 },
	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0, 0,    0,  60 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0, 0,    0,  45 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0, 0,    0,  30 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0, 0,    0,  20 },
	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0, 0,    0,  15 },

	{ 0xFF }	/* sentinel - must be last */
};
2875
/* Round v up to a whole number of 'unit' periods (ceiling division). */
#define ENOUGH(v, unit) (((v)-1)/(unit)+1)
/* Like ENOUGH but keeps an unset (zero) value at zero. */
#define EZ(v, unit) ((v)?ENOUGH(v, unit):0)

/*
 * Quantize timing @t (given in nanoseconds) into clock cycles of @T
 * (command cycle time in ns*1000) / @UT (UDMA cycle time), storing the
 * result in @q.  @t and @q may alias.
 */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup	= EZ(t->setup      * 1000,  T);
	q->act8b	= EZ(t->act8b      * 1000,  T);
	q->rec8b	= EZ(t->rec8b      * 1000,  T);
	q->cyc8b	= EZ(t->cyc8b      * 1000,  T);
	q->active	= EZ(t->active     * 1000,  T);
	q->recover	= EZ(t->recover    * 1000,  T);
	q->dmack_hold	= EZ(t->dmack_hold * 1000,  T);
	q->cycle	= EZ(t->cycle      * 1000,  T);
	q->udma		= EZ(t->udma       * 1000, UT);
}
2891
/*
 * Merge timings @a and @b into @m by taking the per-field maximum
 * (i.e. the slower, safer value).  Only fields selected by the
 * ATA_TIMING_* bits in @what are merged; @m may alias @a or @b.
 */
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}
2905
2906const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
2907{
2908 const struct ata_timing *t = ata_timing;
2909
2910 while (xfer_mode > t->mode)
2911 t++;
2912
2913 if (xfer_mode == t->mode)
2914 return t;
2915 return NULL;
2916}
2917
/*
 * Compute the quantized timing for @adev at transfer mode @speed into
 * @t, using command cycle @T and UDMA cycle @UT (both in ns*1000 as
 * produced by ata_timing_quantize()).  Drive-reported EIDE cycle times
 * are merged in where valid.  Returns 0 on success, -EINVAL if @speed
 * has no timing table entry.
 */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const u16 *id = adev->id;
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */
	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */
	if (id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));

		if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2)
				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
			else if ((speed <= XFER_PIO_4) ||
				 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
			p.cycle = id[ATA_ID_EIDE_DMA_MIN];

		/* take the slower of table and drive-reported cycles */
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */
	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */
	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */
	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/* In a few cases quantisation may produce enough errors to
	   leave t->cycle too low for the sum of active and recovery
	   if so we must correct this */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
2993
2994
2995
2996
2997
2998
2999
3000
3001
3002
3003
3004
3005
3006
3007
3008
3009
/**
 *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
 *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
 *	@cycle: cycle duration in ns
 *
 *	Return matching xfer mode for @cycle.  The returned mode is of
 *	the transfer type specified by @xfer_shift.  If @cycle is too
 *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
 *	than the fastest known mode, the fastest mode is returned.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mode, 0xff if no match found.
 */
u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
{
	u8 base_mode = 0xff, last_mode = 0xff;
	const struct ata_xfer_ent *ent;
	const struct ata_timing *t;

	/* find the base mode for this transfer type */
	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (ent->shift == xfer_shift)
			base_mode = ent->base;

	/* walk successive modes of this type while their cycle time
	 * still fits within @cycle */
	for (t = ata_timing_find_mode(base_mode);
	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
		unsigned short this_cycle;

		switch (xfer_shift) {
		case ATA_SHIFT_PIO:
		case ATA_SHIFT_MWDMA:
			this_cycle = t->cycle;
			break;
		case ATA_SHIFT_UDMA:
			this_cycle = t->udma;
			break;
		default:
			return 0xff;
		}

		if (cycle > this_cycle)
			break;

		last_mode = t->mode;
	}

	return last_mode;
}
3044
3045
3046
3047
3048
3049
3050
3051
3052
3053
3054
3055
3056
3057
3058
3059
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned long orig_mask, xfer_mask;
	unsigned long pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* drop the highest PIO mode */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* drop the highest DMA mode, UDMA first */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		/* limit UDMA to modes safe on a 40-wire cable */
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fall through - also kill all DMA modes */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* can't go below PIO0 and a no-op adjustment is a failure */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_warn(dev, "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
3132
/*
 * Program the transfer mode selected in dev->xfer_mode into the device
 * via SET FEATURES - XFER MODE (unless skipped for NOSETXFER SATA
 * devices), then revalidate the device.  Certain device errors are
 * deliberately ignored for old/odd devices.  Returns 0 on success,
 * -EIO or revalidation errno on failure.
 */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
	const char *dev_err_whine = "";
	int ign_dev_err = 0;
	unsigned int err_mask = 0;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	/* NOSETXFER devices on real SATA skip the SETXFER command. */
	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
		dev_err_whine = " (SET_XFERMODE skipped)";
	else {
		if (nosetxfer)
			ata_dev_warn(dev,
				     "NOSETXFER but PATA detected - can't "
				     "skip SETXFER, might malfunction\n");
		err_mask = ata_dev_set_xfermode(dev);
	}

	/* non-device errors are fatal right away */
	if (err_mask & ~AC_ERR_DEV)
		goto fail;

	/* revalidate */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	if (dev->xfer_shift == ATA_SHIFT_PIO) {
		/* Old CFA may refuse this command, which is just fine */
		if (ata_id_is_cfa(dev->id))
			ign_dev_err = 1;
		/* Catch several broken garbage emulations plus some pre
		   ATA devices */
		if (ata_id_major_version(dev->id) == 0 &&
					dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;
		/* Some very old devices and some bad newer ones fail
		   any kind of SET_XFERMODE request but support PIO0-2
		   timings and no IORDY */
		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;
	}

	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
	    dev->dma_mode == XFER_MW_DMA_0 &&
	    (dev->id[63] >> 8) & 1)
		ign_dev_err = 1;

	/* if the device is actually configured correctly, ignore dev err */
	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
		ign_dev_err = 1;

	if (err_mask & AC_ERR_DEV) {
		if (!ign_dev_err)
			goto fail;
		else
			dev_err_whine = " (device error ignored)";
	}

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_info(dev, "configured for %s%s\n",
		     ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
		     dev_err_whine);

	return 0;

 fail:
	ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
	return -EIO;
}
3213
3214
3215
3216
3217
3218
3219
3220
3221
3222
3223
3224
3225
3226
3227
3228
3229
3230
/**
 *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
 *	@link: link on which timings will be programmed
 *	@r_failed_dev: out parameter for the failed device
 *
 *	Standard implementation of the function used to tune and set
 *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_dev_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;
	int rc = 0, used_dma = 0, found = 0;

	/* step 1: calculate xfer_mask */
	ata_for_each_dev(dev, link, ENABLED) {
		unsigned long pio_mask, dma_mask;
		unsigned int mode_mask;

		/* ATAPI and CFA devices have their own DMA enable masks */
		mode_mask = ATA_DMA_MASK_ATA;
		if (dev->class == ATA_DEV_ATAPI)
			mode_mask = ATA_DMA_MASK_ATAPI;
		else if (ata_id_is_cfa(dev->id))
			mode_mask = ATA_DMA_MASK_CFA;

		ata_dev_xfermask(dev);
		ata_force_xfermask(dev);

		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);

		if (libata_dma_mask & mode_mask)
			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
						     dev->udma_mask);
		else
			dma_mask = 0;

		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (ata_dma_enabled(dev))
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	ata_for_each_dev(dev, link, ENABLED) {
		if (dev->pio_mode == 0xff) {
			ata_dev_warn(dev, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	ata_for_each_dev(dev, link, ENABLED) {
		if (!ata_dma_enabled(dev))
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	ata_for_each_dev(dev, link, ENABLED) {
		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status. If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = ap;

 out:
	if (rc)
		*r_failed_dev = dev;
	return rc;
}
3312
3313
3314
3315
3316
3317
3318
3319
3320
3321
3322
3323
3324
3325
3326
3327
3328
3329
3330
3331
3332
/**
 *	ata_wait_ready - wait for link to become ready
 *	@link: link to be waited on
 *	@deadline: deadline jiffies for the operation
 *	@check_ready: callback to check link readiness
 *
 *	Wait for @link to become ready.  @check_ready should return
 *	positive number if @link is ready, 0 if it isn't, -ENODEV if
 *	link doesn't seem to be occupied, other errno for other error
 *	conditions.
 *
 *	Transient -ENODEV conditions are allowed for
 *	ATA_TMOUT_FF_WAIT.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_ready(struct ata_link *link, unsigned long deadline,
		   int (*check_ready)(struct ata_link *link))
{
	unsigned long start = jiffies;
	unsigned long nodev_deadline;
	int warned = 0;

	/* choose which 0xff timeout to use, read comment in libata.h */
	if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
	else
		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);

	/*
	 * Debouncing back-to-back -ENODEV doesn't make sense on a
	 * slave link; its occupancy follows the master link, so
	 * callers must wait on the master link instead.
	 */
	WARN_ON(link == link->ap->slave_link);

	if (time_after(nodev_deadline, deadline))
		nodev_deadline = deadline;

	while (1) {
		unsigned long now = jiffies;
		int ready, tmp;

		ready = tmp = check_ready(link);
		if (ready > 0)
			return 0;

		/*
		 * -ENODEV could be transient.  Ignore -ENODEV if link
		 * is online.  Also, some SATA devices take a long
		 * time to clear 0xff after reset.  Wait for
		 * nodev_deadline if the link isn't offline so that
		 * such devices get a chance to come up.
		 */
		if (ready == -ENODEV) {
			if (ata_link_online(link))
				ready = 0;
			else if ((link->ap->flags & ATA_FLAG_SATA) &&
				 !ata_link_offline(link) &&
				 time_before(now, nodev_deadline))
				ready = 0;
		}

		if (ready)
			return ready;
		if (time_after(now, deadline))
			return -EBUSY;

		/* warn once if the wait drags on but there is still slack */
		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_link_warn(link,
				"link is slow to respond, please be patient "
				"(ready=%d)\n", tmp);
			warned = 1;
		}

		ata_msleep(link->ap, 50);
	}
}
3399
3400
3401
3402
3403
3404
3405
3406
3407
3408
3409
3410
3411
3412
3413
/**
 *	ata_wait_after_reset - wait for link to become ready after reset
 *	@link: link to be waited on
 *	@deadline: deadline jiffies for the operation
 *	@check_ready: callback to check link readiness
 *
 *	Wait for @link to become ready after reset.  Sleeps
 *	ATA_WAIT_AFTER_RESET ms first to give the device time to
 *	present valid status before polling via ata_wait_ready().
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
			 int (*check_ready)(struct ata_link *link))
{
	ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);

	return ata_wait_ready(link, deadline, check_ready);
}
3421
3422
3423
3424
3425
3426
3427
3428
3429
3430
3431
3432
3433
3434
3435
3436
3437
3438
3439
3440
3441
3442
3443
/**
 *	sata_link_debounce - debounce SATA phy status
 *	@link: ATA link to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Make sure SStatus of @link reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constraints the
 *	beginning of the stable state.  Because DET gets stuck at 1 on
 *	some controllers after hot unplugging, this function waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	@timeout is further limited by @deadline.  The sooner of the
 *	two is used.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval = params[0];
	unsigned long duration = params[1];
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	t = ata_deadline(jiffies, params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* only the DET field is debounced */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		ata_msleep(link->ap, interval);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies,
				       ata_deadline(last_jiffies, duration)))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE to tell upper layer to lower link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}
3491
3492
3493
3494
3495
3496
3497
3498
3499
3500
3501
3502
3503
3504
3505
/**
 *	sata_link_resume - resume SATA link
 *	@link: ATA link to resume SATA
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Resume @link.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_resume(struct ata_link *link, const unsigned long *params,
		     unsigned long deadline)
{
	int tries = ATA_LINK_RESUME_TRIES;
	u32 scontrol, serror;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/*
	 * Writes to SControl sometimes get ignored under certain
	 * controllers (ata_piix SIDPR).  Make sure DET actually is
	 * cleared by retrying up to ATA_LINK_RESUME_TRIES times.
	 */
	do {
		/* keep SPD, clear DET, disable all LPM transitions */
		scontrol = (scontrol & 0x0f0) | 0x300;
		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			return rc;

		/*
		 * Some PHYs react badly if SStatus is pounded
		 * immediately after resuming.  Delay 200ms before
		 * debouncing.
		 */
		ata_msleep(link->ap, 200);

		/* is SControl restored correctly? */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			return rc;
	} while ((scontrol & 0xf0f) != 0x300 && --tries);

	if ((scontrol & 0xf0f) != 0x300) {
		/* resume failed; report and return 0 so EH decides what to do */
		ata_link_warn(link, "failed to resume link (SControl %X)\n",
			      scontrol);
		return 0;
	}

	if (tries < ATA_LINK_RESUME_TRIES)
		ata_link_warn(link, "link resume succeeded after %d retries\n",
			      ATA_LINK_RESUME_TRIES - tries);

	if ((rc = sata_link_debounce(link, params, deadline)))
		return rc;

	/* clear SError, some PHYs require this even for SRST to work */
	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
		rc = sata_scr_write(link, SCR_ERROR, serror);

	return rc != -EINVAL ? rc : 0;
}
3556
3557
3558
3559
3560
3561
3562
3563
3564
3565
3566
3567
3568
3569
3570
3571
3572
3573
3574
/**
 *	sata_link_scr_lpm - manipulate SControl IPM and SPM fields
 *	@link: ATA link to manipulate SControl for
 *	@policy: LPM policy to configure
 *	@spm_wakeup: initiate LPM transition to active state
 *
 *	Manipulate the IPM field of the SControl register of @link
 *	according to @policy.  If @policy is ATA_LPM_MAX_POWER and
 *	@spm_wakeup is %true, the SPM field is manipulated to wake up
 *	a device which is in the LPM state.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
		      bool spm_wakeup)
{
	struct ata_eh_context *ehc = &link->eh_context;
	bool woken_up = false;
	u32 scontrol;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	switch (policy) {
	case ATA_LPM_MAX_POWER:
		/* disable all LPM transitions */
		scontrol |= (0x3 << 8);
		/* initiate transition to active state */
		if (spm_wakeup) {
			scontrol |= (0x4 << 12);
			woken_up = true;
		}
		break;
	case ATA_LPM_MED_POWER:
		/* allow LPM to PARTIAL */
		scontrol &= ~(0x1 << 8);
		scontrol |= (0x2 << 8);
		break;
	case ATA_LPM_MIN_POWER:
		if (ata_link_nr_enabled(link) > 0)
			/* no restrictions on LPM transitions */
			scontrol &= ~(0x3 << 8);
		else {
			/* empty port, power off */
			scontrol &= ~0xf;
			scontrol |= (0x1 << 2);
		}
		break;
	default:
		WARN_ON(1);
	}

	rc = sata_scr_write(link, SCR_CONTROL, scontrol);
	if (rc)
		return rc;

	/* give the link time to transit out of LPM state */
	if (woken_up)
		msleep(10);

	/* clear PHYRDY_CHG from SError - the policy change toggles PHY state */
	ehc->i.serror &= ~SERR_PHYRDY_CHG;
	return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
}
3628
3629
3630
3631
3632
3633
3634
3635
3636
3637
3638
3639
3640
3641
3642
3643
3644
3645
/**
 *	ata_std_prereset - prepare for reset
 *	@link: ATA link to be reset
 *	@deadline: deadline jiffies for the operation
 *
 *	@link is about to be reset.  Initialize it.  Failure from
 *	prereset makes libata abort whole reset sequence and give up
 *	that port, so prereset should be best-effort.  It does its
 *	best to prepare for reset sequence but if things go wrong, it
 *	should just whine, not fail.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume link */
	if (ap->flags & ATA_FLAG_SATA) {
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_warn(link,
				      "failed to resume link for reset (errno=%d)\n",
				      rc);
	}

	/* no point in trying softreset on offline link */
	if (ata_phys_link_offline(link))
		ehc->i.action &= ~ATA_EH_SOFTRESET;

	return 0;
}
3673
3674
3675
3676
3677
3678
3679
3680
3681
3682
3683
3684
3685
3686
3687
3688
3689
3690
3691
3692
3693
3694
3695
3696
3697
/**
 *	sata_link_hardreset - reset link via SATA phy reset
 *	@link: link to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *	@online: optional out parameter indicating link onlineness
 *	@check_ready: optional callback to check link readiness
 *
 *	SATA phy-reset @link using DET bits of SControl register.
 *	After hardreset, link readiness is waited upon using
 *	ata_wait_ready() if @check_ready is specified.  LLDs are
 *	allowed to not specify @check_ready and wait themselves after
 *	this function returns.  Device classification is LLD's
 *	responsibility.
 *
 *	*@online is set to one iff reset succeeded and @link is online
 *	after reset.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
			unsigned long deadline,
			bool *online, int (*check_ready)(struct ata_link *))
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (online)
		*online = false;

	if (sata_set_spd_needed(link)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			goto out;

		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(link);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		goto out;

	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	ata_msleep(link->ap, 1);

	/* bring link back */
	rc = sata_link_resume(link, timing, deadline);
	if (rc)
		goto out;
	/* if link is offline nothing more to do */
	if (ata_phys_link_offline(link))
		goto out;

	/* Link is online.  From this point, -ENODEV too is an error. */
	if (online)
		*online = true;

	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
		/* If PMP is supported, we have to do follow-up SRST.
		 * Some PMPs don't send D2H Reg FIS after hardreset if
		 * the first port is empty.  Wait only for the PMP
		 * itself here and let SRST do the rest.
		 */
		if (check_ready) {
			unsigned long pmp_deadline;

			pmp_deadline = ata_deadline(jiffies,
						    ATA_TMOUT_PMP_SRST_WAIT);
			if (time_after(pmp_deadline, deadline))
				pmp_deadline = deadline;
			ata_wait_ready(link, pmp_deadline, check_ready);
		}
		rc = -EAGAIN;
		goto out;
	}

	rc = 0;
	if (check_ready)
		rc = ata_wait_ready(link, deadline, check_ready);
 out:
	if (rc && rc != -EAGAIN) {
		/* online is set iff link is online && reset succeeded */
		if (online)
			*online = false;
		ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
	}
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
3785
3786
3787
3788
3789
3790
3791
3792
3793
3794
3795
3796
3797
3798
3799
3800int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3801 unsigned long deadline)
3802{
3803 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3804 bool online;
3805 int rc;
3806
3807
3808 rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3809 return online ? -EAGAIN : rc;
3810}
3811
3812
3813
3814
3815
3816
3817
3818
3819
3820
3821
3822
3823
3824void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3825{
3826 u32 serror;
3827
3828 DPRINTK("ENTER\n");
3829
3830
3831 if (!sata_scr_read(link, SCR_ERROR, &serror))
3832 sata_scr_write(link, SCR_ERROR, serror);
3833
3834
3835 sata_print_link_status(link);
3836
3837 DPRINTK("EXIT\n");
3838}
3839
3840
3841
3842
3843
3844
3845
3846
3847
3848
3849
3850
3851
3852
3853
3854
3855
/**
 *	ata_dev_same_device - Determine whether new ID matches configured device
 *	@dev: device to compare against
 *	@new_class: class of the new device
 *	@new_id: IDENTIFY page of the new device
 *
 *	Compare @new_class and @new_id against @dev and determine
 *	whether @dev is the device indicated by @new_class and
 *	@new_id.  Class, model string and serial string must all
 *	match.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if @dev matches @new_class and @new_id, 0 otherwise.
 */
static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
			       const u16 *new_id)
{
	const u16 *old_id = dev->id;
	unsigned char model[2][ATA_ID_PROD_LEN + 1];
	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];

	if (dev->class != new_class) {
		ata_dev_info(dev, "class mismatch %d != %d\n",
			     dev->class, new_class);
		return 0;
	}

	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));

	if (strcmp(model[0], model[1])) {
		ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
			     model[0], model[1]);
		return 0;
	}

	if (strcmp(serial[0], serial[1])) {
		ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
			     serial[0], serial[1]);
		return 0;
	}

	return 1;
}
3888
3889
3890
3891
3892
3893
3894
3895
3896
3897
3898
3899
3900
3901
3902
3903int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3904{
3905 unsigned int class = dev->class;
3906 u16 *id = (void *)dev->link->ap->sector_buf;
3907 int rc;
3908
3909
3910 rc = ata_dev_read_id(dev, &class, readid_flags, id);
3911 if (rc)
3912 return rc;
3913
3914
3915 if (!ata_dev_same_device(dev, class, id))
3916 return -ENODEV;
3917
3918 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3919 return 0;
3920}
3921
3922
3923
3924
3925
3926
3927
3928
3929
3930
3931
3932
3933
3934
3935
3936
/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@new_class: new class code
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page, make sure @dev is still attached to the
 *	port and reconfigure it according to the new IDENTIFY page.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
		       unsigned int readid_flags)
{
	u64 n_sectors = dev->n_sectors;
	u64 n_native_sectors = dev->n_native_sectors;
	int rc;

	if (!ata_dev_enabled(dev))
		return -ENODEV;

	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
	if (ata_class_enabled(new_class) &&
	    new_class != ATA_DEV_ATA &&
	    new_class != ATA_DEV_ATAPI &&
	    new_class != ATA_DEV_SEMB) {
		ata_dev_info(dev, "class mismatch %u != %u\n",
			     dev->class, new_class);
		rc = -ENODEV;
		goto fail;
	}

	/* re-read ID */
	rc = ata_dev_reread_id(dev, readid_flags);
	if (rc)
		goto fail;

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc)
		goto fail;

	/* verify n_sectors hasn't changed */
	if (dev->class != ATA_DEV_ATA || !n_sectors ||
	    dev->n_sectors == n_sectors)
		return 0;

	/* n_sectors has changed */
	ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
		     (unsigned long long)n_sectors,
		     (unsigned long long)dev->n_sectors);

	/*
	 * Something could have caused HPA to be unlocked
	 * involuntarily.  If n_native_sectors hasn't changed and the
	 * new size matches it, keep the device.
	 */
	if (dev->n_native_sectors == n_native_sectors &&
	    dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
		ata_dev_warn(dev,
			     "new n_sectors matches native, probably "
			     "late HPA unlock, n_sectors updated\n");
		/* use the larger n_sectors */
		return 0;
	}

	/*
	 * Some BIOSes boot w/o HPA but resume w/ HPA locked.  Ask the
	 * next revalidation to unlock HPA (unless the device has the
	 * BROKEN_HPA quirk) by flagging ATA_DFLAG_UNLOCK_HPA and
	 * failing with -EIO so EH retries.
	 */
	if (dev->n_native_sectors == n_native_sectors &&
	    dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
	    !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
		ata_dev_warn(dev,
			     "old n_sectors matches native, probably "
			     "late HPA lock, will try to unlock HPA\n");
		/* try unlocking HPA */
		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
		rc = -EIO;
	} else
		rc = -ENODEV;

	/* restore original n_[native_]sectors and fail */
	dev->n_native_sectors = n_native_sectors;
	dev->n_sectors = n_sectors;
 fail:
	ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
4017
/* One entry in the per-model quirk table below. */
struct ata_blacklist_entry {
	const char *model_num;	/* glob pattern matched against IDENTIFY model */
	const char *model_rev;	/* optional firmware rev pattern; NULL = any rev */
	unsigned long horkage;	/* ATA_HORKAGE_* flags applied on match */
};
4023
static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
	{ "CRD-848[02]B",	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8[34]35",NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
	/* Odd clown on sil3726/4726 PMPs */
	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA },

	/* Devices we expect to fail diagnostics.
	 * NOTE(review): original grouping comments were stripped; these
	 * NCQ entries are devices whose NCQ implementation is broken.
	 */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
	/* http://thread.gmane.org/gmane.linux.ide/14907 */
	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
	/* NCQ is broken */
	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
	{ "OCZ CORE_SSD",	"02.10104",	ATA_HORKAGE_NONCQ },

	/* Seagate NCQ + FLUSH CACHE firmware bug */
	{ "ST31500341AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST31000333AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST3640[36]23AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST3320[68]13AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	/* Blacklist entries taken from Silicon Image 3124/3132
	   Windows driver .inf file - also several Linux problem reports */
	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },

	/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
	{ "C300-CTFDDAC128MAG",	"0001",		ATA_HORKAGE_NONCQ, },

	/* Devices which report 1 sector over size HPA or are otherwise
	 * broken wrt READ NATIVE MAX */
	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },

	/* this one allows HPA unlocking but fails IOs on the area */
	{ "OCZ-VERTEX",		    "1.30",	ATA_HORKAGE_BROKEN_HPA },

	/* Devices which report 1 sector over size HPA */
	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },

	/* Devices which get the IVB wrong */
	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
	/* Maybe we should just blacklist TSSTcorp... */
	{ "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]",  ATA_HORKAGE_IVB, },

	/* Devices that do not need bridging limits applied */
	{ "MTRON MSP-SATA*",		NULL,	ATA_HORKAGE_BRIDGE_OK, },

	/* Devices which aren't very happy with higher link speeds */
	{ "WD My Book",			NULL,	ATA_HORKAGE_1_5_GBPS, },

	/*
	 * Devices which choke on SETXFER.  Applies only if both the
	 * device and controller are SATA.
	 */
	{ "PIONEER DVD-RW  DVRTD08",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVRTD08A",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVR-215",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },

	/* End Marker */
	{ }
};
4136
4137
4138
4139
4140
4141
4142
4143
4144
4145
4146
4147
4148
4149
4150
4151
4152
4153
4154
4155
4156
4157
4158
4159
4160
4161
4162
4163
/**
 *	glob_match - match a text string against a glob-style pattern
 *	@text: the string to be examined
 *	@pattern: the glob-style pattern to be matched against
 *
 *	Either/both of text and pattern can be empty strings.
 *
 *	Match text against a glob-style pattern, with wildcards and simple
 *	sets:
 *
 *		?	matches any single character.
 *		*	matches any run of characters.
 *		[xyz]	matches a single character from the set: x, y, or z.
 *		[a-d]	matches a single character from the range: a, b, c, or d.
 *		[a-d0-9] matches a single character from either range.
 *
 *	The special characters ?, [, -, or *, can be matched using a set, eg. [*]
 *	Behaviour with malformed patterns is undefined, though generally reasonable.
 *
 *	Sample patterns:  "SD1?",  "SD1[0-5]",  "*R0",  "SD*1?[012]*xx"
 *
 *	This function uses one level of recursion per '*' in pattern.
 *	Since it calls _nothing_ else, and has _no_ explicit local variables,
 *	this will not cause stack problems for any reasonable use here.
 *
 *	RETURNS:
 *	0 on match, 1 otherwise.
 */
static int glob_match(const char *text, const char *pattern)
{
	const char *t = text;
	const char *p = pattern;

	do {
		/* Match single character or a '?' wildcard */
		if (*t == *p || *p == '?') {
			if (!*p++)
				return 0;	/* End of both strings: match */
		} else {
			/* Match single char against a '[' bracketed ']' pattern set */
			if (!*t || *p != '[')
				break;		/* Not a pattern set */
			while (*++p && *p != ']' && *t != *p) {
				/* Ranges: endpoints match via the loop
				 * condition above, interior chars here. */
				if (*p == '-' && *(p - 1) != '[' &&
				    *t > *(p - 1) && *t < *(p + 1)) {
					++p;
					break;
				}
			}
			if (!*p || *p == ']')
				return 1;	/* No match */
			/* skip the rest of the set */
			while (*p && *p++ != ']')
				;
		}
	} while (*++t && *p);

	/* Match any run of chars against a '*' wildcard */
	if (*p == '*') {
		if (!*++p)
			return 0;	/* Match: avoid recursion at end of pattern */
		/* Loop to handle additional pattern chars after the wildcard */
		for (; *t; ++t)
			if (glob_match(t, p) == 0)
				return 0;	/* Match */
	}
	return (!*t && !*p) ? 0 : 1;
}
4203
4204static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4205{
4206 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4207 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4208 const struct ata_blacklist_entry *ad = ata_device_blacklist;
4209
4210 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4211 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4212
4213 while (ad->model_num) {
4214 if (!glob_match(model_num, ad->model_num)) {
4215 if (ad->model_rev == NULL)
4216 return ad->horkage;
4217 if (!glob_match(model_rev, ad->model_rev))
4218 return ad->horkage;
4219 }
4220 ad++;
4221 }
4222 return 0;
4223}
4224
/* Return non-zero when @dev must not use DMA at all. */
static int ata_dma_blacklisted(const struct ata_device *dev)
{
	/* We don't support polling DMA.  Deny DMA for those ATAPI
	 * devices with CDB interrupts when the port runs in PIO
	 * polling mode.
	 */
	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
	    (dev->flags & ATA_DFLAG_CDB_INTR))
		return 1;
	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
}
4236
4237
4238
4239
4240
4241
4242
4243
4244
/**
 *	ata_is_40wire		-	check drive side detection
 *	@dev: device
 *
 *	Perform drive side detection decoding, allowing for device vendors
 *	who can't follow the documentation (ATA_HORKAGE_IVB).
 */
static int ata_is_40wire(struct ata_device *dev)
{
	if (dev->horkage & ATA_HORKAGE_IVB)
		return ata_drive_40wire_relaxed(dev->id);
	return ata_drive_40wire(dev->id);
}
4251
4252
4253
4254
4255
4256
4257
4258
4259
4260
4261
4262
4263
4264
/**
 *	cable_is_40wire		-	40/80/SATA decider
 *	@ap: port to consider
 *
 *	This function encapsulates the policy for speed management
 *	in one place.  At the moment we don't cache the result but
 *	there is a good case for setting ap->cbl to the result when
 *	we are called with unknown cables (and figuring out if it
 *	impacts hotplug at all).
 *
 *	Return 1 if the cable appears to be 40 wire.
 */
static int cable_is_40wire(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;

	/* If the controller thinks we are 40 wire, we are. */
	if (ap->cbl == ATA_CBL_PATA40)
		return 1;

	/* If the controller thinks we are 80 wire, we are. */
	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
		return 0;

	/* If the system is known to be 40 wire short cable (eg
	 * laptop), then we allow 80 wire modes even if the drive
	 * isn't sure.
	 */
	if (ap->cbl == ATA_CBL_PATA40_SHORT)
		return 0;

	/* If the controller doesn't know, we scan.
	 *
	 * Note: We look for all 40 wire detects at this point.  Any
	 *       80 wire detect is taken to be 80 wire cable because
	 * - in many setups only the one drive (slave if present) will
	 *   give a valid detect
	 * - if you have a non detect capable drive you don't want it
	 *   to colour the choice
	 */
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ENABLED) {
			if (!ata_is_40wire(dev))
				return 0;
		}
	}
	return 1;
}
4302
4303
4304
4305
4306
4307
4308
4309
4310
4311
4312
4313
4314
/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, etc...
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 *	CFA Advanced TrueIDE timings are not allowed on a shared
	 *	cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_warn(dev,
			     "device is on DMA blacklist, disabling DMA\n");
	}

	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_warn(dev,
			     "simplex DMA is claimed by other device, disabling DMA\n");
	}

	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * an unknown or 80 wire cable is attached the drive-side
	 * check above has already had its say; only drop the high
	 * UDMA modes once we know the cable really is 40 wire.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if (cable_is_40wire(ap)) {
			ata_dev_warn(dev,
				     "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
4380
4381
4382
4383
4384
4385
4386
4387
4388
4389
4390
4391
4392
4393
4394
/**
 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 *	@dev: Device to which command will be sent
 *
 *	Issue SET FEATURES - XFER MODE command to device @dev
 *	on port @ap.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	/* Some controllers and ATAPI devices show flaky interrupt
	 * behavior after setting xfer mode.  Use polling instead.
	 */
	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
	tf.protocol = ATA_PROT_NODATA;

	/* If we are using IORDY we must send the mode setting command */
	if (ata_pio_need_iordy(dev))
		tf.nsect = dev->xfer_mode;
	/* If the device has IORDY and the controller does not - turn it off */
	else if (ata_id_has_iordy(dev->id))
		tf.nsect = 0x01;
	else /* In the ancient relic department - skip all of this */
		return 0;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4425
4426
4427
4428
4429
4430
4431
4432
4433
4434
4435
4436
4437
4438
4439
4440
4441unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
4442{
4443 struct ata_taskfile tf;
4444 unsigned int err_mask;
4445
4446
4447 DPRINTK("set features - SATA features\n");
4448
4449 ata_tf_init(dev, &tf);
4450 tf.command = ATA_CMD_SET_FEATURES;
4451 tf.feature = enable;
4452 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4453 tf.protocol = ATA_PROT_NODATA;
4454 tf.nsect = feature;
4455
4456 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4457
4458 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4459 return err_mask;
4460}
4461
4462
4463
4464
4465
4466
4467
4468
4469
4470
4471
4472
4473
/**
 *	ata_dev_init_params - Issue INIT DEV PARAMS command
 *	@dev: Device to which command will be sent
 *	@heads: Number of heads (taskfile parameter)
 *	@sectors: Number of sectors (taskfile parameter)
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	/* A clean abort indicates an original or just out of spec drive
	   and we should continue as we issue the setup based on the
	   drive reported working geometry */
	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
		err_mask = 0;

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4504
4505
4506
4507
4508
4509
4510
4511
4512
4513
/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->sg;
	int dir = qc->dma_dir;

	WARN_ON_ONCE(sg == NULL);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* unmap with the pre-mapping element count, not the mapped one */
	if (qc->n_elem)
		dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->sg = NULL;
}
4530
4531
4532
4533
4534
4535
4536
4537
4538
4539
4540
4541
4542
4543
4544
4545int atapi_check_dma(struct ata_queued_cmd *qc)
4546{
4547 struct ata_port *ap = qc->ap;
4548
4549
4550
4551
4552 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4553 unlikely(qc->nbytes & 15))
4554 return 1;
4555
4556 if (ap->ops->check_atapi_dma)
4557 return ap->ops->check_atapi_dma(qc);
4558
4559 return 0;
4560}
4561
4562
4563
4564
4565
4566
4567
4568
4569
4570
4571
4572
4573
4574
4575
4576
/**
 *	ata_std_qc_defer - Check whether a qc needs to be deferred
 *	@qc: ATA command in question
 *
 *	Non-NCQ commands cannot run with any other command, NCQ or
 *	not.  As upper layer only knows the queue depth, we are
 *	responsible for maintaining exclusion.  This function checks
 *	whether a new command @qc can be issued.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	ATA_DEFER_* if deferring is needed, 0 otherwise.
 */
int ata_std_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;

	if (qc->tf.protocol == ATA_PROT_NCQ) {
		/* NCQ may run alongside other NCQ commands only */
		if (!ata_tag_valid(link->active_tag))
			return 0;
	} else {
		/* non-NCQ requires the link to be completely idle */
		if (!ata_tag_valid(link->active_tag) && !link->sactive)
			return 0;
	}

	return ATA_DEFER_LINK;
}
4591
/* No-op ->qc_prep for controllers that need no per-command preparation. */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4593
4594
4595
4596
4597
4598
4599
4600
4601
4602
4603
4604
4605
4606
/**
 *	ata_sg_init - Associate command with scatter-gather table.
 *	@qc: Command to be associated
 *	@sg: Scatter-gather table.
 *	@n_elem: Number of elements in s/g table.
 *
 *	Initialize the data-related elements of queued_cmd @qc
 *	to point to a scatter-gather table @sg, containing @n_elem
 *	elements.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
		 unsigned int n_elem)
{
	qc->sg = sg;
	qc->n_elem = n_elem;
	qc->cursg = qc->sg;
}
4614
4615
4616
4617
4618
4619
4620
4621
4622
4623
4624
4625
4626
4627
/**
 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 *	@qc: Command with scatter-gather table to be mapped.
 *
 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */
static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int n_elem;

	VPRINTK("ENTER, ata%u\n", ap->print_id);

	/* dma_map_sg() returns 0 on mapping failure */
	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
	if (n_elem < 1)
		return -1;

	DPRINTK("%d sg elements mapped\n", n_elem);
	/* remember the original count for dma_unmap_sg() in ata_sg_clean() */
	qc->orig_n_elem = qc->n_elem;
	qc->n_elem = n_elem;
	qc->flags |= ATA_QCFLAG_DMAMAP;

	return 0;
}
4646
4647
4648
4649
4650
4651
4652
4653
4654
4655
4656
4657
4658
4659void swap_buf_le16(u16 *buf, unsigned int buf_words)
4660{
4661#ifdef __BIG_ENDIAN
4662 unsigned int i;
4663
4664 for (i = 0; i < buf_words; i++)
4665 buf[i] = le16_to_cpu(buf[i]);
4666#endif
4667}
4668
4669
4670
4671
4672
4673
4674
4675
4676
/**
 *	ata_qc_new - Request an available ATA command, for queueing
 *	@ap: target port
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Allocated qc with its tag set, or %NULL if none is available.
 */
static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
{
	struct ata_queued_cmd *qc = NULL;
	unsigned int i;

	/* no command while frozen */
	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
		return NULL;

	/* the last tag is reserved for the internal command */
	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
		if (!test_and_set_bit(i, &ap->qc_allocated)) {
			qc = __ata_qc_from_tag(ap, i);
			break;
		}

	if (qc)
		qc->tag = i;

	return qc;
}
4698
4699
4700
4701
4702
4703
4704
4705
4706
4707struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4708{
4709 struct ata_port *ap = dev->link->ap;
4710 struct ata_queued_cmd *qc;
4711
4712 qc = ata_qc_new(ap);
4713 if (qc) {
4714 qc->scsicmd = NULL;
4715 qc->ap = ap;
4716 qc->dev = dev;
4717
4718 ata_qc_reinit(qc);
4719 }
4720
4721 return qc;
4722}
4723
4724
4725
4726
4727
4728
4729
4730
4731
4732
4733
/**
 *	ata_qc_free - free unused ata_queued_cmd
 *	@qc: Command to complete
 *
 *	Designed to free unused ata_queued_cmd object
 *	in case something prevents using it.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_free(struct ata_queued_cmd *qc)
{
	struct ata_port *ap;
	unsigned int tag;

	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	ap = qc->ap;

	qc->flags = 0;
	tag = qc->tag;
	if (likely(ata_tag_valid(tag))) {
		/* poison tag before releasing it so stale users trip early */
		qc->tag = ATA_TAG_POISON;
		clear_bit(tag, &ap->qc_allocated);
	}
}
4749
/*
 * __ata_qc_complete - unconditionally complete an active qc
 * @qc: Command to complete
 *
 * Tears down the qc's DMA mapping, clears the per-link active
 * bookkeeping (sactive/active_tag, nr_active_links, excl_link),
 * marks the qc inactive and invokes its completion callback.
 * Callers (ata_qc_complete() and EH) are responsible for result-TF
 * handling before calling this.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap;
	struct ata_link *link;

	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
	ap = qc->ap;
	link = qc->dev->link;

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		link->sactive &= ~(1 << qc->tag);
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error
	 * handler is called.
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
4788
/* Capture the device's result taskfile into qc->result_tf via the
 * port's ->qc_fill_rtf hook; flags are copied from the command TF.
 */
static void fill_result_tf(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	qc->result_tf.flags = qc->tf.flags;
	ap->ops->qc_fill_rtf(qc);
}
4796
4797static void ata_verify_xfer(struct ata_queued_cmd *qc)
4798{
4799 struct ata_device *dev = qc->dev;
4800
4801 if (ata_is_nodata(qc->tf.protocol))
4802 return;
4803
4804 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4805 return;
4806
4807 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4808}
4809
4810
4811
4812
4813
4814
4815
4816
4817
4818
4819
4820
4821
4822
4823
4824
/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA command has
 *	completed, with either an OK or not-OK status.
 *
 *	Refrain from calling this function multiple times when
 *	successfully completing multiple NCQ commands.
 *	ata_qc_complete_multiple() should be used instead.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc.  libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
	 * not synchronize with interrupt handler.  Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		struct ata_device *dev = qc->dev;
		struct ata_eh_info *ehi = &dev->link->eh_info;

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		/*
		 * Finish internal commands without any further
		 * processing and always fill result TF for them.
		 */
		if (unlikely(ata_tag_internal(qc->tag))) {
			fill_result_tf(qc);
			__ata_qc_complete(qc);
			return;
		}

		/*
		 * Non-internal qc has failed.  Fill the result TF and
		 * summon EH.
		 */
		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			fill_result_tf(qc);
			ata_qc_schedule_eh(qc);
			return;
		}

		WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		/* Some commands need post-processing after successful
		 * completion.
		 */
		switch (qc->tf.command) {
		case ATA_CMD_SET_FEATURES:
			if (qc->tf.feature != SETFEATURES_WC_ON &&
			    qc->tf.feature != SETFEATURES_WC_OFF)
				break;
			/* fall through - WC toggles need revalidation */
		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
		case ATA_CMD_SET_MULTI: /* multi_count changed */
			/* revalidate device */
			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
			ata_port_schedule_eh(ap);
			break;

		case ATA_CMD_SLEEP:
			dev->flags |= ATA_DFLAG_SLEEPING;
			break;
		}

		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
			ata_verify_xfer(qc);

		__ata_qc_complete(qc);
	} else {
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
4911
4912
4913
4914
4915
4916
4917
4918
4919
4920
4921
4922
4923
4924
4925
4926
4927
4928
4929
4930
4931
4932int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
4933{
4934 int nr_done = 0;
4935 u32 done_mask;
4936
4937 done_mask = ap->qc_active ^ qc_active;
4938
4939 if (unlikely(done_mask & qc_active)) {
4940 ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n",
4941 ap->qc_active, qc_active);
4942 return -EINVAL;
4943 }
4944
4945 while (done_mask) {
4946 struct ata_queued_cmd *qc;
4947 unsigned int tag = __ffs(done_mask);
4948
4949 qc = ata_qc_from_tag(ap, tag);
4950 if (qc) {
4951 ata_qc_complete(qc);
4952 nr_done++;
4953 }
4954 done_mask &= ~(1 << tag);
4955 }
4956
4957 return nr_done;
4958}
4959
4960
4961
4962
4963
4964
4965
4966
4967
4968
4969
4970
4971
4972void ata_qc_issue(struct ata_queued_cmd *qc)
4973{
4974 struct ata_port *ap = qc->ap;
4975 struct ata_link *link = qc->dev->link;
4976 u8 prot = qc->tf.protocol;
4977
4978
4979
4980
4981
4982 WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
4983
4984 if (ata_is_ncq(prot)) {
4985 WARN_ON_ONCE(link->sactive & (1 << qc->tag));
4986
4987 if (!link->sactive)
4988 ap->nr_active_links++;
4989 link->sactive |= 1 << qc->tag;
4990 } else {
4991 WARN_ON_ONCE(link->sactive);
4992
4993 ap->nr_active_links++;
4994 link->active_tag = qc->tag;
4995 }
4996
4997 qc->flags |= ATA_QCFLAG_ACTIVE;
4998 ap->qc_active |= 1 << qc->tag;
4999
5000
5001
5002
5003
5004 if (WARN_ON_ONCE(ata_is_data(prot) &&
5005 (!qc->sg || !qc->n_elem || !qc->nbytes)))
5006 goto sys_err;
5007
5008 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5009 (ap->flags & ATA_FLAG_PIO_DMA)))
5010 if (ata_sg_setup(qc))
5011 goto sys_err;
5012
5013
5014 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5015 link->eh_info.action |= ATA_EH_RESET;
5016 ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5017 ata_link_abort(link);
5018 return;
5019 }
5020
5021 ap->ops->qc_prep(qc);
5022
5023 qc->err_mask |= ap->ops->qc_issue(qc);
5024 if (unlikely(qc->err_mask))
5025 goto err;
5026 return;
5027
5028sys_err:
5029 qc->err_mask |= AC_ERR_SYSTEM;
5030err:
5031 ata_qc_complete(qc);
5032}
5033
5034
5035
5036
5037
5038
5039
5040
5041
5042
5043
5044
5045
5046int sata_scr_valid(struct ata_link *link)
5047{
5048 struct ata_port *ap = link->ap;
5049
5050 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5051}
5052
5053
5054
5055
5056
5057
5058
5059
5060
5061
5062
5063
5064
5065
5066
5067
5068
5069int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5070{
5071 if (ata_is_host_link(link)) {
5072 if (sata_scr_valid(link))
5073 return link->ap->ops->scr_read(link, reg, val);
5074 return -EOPNOTSUPP;
5075 }
5076
5077 return sata_pmp_scr_read(link, reg, val);
5078}
5079
5080
5081
5082
5083
5084
5085
5086
5087
5088
5089
5090
5091
5092
5093
5094
5095
5096int sata_scr_write(struct ata_link *link, int reg, u32 val)
5097{
5098 if (ata_is_host_link(link)) {
5099 if (sata_scr_valid(link))
5100 return link->ap->ops->scr_write(link, reg, val);
5101 return -EOPNOTSUPP;
5102 }
5103
5104 return sata_pmp_scr_write(link, reg, val);
5105}
5106
5107
5108
5109
5110
5111
5112
5113
5114
5115
5116
5117
5118
5119
5120
5121
5122int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5123{
5124 if (ata_is_host_link(link)) {
5125 int rc;
5126
5127 if (sata_scr_valid(link)) {
5128 rc = link->ap->ops->scr_write(link, reg, val);
5129 if (rc == 0)
5130 rc = link->ap->ops->scr_read(link, reg, &val);
5131 return rc;
5132 }
5133 return -EOPNOTSUPP;
5134 }
5135
5136 return sata_pmp_scr_write(link, reg, val);
5137}
5138
5139
5140
5141
5142
5143
5144
5145
5146
5147
5148
5149
5150
5151
5152
5153bool ata_phys_link_online(struct ata_link *link)
5154{
5155 u32 sstatus;
5156
5157 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5158 ata_sstatus_online(sstatus))
5159 return true;
5160 return false;
5161}
5162
5163
5164
5165
5166
5167
5168
5169
5170
5171
5172
5173
5174
5175
5176
5177bool ata_phys_link_offline(struct ata_link *link)
5178{
5179 u32 sstatus;
5180
5181 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5182 !ata_sstatus_online(sstatus))
5183 return true;
5184 return false;
5185}
5186
5187
5188
5189
5190
5191
5192
5193
5194
5195
5196
5197
5198
5199
5200
5201
5202
5203bool ata_link_online(struct ata_link *link)
5204{
5205 struct ata_link *slave = link->ap->slave_link;
5206
5207 WARN_ON(link == slave);
5208
5209 return ata_phys_link_online(link) ||
5210 (slave && ata_phys_link_online(slave));
5211}
5212
5213
5214
5215
5216
5217
5218
5219
5220
5221
5222
5223
5224
5225
5226
5227
5228
5229bool ata_link_offline(struct ata_link *link)
5230{
5231 struct ata_link *slave = link->ap->slave_link;
5232
5233 WARN_ON(link == slave);
5234
5235 return ata_phys_link_offline(link) &&
5236 (!slave || ata_phys_link_offline(slave));
5237}
5238
5239#ifdef CONFIG_PM
5240static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
5241 unsigned int action, unsigned int ehi_flags,
5242 int wait)
5243{
5244 struct ata_link *link;
5245 unsigned long flags;
5246 int rc;
5247
5248
5249
5250
5251 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5252 ata_port_wait_eh(ap);
5253 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5254 }
5255
5256
5257 spin_lock_irqsave(ap->lock, flags);
5258
5259 ap->pm_mesg = mesg;
5260 if (wait) {
5261 rc = 0;
5262 ap->pm_result = &rc;
5263 }
5264
5265 ap->pflags |= ATA_PFLAG_PM_PENDING;
5266 ata_for_each_link(link, ap, HOST_FIRST) {
5267 link->eh_info.action |= action;
5268 link->eh_info.flags |= ehi_flags;
5269 }
5270
5271 ata_port_schedule_eh(ap);
5272
5273 spin_unlock_irqrestore(ap->lock, flags);
5274
5275
5276 if (wait) {
5277 ata_port_wait_eh(ap);
5278 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5279 }
5280
5281 return rc;
5282}
5283
5284#define to_ata_port(d) container_of(d, struct ata_port, tdev)
5285
5286static int ata_port_suspend_common(struct device *dev, pm_message_t mesg)
5287{
5288 struct ata_port *ap = to_ata_port(dev);
5289 unsigned int ehi_flags = ATA_EHI_QUIET;
5290 int rc;
5291
5292
5293
5294
5295
5296
5297
5298
5299
5300 if (mesg.event == PM_EVENT_SUSPEND)
5301 ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
5302
5303 rc = ata_port_request_pm(ap, mesg, 0, ehi_flags, 1);
5304 return rc;
5305}
5306
5307static int ata_port_suspend(struct device *dev)
5308{
5309 if (pm_runtime_suspended(dev))
5310 return 0;
5311
5312 return ata_port_suspend_common(dev, PMSG_SUSPEND);
5313}
5314
5315static int ata_port_do_freeze(struct device *dev)
5316{
5317 if (pm_runtime_suspended(dev))
5318 pm_runtime_resume(dev);
5319
5320 return ata_port_suspend_common(dev, PMSG_FREEZE);
5321}
5322
5323static int ata_port_poweroff(struct device *dev)
5324{
5325 if (pm_runtime_suspended(dev))
5326 return 0;
5327
5328 return ata_port_suspend_common(dev, PMSG_HIBERNATE);
5329}
5330
5331static int ata_port_resume_common(struct device *dev)
5332{
5333 struct ata_port *ap = to_ata_port(dev);
5334 int rc;
5335
5336 rc = ata_port_request_pm(ap, PMSG_ON, ATA_EH_RESET,
5337 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 1);
5338 return rc;
5339}
5340
/*
 * System resume callback.  On success, re-arm runtime PM by forcing
 * the device's runtime status back to active.
 */
static int ata_port_resume(struct device *dev)
{
	int rc = ata_port_resume_common(dev);

	if (rc == 0) {
		pm_runtime_disable(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
	}

	return rc;
}
5354
/* Runtime-idle callback: an idle port is immediately runtime-suspended. */
static int ata_port_runtime_idle(struct device *dev)
{
	return pm_runtime_suspend(dev);
}
5359
/*
 * PM operations for ATA port devices.  thaw/restore share the resume
 * path; runtime suspend reuses the system suspend handler.
 */
static const struct dev_pm_ops ata_port_pm_ops = {
	.suspend = ata_port_suspend,
	.resume = ata_port_resume,
	.freeze = ata_port_do_freeze,
	.thaw = ata_port_resume,
	.poweroff = ata_port_poweroff,
	.restore = ata_port_resume,

	.runtime_suspend = ata_port_suspend,
	.runtime_resume = ata_port_resume_common,
	.runtime_idle = ata_port_runtime_idle,
};
5372
5373
5374
5375
5376
5377
5378
5379
/*
 * ata_host_suspend - record that @host is being suspended
 * @host: host to suspend
 * @mesg: PM message
 *
 * Only bookkeeping: the actual per-port suspend is driven through the
 * port PM callbacks.  Always returns 0.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	host->dev->power.power_state = mesg;
	return 0;
}
5385
5386
5387
5388
5389
5390
5391
/*
 * ata_host_resume - record that @host has resumed
 * @host: host to mark resumed
 */
void ata_host_resume(struct ata_host *host)
{
	host->dev->power.power_state = PMSG_ON;
}
5396#endif
5397
/* Device type for ATA port devices; wires in PM ops when CONFIG_PM. */
struct device_type ata_port_type = {
	.name = "ata_port",
#ifdef CONFIG_PM
	.pm = &ata_port_pm_ops,
#endif
};
5404
5405
5406
5407
5408
5409
5410
5411
5412
5413
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.  Re-usable across
 *	re-probes: resets link speed limits and clears most device state.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = ata_dev_phys_link(dev);
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the attached device, reset it */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/*
	 * flags and horkage can be read without the lock elsewhere, so
	 * clear them under ap->lock to avoid racing with such readers
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	/* zero the probe-scoped region of the structure */
	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
5439
5440
5441
5442
5443
5444
5445
5446
5447
5448
5449
5450
/**
 *	ata_link_init - Initialize an ata_link structure
 *	@ap: port the link belongs to
 *	@link: link to initialize
 *	@pmp: Port Multiplier Port number
 *
 *	Initialize @link for host port @ap, including all its devices.
 */
void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
{
	int i;

	/* clear everything except for devices */
	memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
	       ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);

	link->ap = ap;
	link->pmp = pmp;
	link->active_tag = ATA_TAG_POISON;
	link->hw_sata_spd_limit = UINT_MAX;

	/* can't use iterator, link not registered yet */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &link->device[i];

		dev->link = link;
		dev->devno = dev - link->device;
#ifdef CONFIG_ATA_ACPI
		dev->gtf_filter = ata_acpi_gtf_filter;
#endif
		ata_dev_init(dev);
	}
}
5476
5477
5478
5479
5480
5481
5482
5483
5484
5485
5486
5487
5488
5489
/**
 *	sata_link_init_spd - initialize link->sata_spd_limit
 *	@link: link to configure
 *
 *	Initialize @link's speed limit from the current SControl value
 *	(saved for later restoration) and any "libata.force" override.
 *
 *	RETURNS: 0 on success, negative errno if SControl is unreadable.
 */
int sata_link_init_spd(struct ata_link *link)
{
	u8 spd;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
	if (rc)
		return rc;

	/* SControl bits 7:4 carry the configured speed limit */
	spd = (link->saved_scontrol >> 4) & 0xf;
	if (spd)
		link->hw_sata_spd_limit &= (1 << spd) - 1;

	ata_force_link_limits(link);

	link->sata_spd_limit = link->hw_sata_spd_limit;

	return 0;
}
5509
5510
5511
5512
5513
5514
5515
5516
5517
5518
5519
5520
5521
/**
 *	ata_port_alloc - allocate and initialize a basic ATA port
 *	@host: host the allocated port will belong to
 *
 *	Allocate an ata_port and initialize the parts common to all
 *	low-level drivers.  RETURNS: the new port, or NULL on allocation
 *	failure.  Freed via ata_host_release().
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	/* ports start out frozen until EH thaws them during probe */
	ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
	ap->lock = &host->lock;
	ap->print_id = -1;
	ap->host = host;
	ap->dev = host->dev;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	mutex_init(&ap->scsi_scan_mutex);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);
	init_completion(&ap->park_req_pending);
	init_timer_deferrable(&ap->fastdrain_timer);
	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
	ap->fastdrain_timer.data = (unsigned long)ap;

	ap->cbl = ATA_CBL_NONE;

	ata_link_init(ap, &ap->link, 0);

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	ata_sff_port_init(ap);

	return ap;
}
5569
/*
 * devres release callback for an ata_host: frees each port (and its
 * PMP/slave links) and drops the SCSI host reference.
 */
static void ata_host_release(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if (ap->scsi_host)
			scsi_host_put(ap->scsi_host);

		kfree(ap->pmp_link);
		kfree(ap->slave_link);
		kfree(ap);
		host->ports[i] = NULL;
	}

	dev_set_drvdata(gendev, NULL);
}
5592
5593
5594
5595
5596
5597
5598
5599
5600
5601
5602
5603
5604
5605
5606
5607
5608
5609
5610
5611
5612
/**
 *	ata_host_alloc - allocate and init basic ATA host resources
 *	@dev: generic device the allocated host is associated with
 *	@max_ports: maximum number of ATA ports associated with this host
 *
 *	Allocate a devres-managed ata_host and initialize @max_ports
 *	ports.  All allocations are rolled back via the devres group on
 *	failure.  RETURNS: the new host, or NULL on failure.
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;

	DPRINTK("ENTER\n");

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return NULL;

	/* alloc a container for our list of ATA ports (buses) */
	/* +1 keeps the ports[] array NULL-terminated */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);

	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
	if (!host)
		goto err_out;

	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	mutex_init(&host->eh_mutex);
	host->dev = dev;
	host->n_ports = max_ports;

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

 err_out:
	devres_release_group(dev, NULL);
	return NULL;
}
5658
5659
5660
5661
5662
5663
5664
5665
5666
5667
5668
5669
5670
5671
5672
5673
5674
/**
 *	ata_host_alloc_pinfo - alloc host and init with port_info array
 *	@dev: generic device the allocated host is associated with
 *	@ppi: array of ATA port_info to initialize host with
 *	@n_ports: number of ATA ports attached to this host
 *
 *	A NULL entry in @ppi makes that port reuse the last non-NULL
 *	entry.  NOTE(review): presumably callers guarantee ppi[0] is
 *	non-NULL, otherwise pi would be dereferenced as NULL — confirm.
 */
struct ata_host *ata_host_alloc_pinfo(struct device *dev,
				      const struct ata_port_info * const * ppi,
				      int n_ports)
{
	const struct ata_port_info *pi;
	struct ata_host *host;
	int i, j;

	host = ata_host_alloc(dev, n_ports);
	if (!host)
		return NULL;

	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ppi[j])
			pi = ppi[j++];

		ap->pio_mask = pi->pio_mask;
		ap->mwdma_mask = pi->mwdma_mask;
		ap->udma_mask = pi->udma_mask;
		ap->flags |= pi->flags;
		ap->link.flags |= pi->link_flags;
		ap->ops = pi->port_ops;

		/* first non-dummy port_ops becomes the host's ops */
		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
			host->ops = pi->port_ops;
	}

	return host;
}
5706
5707
5708
5709
5710
5711
5712
5713
5714
5715
5716
5717
5718
5719
5720
5721
5722
5723
5724
5725
5726
5727
5728
5729
5730
5731
5732
5733
5734
5735
5736
5737
5738
5739
5740
5741
5742
5743
5744
5745
5746
5747
5748
5749
5750
5751
5752
/**
 *	ata_slave_link_init - initialize a slave link for @ap
 *	@ap: port to initialize a slave link for
 *
 *	Allocates and initializes ap->slave_link (pmp number 1).
 *	Incompatible with port multipliers.  RETURNS: 0 on success,
 *	-ENOMEM on allocation failure.
 */
int ata_slave_link_init(struct ata_port *ap)
{
	struct ata_link *link;

	WARN_ON(ap->slave_link);
	WARN_ON(ap->flags & ATA_FLAG_PMP);

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	ata_link_init(ap, link, 1);
	ap->slave_link = link;
	return 0;
}
5768
/*
 * devres release callback registered by ata_host_start(): stops each
 * port and then the host, in that order.
 */
static void ata_host_stop(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	WARN_ON(!(host->flags & ATA_HOST_STARTED));

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}

	if (host->ops->host_stop)
		host->ops->host_stop(host);
}
5786
5787
5788
5789
5790
5791
5792
5793
5794
5795
5796
5797
5798
5799
5800
5801
5802
5803
5804
5805
5806
/*
 * ata_finalize_port_ops - flatten a port_operations inheritance chain
 * @ops: port_operations table to finalize
 *
 * Walks ops->inherits, copying each still-unset slot from the nearest
 * ancestor that defines it.  Slots explicitly disabled with an IS_ERR
 * sentinel are then reset to NULL.  Clearing ops->inherits marks the
 * table as finalized (and makes repeat calls no-ops).  Serialized with
 * a local spinlock since tables may be shared between hosts.
 */
static void ata_finalize_port_ops(struct ata_port_operations *ops)
{
	static DEFINE_SPINLOCK(lock);
	const struct ata_port_operations *cur;
	void **begin = (void **)ops;
	void **end = (void **)&ops->inherits;
	void **pp;

	if (!ops || !ops->inherits)
		return;

	spin_lock(&lock);

	for (cur = ops->inherits; cur; cur = cur->inherits) {
		void **inherit = (void **)cur;

		for (pp = begin; pp < end; pp++, inherit++)
			if (!*pp)
				*pp = *inherit;
	}

	/* IS_ERR markers mean "explicitly not implemented" */
	for (pp = begin; pp < end; pp++)
		if (IS_ERR(*pp))
			*pp = NULL;

	ops->inherits = NULL;

	spin_unlock(&lock);
}
5836
5837
5838
5839
5840
5841
5842
5843
5844
5845
5846
5847
5848
5849
5850
5851
5852
/**
 *	ata_host_start - start and freeze ports of an ATA host
 *	@host: ATA host to start ports for
 *
 *	Finalize port_ops, invoke ->port_start on each port and freeze
 *	them.  When any stop callback exists, a devres entry is added so
 *	the ports/host are stopped on driver detach.  Idempotent.
 *
 *	RETURNS: 0 if all ports are started successfully, -errno
 *	otherwise (already-started ports are rolled back).
 */
int ata_host_start(struct ata_host *host)
{
	int have_stop = 0;
	void *start_dr = NULL;
	int i, rc;

	if (host->flags & ATA_HOST_STARTED)
		return 0;

	ata_finalize_port_ops(host->ops);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_finalize_port_ops(ap->ops);

		/* adopt the first non-dummy port's ops as host ops */
		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_stop)
			have_stop = 1;
	}

	if (host->ops->host_stop)
		have_stop = 1;

	/* allocate the stop devres up-front so the stop path can't fail */
	if (have_stop) {
		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
		if (!start_dr)
			return -ENOMEM;
	}

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				if (rc != -ENODEV)
					dev_err(host->dev,
						"failed to start port %d (errno=%d)\n",
						i, rc);
				goto err_out;
			}
		}
		ata_eh_freeze_port(ap);
	}

	if (start_dr)
		devres_add(host->dev, start_dr);
	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	/* stop only the ports started before the failure */
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	devres_free(start_dr);
	return rc;
}
5916
5917
5918
5919
5920
5921
5922
5923
5924
5925
5926
5927
5928
5929void ata_host_init(struct ata_host *host, struct device *dev,
5930 unsigned long flags, struct ata_port_operations *ops)
5931{
5932 spin_lock_init(&host->lock);
5933 mutex_init(&host->eh_mutex);
5934 host->dev = dev;
5935 host->flags = flags;
5936 host->ops = ops;
5937}
5938
/*
 * ata_port_probe - kick off initial device probing for @ap
 *
 * New-style (EH-based) ports schedule a quiet probing reset through EH
 * and wait for it; old-style ports use the synchronous bus probe.
 * RETURNS: 0 for EH-based ports, ata_bus_probe() result otherwise.
 */
int ata_port_probe(struct ata_port *ap)
{
	int rc = 0;

	/* probe */
	if (ap->ops->error_handler) {
		struct ata_eh_info *ehi = &ap->link.eh_info;
		unsigned long flags;

		/* kick EH for boot probing */
		spin_lock_irqsave(ap->lock, flags);

		ehi->probe_mask |= ATA_ALL_DEVICES;
		ehi->action |= ATA_EH_RESET;
		ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

		ap->pflags &= ~ATA_PFLAG_INITIALIZING;
		ap->pflags |= ATA_PFLAG_LOADING;
		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait for EH to finish */
		ata_port_wait_eh(ap);
	} else {
		DPRINTK("ata%u: bus probe begin\n", ap->print_id);
		rc = ata_bus_probe(ap);
		DPRINTK("ata%u: bus probe end\n", ap->print_id);
	}
	return rc;
}
5970
5971
/* async_schedule() callback: probe one port, then scan its SCSI host. */
static void async_port_probe(void *data, async_cookie_t cookie)
{
	struct ata_port *ap = data;

	/*
	 * Without ATA_HOST_PARALLEL_SCAN, non-first ports wait for all
	 * earlier probes so ports are probed in order (legacy IDE
	 * expectations, shared-IRQ safety).
	 */
	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
		async_synchronize_cookie(cookie);

	(void)ata_port_probe(ap);

	/* in order to keep device order, we need to synchronize at this point */
	async_synchronize_cookie(cookie);

	ata_scsi_scan_host(ap, 1);
}
5993
5994
5995
5996
5997
5998
5999
6000
6001
6002
6003
6004
6005
6006
6007
6008
6009
/**
 *	ata_host_register - register an already-started ATA host
 *	@host: ATA host to register
 *	@sht: template for SCSI hosts
 *
 *	Assign print ids, create transport objects and SCSI hosts,
 *	associate ACPI, report each port, and schedule asynchronous
 *	probing.  RETURNS: 0 on success, -errno otherwise.
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_err(host->dev, "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/*
	 * Free any ports past n_ports (the ports[] array is
	 * NULL-terminated, see ata_host_alloc()).
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++)
		host->ports[i]->print_id = ata_print_id++;

	/* create transport objects before SCSI hosts */
	for (i = 0; i < host->n_ports; i++) {
		rc = ata_tport_add(host->dev,host->ports[i]);
		if (rc) {
			goto err_tadd;
		}
	}

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		goto err_tadd;

	/* associate with ACPI nodes */
	ata_acpi_associate(host);

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned long xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);
		if (ap->slave_link)
			sata_link_init_spd(ap->slave_link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap)) {
			ata_port_info(ap, "%cATA max %s %s\n",
				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
				      ata_mode_string(xfer_mask),
				      ap->link.eh_info.desc);
			ata_ehi_clear_desc(&ap->link.eh_info);
		} else
			ata_port_info(ap, "DUMMY\n");
	}

	/* perform each probe asynchronously */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		async_schedule(async_port_probe, ap);
	}

	return 0;

 err_tadd:
	/* tear down only the transport objects added so far */
	while (--i >= 0) {
		ata_tport_delete(host->ports[i]);
	}
	return rc;

}
6091
6092
6093
6094
6095
6096
6097
6098
6099
6100
6101
6102
6103
6104
6105
6106
6107
6108
6109
6110
6111
6112
6113
6114
/**
 *	ata_host_activate - start host, request IRQ and register it
 *	@host: target ATA host
 *	@irq: IRQ to request (0 means no IRQ, @irq_handler must be NULL)
 *	@irq_handler: irq_handler used when requesting IRQ
 *	@irq_flags: irq_flags used when requesting IRQ
 *	@sht: scsi_host_template to use when registering the host
 *
 *	Convenience wrapper: ata_host_start() + devm IRQ request +
 *	ata_host_register().  RETURNS: 0 on success, -errno otherwise.
 */
int ata_host_activate(struct ata_host *host, int irq,
		      irq_handler_t irq_handler, unsigned long irq_flags,
		      struct scsi_host_template *sht)
{
	int i, rc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	/* special case for polling-only hosts */
	if (!irq) {
		WARN_ON(irq_handler);
		return ata_host_register(host, sht);
	}

	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
			      dev_driver_string(host->dev), host);
	if (rc)
		return rc;

	for (i = 0; i < host->n_ports; i++)
		ata_port_desc(host->ports[i], "irq %d", irq);

	rc = ata_host_register(host, sht);
	/* if failed, just free the IRQ and leave ports alone */
	if (rc)
		devm_free_irq(host->dev, irq, host);

	return rc;
}
6146
6147
6148
6149
6150
6151
6152
6153
6154
6155
6156
6157
/**
 *	ata_port_detach - Detach ATA port in preparation of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 */
static void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	ata_port_schedule_eh(ap);
	spin_unlock_irqrestore(ap->lock, flags);

	/* wait till EH commits suicide */
	ata_port_wait_eh(ap);

	/* it better be dead now */
	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));

	cancel_delayed_work_sync(&ap->hotplug_task);

 skip_eh:
	/* remove transport objects before the SCSI host */
	if (ap->pmp_link) {
		int i;
		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
			ata_tlink_delete(&ap->pmp_link[i]);
	}
	ata_tport_delete(ap);

	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
6190
6191
6192
6193
6194
6195
6196
6197
6198
6199
6200void ata_host_detach(struct ata_host *host)
6201{
6202 int i;
6203
6204 for (i = 0; i < host->n_ports; i++)
6205 ata_port_detach(host->ports[i]);
6206
6207
6208 ata_acpi_dissociate(host);
6209}
6210
6211#ifdef CONFIG_PCI
6212
6213
6214
6215
6216
6217
6218
6219
6220
6221
6222
6223
6224void ata_pci_remove_one(struct pci_dev *pdev)
6225{
6226 struct device *dev = &pdev->dev;
6227 struct ata_host *host = dev_get_drvdata(dev);
6228
6229 ata_host_detach(host);
6230}
6231
6232
6233int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6234{
6235 unsigned long tmp = 0;
6236
6237 switch (bits->width) {
6238 case 1: {
6239 u8 tmp8 = 0;
6240 pci_read_config_byte(pdev, bits->reg, &tmp8);
6241 tmp = tmp8;
6242 break;
6243 }
6244 case 2: {
6245 u16 tmp16 = 0;
6246 pci_read_config_word(pdev, bits->reg, &tmp16);
6247 tmp = tmp16;
6248 break;
6249 }
6250 case 4: {
6251 u32 tmp32 = 0;
6252 pci_read_config_dword(pdev, bits->reg, &tmp32);
6253 tmp = tmp32;
6254 break;
6255 }
6256
6257 default:
6258 return -EINVAL;
6259 }
6260
6261 tmp &= bits->mask;
6262
6263 return (tmp == bits->val) ? 1 : 0;
6264}
6265
6266#ifdef CONFIG_PM
/*
 * Common PCI-side suspend: save config space, disable the device and
 * drop to D3hot for sleep-type PM events.
 */
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event & PM_EVENT_SLEEP)
		pci_set_power_state(pdev, PCI_D3hot);
}
6275
/*
 * Common PCI-side resume: back to D0, restore config space, re-enable
 * (devres-managed) and restore bus mastering.  RETURNS: 0 or -errno.
 */
int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev,
			"failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}
6293
6294int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6295{
6296 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6297 int rc = 0;
6298
6299 rc = ata_host_suspend(host, mesg);
6300 if (rc)
6301 return rc;
6302
6303 ata_pci_device_do_suspend(pdev, mesg);
6304
6305 return 0;
6306}
6307
6308int ata_pci_device_resume(struct pci_dev *pdev)
6309{
6310 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6311 int rc;
6312
6313 rc = ata_pci_device_do_resume(pdev);
6314 if (rc == 0)
6315 ata_host_resume(host);
6316 return rc;
6317}
6318#endif
6319
6320#endif
6321
6322static int __init ata_parse_force_one(char **cur,
6323 struct ata_force_ent *force_ent,
6324 const char **reason)
6325{
6326
6327
6328
6329
6330
6331 static struct ata_force_param force_tbl[] __initdata = {
6332 { "40c", .cbl = ATA_CBL_PATA40 },
6333 { "80c", .cbl = ATA_CBL_PATA80 },
6334 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
6335 { "unk", .cbl = ATA_CBL_PATA_UNK },
6336 { "ign", .cbl = ATA_CBL_PATA_IGN },
6337 { "sata", .cbl = ATA_CBL_SATA },
6338 { "1.5Gbps", .spd_limit = 1 },
6339 { "3.0Gbps", .spd_limit = 2 },
6340 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
6341 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
6342 { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID },
6343 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
6344 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
6345 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
6346 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
6347 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
6348 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
6349 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
6350 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
6351 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
6352 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
6353 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
6354 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
6355 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6356 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6357 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6358 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6359 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6360 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6361 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6362 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6363 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6364 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6365 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6366 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6367 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6368 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6369 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6370 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6371 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6372 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6373 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6374 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6375 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6376 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
6377 { "nohrst", .lflags = ATA_LFLAG_NO_HRST },
6378 { "nosrst", .lflags = ATA_LFLAG_NO_SRST },
6379 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6380 };
6381 char *start = *cur, *p = *cur;
6382 char *id, *val, *endp;
6383 const struct ata_force_param *match_fp = NULL;
6384 int nr_matches = 0, i;
6385
6386
6387 while (*p != '\0' && *p != ',')
6388 p++;
6389
6390 if (*p == '\0')
6391 *cur = p;
6392 else
6393 *cur = p + 1;
6394
6395 *p = '\0';
6396
6397
6398 p = strchr(start, ':');
6399 if (!p) {
6400 val = strstrip(start);
6401 goto parse_val;
6402 }
6403 *p = '\0';
6404
6405 id = strstrip(start);
6406 val = strstrip(p + 1);
6407
6408
6409 p = strchr(id, '.');
6410 if (p) {
6411 *p++ = '\0';
6412 force_ent->device = simple_strtoul(p, &endp, 10);
6413 if (p == endp || *endp != '\0') {
6414 *reason = "invalid device";
6415 return -EINVAL;
6416 }
6417 }
6418
6419 force_ent->port = simple_strtoul(id, &endp, 10);
6420 if (p == endp || *endp != '\0') {
6421 *reason = "invalid port/link";
6422 return -EINVAL;
6423 }
6424
6425 parse_val:
6426
6427 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6428 const struct ata_force_param *fp = &force_tbl[i];
6429
6430 if (strncasecmp(val, fp->name, strlen(val)))
6431 continue;
6432
6433 nr_matches++;
6434 match_fp = fp;
6435
6436 if (strcasecmp(val, fp->name) == 0) {
6437 nr_matches = 1;
6438 break;
6439 }
6440 }
6441
6442 if (!nr_matches) {
6443 *reason = "unknown value";
6444 return -EINVAL;
6445 }
6446 if (nr_matches > 1) {
6447 *reason = "ambigious value";
6448 return -EINVAL;
6449 }
6450
6451 force_ent->param = *match_fp;
6452
6453 return 0;
6454}
6455
/*
 * ata_parse_force_param - build ata_force_tbl from libata.force
 *
 * Splits the comma-separated parameter buffer into force entries.
 * Unparsable tokens are warned about and skipped.  Entries without an
 * explicit port inherit the previous entry's port/device.
 */
static void __init ata_parse_force_param(void)
{
	int idx = 0, size = 1;
	int last_port = -1, last_device = -1;
	char *p, *cur, *next;

	/* calculate maximum number of params and allocate force_tbl */
	for (p = ata_force_param_buf; *p; p++)
		if (*p == ',')
			size++;

	ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
	if (!ata_force_tbl) {
		printk(KERN_WARNING "ata: failed to extend force table, "
		       "libata.force ignored\n");
		return;
	}

	/* parse and populate the table */
	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
		const char *reason = "";
		struct ata_force_ent te = { .port = -1, .device = -1 };

		next = cur;
		if (ata_parse_force_one(&next, &te, &reason)) {
			printk(KERN_WARNING "ata: failed to parse force "
			       "parameter \"%s\" (%s)\n",
			       cur, reason);
			continue;
		}

		/* bare values apply to the previously named port/device */
		if (te.port == -1) {
			te.port = last_port;
			te.device = last_device;
		}

		ata_force_tbl[idx++] = te;

		last_port = te.port;
		last_device = te.device;
	}

	ata_force_tbl_size = idx;
}
6500
/*
 * Module init: parse libata.force, bring up the SFF helper library and
 * register the SCSI transport template.
 */
static int __init ata_init(void)
{
	int rc;

	ata_parse_force_param();

	rc = ata_sff_init();
	if (rc) {
		/* force table is useless without the rest of libata */
		kfree(ata_force_tbl);
		return rc;
	}

	libata_transport_init();
	ata_scsi_transport_template = ata_attach_transport();
	if (!ata_scsi_transport_template) {
		ata_sff_exit();
		rc = -ENOMEM;
		goto err_out;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;

err_out:
	return rc;
}
6527
/* Module exit: tear down in reverse order of ata_init(). */
static void __exit ata_exit(void)
{
	ata_release_transport(ata_scsi_transport_template);
	libata_transport_exit();
	ata_sff_exit();
	kfree(ata_force_tbl);
}
6535
6536subsys_initcall(ata_init);
6537module_exit(ata_exit);
6538
6539static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
6540
/* Returns non-zero when a ratelimited libata message may be printed. */
int ata_ratelimit(void)
{
	return __ratelimit(&ratelimit);
}
6545
6546
6547
6548
6549
6550
6551
6552
6553
6554
6555
6556
6557
6558
6559
/**
 *	ata_msleep - ATA EH owner aware msleep
 *	@ap: ATA port to attribute the sleep to (may be NULL)
 *	@msecs: duration to sleep in milliseconds
 *
 *	Sleeps @msecs.  If the current task owns @ap's EH, ownership is
 *	released before and re-acquired after the sleep so other EH
 *	work can make progress meanwhile.
 */
void ata_msleep(struct ata_port *ap, unsigned int msecs)
{
	bool owns_eh = ap && ap->host->eh_owner == current;

	if (owns_eh)
		ata_eh_release(ap);

	msleep(msecs);

	if (owns_eh)
		ata_eh_acquire(ap);
}
6572
6573
6574
6575
6576
6577
6578
6579
6580
6581
6582
6583
6584
6585
6586
6587
6588
6589
6590
6591
6592
6593
6594
6595
6596
/**
 *	ata_wait_register - wait until a register stops matching a value
 *	@ap: ATA port (used for EH-aware sleeping, may be NULL)
 *	@reg: IO-mapped register to poll
 *	@mask: bits to test
 *	@val: value to wait for the masked bits to change AWAY from
 *	@interval: polling interval in milliseconds
 *	@timeout: timeout in milliseconds
 *
 *	RETURNS: the final register value (callers re-test the
 *	condition to distinguish success from timeout).
 */
u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval, unsigned long timeout)
{
	unsigned long deadline;
	u32 tmp;

	tmp = ioread32(reg);

	/*
	 * Compute the deadline only after the first read: code leading
	 * to this call may have been slow and must not eat the timeout.
	 */
	deadline = ata_deadline(jiffies, timeout);

	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
		ata_msleep(ap, interval);
		tmp = ioread32(reg);
	}

	return tmp;
}
6618
6619
6620
6621
/* Dummy port op: every issued command fails with a system error. */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}
6626
/* Dummy port op: no recovery is possible or needed on a dummy port. */
static void ata_dummy_error_handler(struct ata_port *ap)
{
	/* truly nothing to do */
}
6631
/* Operations for dummy (unused/disabled) ports. */
struct ata_port_operations ata_dummy_port_ops = {
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.error_handler		= ata_dummy_error_handler,
};
6637
/* Port info for dummy ports; only the dummy ops table is set. */
const struct ata_port_info ata_dummy_port_info = {
	.port_ops		= &ata_dummy_port_ops,
};
6641
6642
6643
6644
6645int ata_port_printk(const struct ata_port *ap, const char *level,
6646 const char *fmt, ...)
6647{
6648 struct va_format vaf;
6649 va_list args;
6650 int r;
6651
6652 va_start(args, fmt);
6653
6654 vaf.fmt = fmt;
6655 vaf.va = &args;
6656
6657 r = printk("%sata%u: %pV", level, ap->print_id, &vaf);
6658
6659 va_end(args);
6660
6661 return r;
6662}
6663EXPORT_SYMBOL(ata_port_printk);
6664
6665int ata_link_printk(const struct ata_link *link, const char *level,
6666 const char *fmt, ...)
6667{
6668 struct va_format vaf;
6669 va_list args;
6670 int r;
6671
6672 va_start(args, fmt);
6673
6674 vaf.fmt = fmt;
6675 vaf.va = &args;
6676
6677 if (sata_pmp_attached(link->ap) || link->ap->slave_link)
6678 r = printk("%sata%u.%02u: %pV",
6679 level, link->ap->print_id, link->pmp, &vaf);
6680 else
6681 r = printk("%sata%u: %pV",
6682 level, link->ap->print_id, &vaf);
6683
6684 va_end(args);
6685
6686 return r;
6687}
6688EXPORT_SYMBOL(ata_link_printk);
6689
6690int ata_dev_printk(const struct ata_device *dev, const char *level,
6691 const char *fmt, ...)
6692{
6693 struct va_format vaf;
6694 va_list args;
6695 int r;
6696
6697 va_start(args, fmt);
6698
6699 vaf.fmt = fmt;
6700 vaf.va = &args;
6701
6702 r = printk("%sata%u.%02u: %pV",
6703 level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
6704 &vaf);
6705
6706 va_end(args);
6707
6708 return r;
6709}
6710EXPORT_SYMBOL(ata_dev_printk);
6711
/*
 *	ata_print_version - emit a driver version message for @dev
 *
 *	Thin wrapper around dev_printk() at KERN_DEBUG level; used by
 *	LLDs via the ata_print_version_once() helper.
 */
void ata_print_version(const struct device *dev, const char *version)
{
	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
}
EXPORT_SYMBOL(ata_print_version);
6717
6718
6719
6720
6721
6722
6723
6724EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6725EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6726EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6727EXPORT_SYMBOL_GPL(ata_base_port_ops);
6728EXPORT_SYMBOL_GPL(sata_port_ops);
6729EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6730EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6731EXPORT_SYMBOL_GPL(ata_link_next);
6732EXPORT_SYMBOL_GPL(ata_dev_next);
6733EXPORT_SYMBOL_GPL(ata_std_bios_param);
6734EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
6735EXPORT_SYMBOL_GPL(ata_host_init);
6736EXPORT_SYMBOL_GPL(ata_host_alloc);
6737EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6738EXPORT_SYMBOL_GPL(ata_slave_link_init);
6739EXPORT_SYMBOL_GPL(ata_host_start);
6740EXPORT_SYMBOL_GPL(ata_host_register);
6741EXPORT_SYMBOL_GPL(ata_host_activate);
6742EXPORT_SYMBOL_GPL(ata_host_detach);
6743EXPORT_SYMBOL_GPL(ata_sg_init);
6744EXPORT_SYMBOL_GPL(ata_qc_complete);
6745EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6746EXPORT_SYMBOL_GPL(atapi_cmd_type);
6747EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6748EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6749EXPORT_SYMBOL_GPL(ata_pack_xfermask);
6750EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
6751EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
6752EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6753EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6754EXPORT_SYMBOL_GPL(ata_mode_string);
6755EXPORT_SYMBOL_GPL(ata_id_xfermask);
6756EXPORT_SYMBOL_GPL(ata_do_set_mode);
6757EXPORT_SYMBOL_GPL(ata_std_qc_defer);
6758EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6759EXPORT_SYMBOL_GPL(ata_dev_disable);
6760EXPORT_SYMBOL_GPL(sata_set_spd);
6761EXPORT_SYMBOL_GPL(ata_wait_after_reset);
6762EXPORT_SYMBOL_GPL(sata_link_debounce);
6763EXPORT_SYMBOL_GPL(sata_link_resume);
6764EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
6765EXPORT_SYMBOL_GPL(ata_std_prereset);
6766EXPORT_SYMBOL_GPL(sata_link_hardreset);
6767EXPORT_SYMBOL_GPL(sata_std_hardreset);
6768EXPORT_SYMBOL_GPL(ata_std_postreset);
6769EXPORT_SYMBOL_GPL(ata_dev_classify);
6770EXPORT_SYMBOL_GPL(ata_dev_pair);
6771EXPORT_SYMBOL_GPL(ata_ratelimit);
6772EXPORT_SYMBOL_GPL(ata_msleep);
6773EXPORT_SYMBOL_GPL(ata_wait_register);
6774EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6775EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6776EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6777EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6778EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
6779EXPORT_SYMBOL_GPL(sata_scr_valid);
6780EXPORT_SYMBOL_GPL(sata_scr_read);
6781EXPORT_SYMBOL_GPL(sata_scr_write);
6782EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6783EXPORT_SYMBOL_GPL(ata_link_online);
6784EXPORT_SYMBOL_GPL(ata_link_offline);
6785#ifdef CONFIG_PM
6786EXPORT_SYMBOL_GPL(ata_host_suspend);
6787EXPORT_SYMBOL_GPL(ata_host_resume);
6788#endif
6789EXPORT_SYMBOL_GPL(ata_id_string);
6790EXPORT_SYMBOL_GPL(ata_id_c_string);
6791EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
6792EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6793
6794EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6795EXPORT_SYMBOL_GPL(ata_timing_find_mode);
6796EXPORT_SYMBOL_GPL(ata_timing_compute);
6797EXPORT_SYMBOL_GPL(ata_timing_merge);
6798EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
6799
6800#ifdef CONFIG_PCI
6801EXPORT_SYMBOL_GPL(pci_test_config_bits);
6802EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6803#ifdef CONFIG_PM
6804EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6805EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6806EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6807EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6808#endif
6809#endif
6810
6811EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
6812EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
6813EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
6814EXPORT_SYMBOL_GPL(ata_port_desc);
6815#ifdef CONFIG_PCI
6816EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
6817#endif
6818EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6819EXPORT_SYMBOL_GPL(ata_link_abort);
6820EXPORT_SYMBOL_GPL(ata_port_abort);
6821EXPORT_SYMBOL_GPL(ata_port_freeze);
6822EXPORT_SYMBOL_GPL(sata_async_notification);
6823EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6824EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6825EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6826EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6827EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
6828EXPORT_SYMBOL_GPL(ata_do_eh);
6829EXPORT_SYMBOL_GPL(ata_std_error_handler);
6830
6831EXPORT_SYMBOL_GPL(ata_cable_40wire);
6832EXPORT_SYMBOL_GPL(ata_cable_80wire);
6833EXPORT_SYMBOL_GPL(ata_cable_unknown);
6834EXPORT_SYMBOL_GPL(ata_cable_ignore);
6835EXPORT_SYMBOL_GPL(ata_cable_sata);
6836