1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43#include <linux/kernel.h>
44#include <linux/module.h>
45#include <linux/pci.h>
46#include <linux/init.h>
47#include <linux/list.h>
48#include <linux/mm.h>
49#include <linux/spinlock.h>
50#include <linux/blkdev.h>
51#include <linux/delay.h>
52#include <linux/timer.h>
53#include <linux/interrupt.h>
54#include <linux/completion.h>
55#include <linux/suspend.h>
56#include <linux/workqueue.h>
57#include <linux/scatterlist.h>
58#include <linux/io.h>
59#include <linux/async.h>
60#include <linux/log2.h>
61#include <linux/slab.h>
62#include <scsi/scsi.h>
63#include <scsi/scsi_cmnd.h>
64#include <scsi/scsi_host.h>
65#include <linux/libata.h>
66#include <asm/byteorder.h>
67#include <linux/cdrom.h>
68#include <linux/ratelimit.h>
69#include <linux/pm_runtime.h>
70
71#include "libata.h"
72#include "libata-transport.h"
73
74
/*
 * SATA PHY debounce timing parameter sets, used during link resume.
 * Each triple is { interval, duration, timeout } in milliseconds.
 */
const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
78
/*
 * Base port operations every libata driver inherits: the standard
 * prereset/postreset hooks and the standard error handler.
 */
const struct ata_port_operations ata_base_port_ops = {
	.prereset = ata_std_prereset,
	.postreset = ata_std_postreset,
	.error_handler = ata_std_error_handler,
};
84
/*
 * Default operations for SATA ports: base ops plus NCQ-aware command
 * deferral and the standard SATA hardreset.
 */
const struct ata_port_operations sata_port_ops = {
	.inherits = &ata_base_port_ops,

	.qc_defer = ata_std_qc_defer,
	.hardreset = sata_std_hardreset,
};
91
92static unsigned int ata_dev_init_params(struct ata_device *dev,
93 u16 heads, u16 sectors);
94static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
95static void ata_dev_xfermask(struct ata_device *dev);
96static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
97
/* monotonically increasing id handed out to ports for log messages */
atomic_t ata_print_id = ATOMIC_INIT(0);

/*
 * One parsed "libata.force=" override value.  @name is the human
 * readable token echoed in log messages; the remaining fields hold the
 * setting to force (cable type, spd limit, xfer mask, horkage bits to
 * turn on/off, link flags).
 */
struct ata_force_param {
	const char *name;
	unsigned int cbl;
	int spd_limit;
	unsigned long xfer_mask;
	unsigned int horkage_on;
	unsigned int horkage_off;
	unsigned int lflags;
};

/*
 * Entry in the force table: the port/device the override applies to
 * (-1 acts as a wildcard) plus the parameter to apply.
 */
struct ata_force_ent {
	int port;
	int device;
	struct ata_force_param param;
};

/* table built by parsing the "force" module parameter during init */
static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

/* raw, unparsed "force" parameter string; consumed during module init */
static char ata_force_param_buf[PAGE_SIZE] __initdata;

module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");

static int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");

static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

/* 0 selects per-command auto timeout; see ata_exec_internal_sg() */
static int ata_probe_timeout;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
159
160static int atapi_an;
161module_param(atapi_an, int, 0444);
162MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=0ff [default], 1=on)");
163
164MODULE_AUTHOR("Jeff Garzik");
165MODULE_DESCRIPTION("Library module for ATA devices");
166MODULE_LICENSE("GPL");
167MODULE_VERSION(DRV_VERSION);
168
169
170static bool ata_sstatus_online(u32 sstatus)
171{
172 return (sstatus & 0xf) == 0x3;
173}
174
175
176
177
178
179
180
181
182
183
184
185
186
/**
 * ata_link_next - link iteration helper
 * @link: the previous link, NULL to start
 * @ap: ATA port containing links to iterate
 * @mode: iteration mode; controls whether the host link is visited
 *        first, last (PMP first), or skipped when a PMP is attached
 *        (edge mode), and whether the slave link is included
 *
 * Returns the next link after @link, or NULL when iteration is done.
 * The switch cases below deliberately fall through.
 */
struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
			       enum ata_link_iter_mode mode)
{
	BUG_ON(mode != ATA_LITER_EDGE &&
	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);

	/* NULL link indicates start of iteration */
	if (!link)
		switch (mode) {
		case ATA_LITER_EDGE:
		case ATA_LITER_PMP_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_HOST_FIRST:
			return &ap->link;
		}

	/* we just iterated over the host link, what's next? */
	if (link == &ap->link)
		switch (mode) {
		case ATA_LITER_HOST_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_PMP_FIRST:
			if (unlikely(ap->slave_link))
				return ap->slave_link;
			/* fall through */
		case ATA_LITER_EDGE:
			return NULL;
		}

	/* slave_link excludes PMP; a slave link is always the last link */
	if (unlikely(link == ap->slave_link))
		return NULL;

	/* we were over a PMP link; advance within the PMP link array */
	if (++link < ap->pmp_link + ap->nr_pmp_links)
		return link;

	/* in PMP-first mode the host link is visited after all PMP links */
	if (mode == ATA_LITER_PMP_FIRST)
		return &ap->link;

	return NULL;
}
233
234
235
236
237
238
239
240
241
242
243
244
245
/**
 * ata_dev_next - device iteration helper
 * @dev: the previous device, NULL to start
 * @link: ATA link containing devices to iterate
 * @mode: iteration mode; forward or reverse, optionally restricted to
 *        enabled devices only
 *
 * Returns the next device, or NULL when iteration is done.
 */
struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
				enum ata_dev_iter_mode mode)
{
	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);

	/* NULL dev indicates start of iteration */
	if (!dev)
		switch (mode) {
		case ATA_DITER_ENABLED:
		case ATA_DITER_ALL:
			dev = link->device;
			goto check;
		case ATA_DITER_ENABLED_REVERSE:
		case ATA_DITER_ALL_REVERSE:
			dev = link->device + ata_link_max_devices(link) - 1;
			goto check;
		}

 next:
	/* move to the next one, direction depending on mode */
	switch (mode) {
	case ATA_DITER_ENABLED:
	case ATA_DITER_ALL:
		if (++dev < link->device + ata_link_max_devices(link))
			goto check;
		return NULL;
	case ATA_DITER_ENABLED_REVERSE:
	case ATA_DITER_ALL_REVERSE:
		if (--dev >= link->device)
			goto check;
		return NULL;
	}

 check:
	/* in ENABLED modes, skip over devices that are not enabled */
	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
	    !ata_dev_enabled(dev))
		goto next;
	return dev;
}
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301struct ata_link *ata_dev_phys_link(struct ata_device *dev)
302{
303 struct ata_port *ap = dev->link->ap;
304
305 if (!ap->slave_link)
306 return dev->link;
307 if (!dev->devno)
308 return &ap->link;
309 return ap->slave_link;
310}
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325void ata_force_cbl(struct ata_port *ap)
326{
327 int i;
328
329 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
330 const struct ata_force_ent *fe = &ata_force_tbl[i];
331
332 if (fe->port != -1 && fe->port != ap->print_id)
333 continue;
334
335 if (fe->param.cbl == ATA_CBL_NONE)
336 continue;
337
338 ap->cbl = fe->param.cbl;
339 ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
340 return;
341 }
342}
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360static void ata_force_link_limits(struct ata_link *link)
361{
362 bool did_spd = false;
363 int linkno = link->pmp;
364 int i;
365
366 if (ata_is_host_link(link))
367 linkno += 15;
368
369 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
370 const struct ata_force_ent *fe = &ata_force_tbl[i];
371
372 if (fe->port != -1 && fe->port != link->ap->print_id)
373 continue;
374
375 if (fe->device != -1 && fe->device != linkno)
376 continue;
377
378
379 if (!did_spd && fe->param.spd_limit) {
380 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
381 ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
382 fe->param.name);
383 did_spd = true;
384 }
385
386
387 if (fe->param.lflags) {
388 link->flags |= fe->param.lflags;
389 ata_link_notice(link,
390 "FORCE: link flag 0x%x forced -> 0x%x\n",
391 fe->param.lflags, link->flags);
392 }
393 }
394}
395
396
397
398
399
400
401
402
403
404
405
406
407static void ata_force_xfermask(struct ata_device *dev)
408{
409 int devno = dev->link->pmp + dev->devno;
410 int alt_devno = devno;
411 int i;
412
413
414 if (ata_is_host_link(dev->link))
415 alt_devno += 15;
416
417 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
418 const struct ata_force_ent *fe = &ata_force_tbl[i];
419 unsigned long pio_mask, mwdma_mask, udma_mask;
420
421 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
422 continue;
423
424 if (fe->device != -1 && fe->device != devno &&
425 fe->device != alt_devno)
426 continue;
427
428 if (!fe->param.xfer_mask)
429 continue;
430
431 ata_unpack_xfermask(fe->param.xfer_mask,
432 &pio_mask, &mwdma_mask, &udma_mask);
433 if (udma_mask)
434 dev->udma_mask = udma_mask;
435 else if (mwdma_mask) {
436 dev->udma_mask = 0;
437 dev->mwdma_mask = mwdma_mask;
438 } else {
439 dev->udma_mask = 0;
440 dev->mwdma_mask = 0;
441 dev->pio_mask = pio_mask;
442 }
443
444 ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
445 fe->param.name);
446 return;
447 }
448}
449
450
451
452
453
454
455
456
457
458
459
460
/*
 * ata_force_horkage - apply forced horkage (quirk) bits from the force table
 * @dev: ATA device to configure
 *
 * Unlike the other ata_force_* helpers this iterates FORWARD so that
 * every matching entry is applied in order, letting later entries
 * override earlier ones bit by bit rather than stopping at the first
 * match.
 */
static void ata_force_horkage(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* devices on a host link are also addressable from 15 up */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = 0; i < ata_force_tbl_size; i++) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		/* skip entries that would change nothing */
		if (!(~dev->horkage & fe->param.horkage_on) &&
		    !(dev->horkage & fe->param.horkage_off))
			continue;

		dev->horkage |= fe->param.horkage_on;
		dev->horkage &= ~fe->param.horkage_off;

		ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
			       fe->param.name);
	}
}
492
493
494
495
496
497
498
499
500
501
502
503
504
/**
 * atapi_cmd_type - determine ATAPI command type from SCSI opcode
 * @opcode: SCSI opcode
 *
 * Classifies @opcode for ATAPI command handling.  Returns one of
 * ATAPI_{READ,WRITE,READ_CD,PASS_THRU,MISC}.
 */
int atapi_cmd_type(u8 opcode)
{
	switch (opcode) {
	case GPCMD_READ_10:
	case GPCMD_READ_12:
		return ATAPI_READ;

	case GPCMD_WRITE_10:
	case GPCMD_WRITE_12:
	case GPCMD_WRITE_AND_VERIFY_10:
		return ATAPI_WRITE;

	case GPCMD_READ_CD:
	case GPCMD_READ_CD_MSF:
		return ATAPI_READ_CD;

	case ATA_16:
	case ATA_12:
		if (atapi_passthru16)
			return ATAPI_PASS_THRU;
		/* fall through: passthru disabled, treat as misc */
	default:
		return ATAPI_MISC;
	}
}
530
531
532
533
534
535
536
537
538
539
540
541
542
543
/**
 * ata_tf_to_fis - convert an ATA taskfile to a SATA Register FIS
 * @tf: taskfile to convert
 * @pmp: port multiplier port number (0-15)
 * @is_cmd: set for a command FIS (C bit), clear for a control FIS
 * @fis: buffer to receive the 20-byte Register - Host to Device FIS
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
	fis[0] = 0x27;			/* FIS type: Register - Host to Device */
	fis[1] = pmp & 0xf;		/* PMP field in low nibble */
	if (is_cmd)
		fis[1] |= (1 << 7);	/* C bit: register update is a command */

	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	/* high-order bytes for 48-bit commands */
	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	/* reserved */
	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}
574
575
576
577
578
579
580
581
582
583
584
585
/**
 * ata_tf_from_fis - fill an ATA taskfile from a SATA Register FIS
 * @fis: Register - Device to Host FIS to read
 * @tf: taskfile to fill in
 *
 * Inverse of ata_tf_to_fis() for the fields a device reports back
 * (note: hob_feature/ctl have no device-to-host counterpart and are
 * not touched).
 */
void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command = fis[2];	/* status */
	tf->feature = fis[3];	/* error */

	tf->lbal = fis[4];
	tf->lbam = fis[5];
	tf->lbah = fis[6];
	tf->device = fis[7];

	tf->hob_lbal = fis[8];
	tf->hob_lbam = fis[9];
	tf->hob_lbah = fis[10];

	tf->nsect = fis[12];
	tf->hob_nsect = fis[13];
}
603
/*
 * Read/write command lookup table, indexed by
 *	index + fua(4) + lba48(2) + write(1)
 * where index selects the protocol group: 0 = PIO multi-sector,
 * 8 = PIO single-sector, 16 = DMA.  Zero entries mark invalid
 * combinations (e.g. FUA without LBA48, or FUA reads).
 */
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
633
634
635
636
637
638
639
640
641
642
643
644
/**
 * ata_rwcmd_protocol - set taskfile r/w command and protocol
 * @tf: command to examine and configure; ATA_TFLAG_{FUA,LBA48,WRITE}
 *      select the ata_rw_cmds entry
 * @dev: device the command targets
 *
 * Picks the read/write command and protocol (PIO vs DMA) for @tf.
 * Returns 0 on success, -1 if the flag combination has no valid
 * command (zero entry in ata_rw_cmds).
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* controller can't do LBA48 DMA; fall back to PIO */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
/**
 * ata_tf_read_block - return the block address a taskfile refers to
 * @tf: taskfile to decode (result taskfile of a completed command)
 * @dev: device the taskfile belongs to (for CHS geometry)
 *
 * Decodes LBA48, LBA28 or CHS addressing depending on @tf flags and
 * returns the block address.  For CHS, a reported sector of 0 is
 * invalid; it is clamped to 1 with a warning.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= (u64)tf->hob_lbal << 24;
		} else
			/* LBA28: top nibble lives in the device register */
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		if (!sect) {
			ata_dev_warn(dev,
				     "device reported invalid CHS sector 0\n");
			sect = 1; /* oh well */
		}

		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
	}

	return block;
}
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
/**
 * ata_build_rw_tf - build a taskfile for a given read/write request
 * @tf: target taskfile (caller-initialized; flags are OR'd in)
 * @dev: target device
 * @block: starting block address
 * @n_block: number of blocks (0 means 256 for non-EXT commands)
 * @tf_flags: RW/FUA/etc flags to merge into @tf
 * @tag: command tag, used for NCQ
 *
 * Selects NCQ, LBA48, LBA28 or CHS addressing depending on device
 * capabilities and request size.  Returns 0 on success, -ERANGE if
 * the request is too large for the available addressing mode, or
 * -EINVAL if no valid command exists for the flag combination.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* NCQ: always uses LBA48 addressing */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		/* NCQ carries the tag in nsect bits 7:3 and the sector
		 * count in the feature registers */
		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28; top nibble in device register */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl = track / dev->heads;
		head = track % dev->heads;
		sect = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		 * Cylinder: 0-65535, Head: 0-15, Sector: 1-255 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862unsigned long ata_pack_xfermask(unsigned long pio_mask,
863 unsigned long mwdma_mask,
864 unsigned long udma_mask)
865{
866 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
867 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
868 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
869}
870
871
872
873
874
875
876
877
878
879
880
881void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
882 unsigned long *mwdma_mask, unsigned long *udma_mask)
883{
884 if (pio_mask)
885 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
886 if (mwdma_mask)
887 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
888 if (udma_mask)
889 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
890}
891
/*
 * Maps each transfer-mode class to its bit range in an xfer_mask
 * (shift/bits) and its base XFER_* mode id.  Terminated by a
 * sentinel entry with shift == -1.
 */
static const struct ata_xfer_ent {
	int shift, bits;	/* position and width within xfer_mask */
	u8 base;		/* XFER_* id of the class's mode 0 */
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915u8 ata_xfer_mask2mode(unsigned long xfer_mask)
916{
917 int highbit = fls(xfer_mask) - 1;
918 const struct ata_xfer_ent *ent;
919
920 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
921 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
922 return ent->base + highbit - ent->shift;
923 return 0xff;
924}
925
926
927
928
929
930
931
932
933
934
935
936
937
/**
 * ata_xfer_mode2mask - find the xfer_mask for an XFER_* mode
 * @xfer_mode: XFER_* value to examine
 *
 * Returns the matching xfer_mask with the bit for @xfer_mode AND all
 * lower modes of the same class set (a device supporting a mode also
 * supports the slower ones), or 0 if no match is found.
 */
unsigned long ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			/* all bits up to and including the mode's bit,
			 * restricted to this class's bit range */
			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
				& ~((1 << ent->shift) - 1);
	return 0;
}
948
949
950
951
952
953
954
955
956
957
958
959
960
961int ata_xfer_mode2shift(unsigned long xfer_mode)
962{
963 const struct ata_xfer_ent *ent;
964
965 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
966 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
967 return ent->shift;
968 return -1;
969}
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
/**
 * ata_mode_string - name the fastest transfer mode in an xfer_mask
 * @xfer_mask: mask of transfer modes
 *
 * Returns a constant string naming the highest mode set in
 * @xfer_mask, or "<n/a>" if the mask is empty or out of range.
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int bit = fls(xfer_mask) - 1;

	if (bit < 0 || bit >= ARRAY_SIZE(xfer_mode_str))
		return "<n/a>";

	return xfer_mode_str[bit];
}
1016
/*
 * sata_spd_string - name a SATA link speed generation
 * @spd: 1-based SPD value (1 = 1.5 Gbps, 2 = 3.0 Gbps, 3 = 6.0 Gbps)
 *
 * Returns a constant descriptive string, or "<unknown>" for 0 or
 * out-of-range values.
 */
const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
		"6.0 Gbps",
	};
	unsigned int idx = spd - 1;

	if (spd == 0 || idx >= ARRAY_SIZE(spd_str))
		return "<unknown>";

	return spd_str[idx];
}
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
/**
 * ata_dev_classify - classify a device from its reset signature
 * @tf: taskfile holding the signature read after reset
 *
 * The lbam/lbah pair after reset identifies the device class:
 *
 *	lbam	lbah	class
 *	0x00	0x00	ATA
 *	0x14	0xeb	ATAPI
 *	0x69	0x96	port multiplier (PMP)
 *	0x3c	0xc3	SEMB (SATA enclosure management bridge;
 *			may also be an ATA device reporting SEMB sig)
 *
 * Returns one of ATA_DEV_{ATA,ATAPI,PMP,SEMB,UNKNOWN}.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");
		return ATA_DEV_PMP;
	}

	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		DPRINTK("found SEMB device by sig (could be ATA device)\n");
		return ATA_DEV_SEMB;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107void ata_id_string(const u16 *id, unsigned char *s,
1108 unsigned int ofs, unsigned int len)
1109{
1110 unsigned int c;
1111
1112 BUG_ON(len & 1);
1113
1114 while (len > 0) {
1115 c = id[ofs] >> 8;
1116 *s = c;
1117 s++;
1118
1119 c = id[ofs] & 0xff;
1120 *s = c;
1121 s++;
1122
1123 ofs++;
1124 len -= 2;
1125 }
1126}
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142void ata_id_c_string(const u16 *id, unsigned char *s,
1143 unsigned int ofs, unsigned int len)
1144{
1145 unsigned char *p;
1146
1147 ata_id_string(id, s, ofs, len - 1);
1148
1149 p = s + strnlen(s, len - 1);
1150 while (p > s && p[-1] == ' ')
1151 p--;
1152 *p = '\0';
1153}
1154
/*
 * ata_id_n_sectors - device capacity in sectors from IDENTIFY data
 * @id: IDENTIFY DEVICE results
 *
 * Prefers the LBA48 capacity field, then LBA28, then falls back to
 * CHS geometry (current geometry if valid, else default geometry).
 */
static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
		else
			return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
	} else {
		if (ata_id_current_chs_valid(id))
			return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
			       id[ATA_ID_CUR_SECTORS];
		else
			return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
			       id[ATA_ID_SECTORS];
	}
}
1171
1172u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1173{
1174 u64 sectors = 0;
1175
1176 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1177 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1178 sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1179 sectors |= (tf->lbah & 0xff) << 16;
1180 sectors |= (tf->lbam & 0xff) << 8;
1181 sectors |= (tf->lbal & 0xff);
1182
1183 return sectors;
1184}
1185
1186u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1187{
1188 u64 sectors = 0;
1189
1190 sectors |= (tf->device & 0x0f) << 24;
1191 sectors |= (tf->lbah & 0xff) << 16;
1192 sectors |= (tf->lbam & 0xff) << 8;
1193 sectors |= (tf->lbal & 0xff);
1194
1195 return sectors;
1196}
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1211{
1212 unsigned int err_mask;
1213 struct ata_taskfile tf;
1214 int lba48 = ata_id_has_lba48(dev->id);
1215
1216 ata_tf_init(dev, &tf);
1217
1218
1219 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1220
1221 if (lba48) {
1222 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1223 tf.flags |= ATA_TFLAG_LBA48;
1224 } else
1225 tf.command = ATA_CMD_READ_NATIVE_MAX;
1226
1227 tf.protocol |= ATA_PROT_NODATA;
1228 tf.device |= ATA_LBA;
1229
1230 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1231 if (err_mask) {
1232 ata_dev_warn(dev,
1233 "failed to read native max address (err_mask=0x%x)\n",
1234 err_mask);
1235 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1236 return -EACCES;
1237 return -EIO;
1238 }
1239
1240 if (lba48)
1241 *max_sectors = ata_tf_to_lba48(&tf) + 1;
1242 else
1243 *max_sectors = ata_tf_to_lba(&tf) + 1;
1244 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1245 (*max_sectors)--;
1246 return 0;
1247}
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1262{
1263 unsigned int err_mask;
1264 struct ata_taskfile tf;
1265 int lba48 = ata_id_has_lba48(dev->id);
1266
1267 new_sectors--;
1268
1269 ata_tf_init(dev, &tf);
1270
1271 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1272
1273 if (lba48) {
1274 tf.command = ATA_CMD_SET_MAX_EXT;
1275 tf.flags |= ATA_TFLAG_LBA48;
1276
1277 tf.hob_lbal = (new_sectors >> 24) & 0xff;
1278 tf.hob_lbam = (new_sectors >> 32) & 0xff;
1279 tf.hob_lbah = (new_sectors >> 40) & 0xff;
1280 } else {
1281 tf.command = ATA_CMD_SET_MAX;
1282
1283 tf.device |= (new_sectors >> 24) & 0xf;
1284 }
1285
1286 tf.protocol |= ATA_PROT_NODATA;
1287 tf.device |= ATA_LBA;
1288
1289 tf.lbal = (new_sectors >> 0) & 0xff;
1290 tf.lbam = (new_sectors >> 8) & 0xff;
1291 tf.lbah = (new_sectors >> 16) & 0xff;
1292
1293 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1294 if (err_mask) {
1295 ata_dev_warn(dev,
1296 "failed to set max address (err_mask=0x%x)\n",
1297 err_mask);
1298 if (err_mask == AC_ERR_DEV &&
1299 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1300 return -EACCES;
1301 return -EIO;
1302 }
1303
1304 return 0;
1305}
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
/**
 * ata_hpa_resize - resize a device with a Host Protected Area set
 * @dev: target device
 *
 * Reads the device's native capacity and, when HPA unlocking is
 * requested (module parameter or per-device flag), attempts to raise
 * the accessible capacity to the native size.  On various failures
 * the device is flagged ATA_HORKAGE_BROKEN_HPA and HPA handling is
 * skipped on subsequent revalidations.
 *
 * Returns 0 on success (including the "nothing to do" and tolerated
 * failure cases), negative errno otherwise.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it?  only LBA-capable ATA disks with HPA
	 * enabled and no known-broken HPA support are candidates */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or we're not unlocking
		 * the HPA anyway, assume HPA support is broken and
		 * don't try again.
		 */
		if (rc == -EACCES || !unlock_hpa) {
			ata_dev_warn(dev,
				     "HPA support seems broken, skipping HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* a device abort is not fatal to revalidation */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}
	dev->n_native_sectors = native_sectors;

	/* nothing to do, or unlocking not requested? */
	if (native_sectors <= sectors || !unlock_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_info(dev,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_warn(dev,
				"native sectors (%llu) is smaller than sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_warn(dev,
			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
			     (unsigned long long)sectors,
			     (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data to pick up the new capacity */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_err(dev,
			    "failed to re-read IDENTIFY data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_info(dev,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
/*
 * ata_dump_id - debug-dump selected IDENTIFY DEVICE words
 * @id: IDENTIFY DEVICE page to dump
 *
 * Prints (via DPRINTK) the words relevant to capability and transfer
 * mode debugging: 49/53/63/64/75 (capabilities, validity, DMA/PIO
 * modes, queue depth), 80-84 (version/command sets) and 88/93
 * (UDMA modes, hardware reset result).
 */
static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
/**
 * ata_id_xfermask - compute xfermask from IDENTIFY DEVICE data
 * @id: IDENTIFY DEVICE page
 *
 * Builds the combined PIO/MWDMA/UDMA xfer_mask a device advertises,
 * including the old-style PIO mode words for pre-EIDE devices and the
 * CFA extended modes.
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;	/* PIO0-2 are always supported */
	} else {
		/* If word 64 isn't valid then Word 51 holds the PIO
		 * timing number for the maximum mode. Turn it into a
		 * mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;	/* unknown timing: PIO0 only */

		/* NOTE(review): old IORDY-less device PIO timing rules
		 * are not handled here - upstream carries a longer
		 * comment about transfer rate caps for such devices */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/* CompactFlash advanced timing modes, word 163:
		 * bits 2:0 extra PIO modes, bits 5:3 extra MWDMA modes */
		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;

		if (pio)
			pio_mask |= (1 << 5);	/* PIO5 */
		if (pio > 1)
			pio_mask |= (1 << 6);	/* PIO6 */
		if (dma)
			mwdma_mask |= (1 << 3);	/* MWDMA3 */
		if (dma > 1)
			mwdma_mask |= (1 << 4);	/* MWDMA4 */
	}

	udma_mask = 0;
	/* Word 53 bit 2 indicates the UDMA modes word is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
1511
1512static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1513{
1514 struct completion *waiting = qc->private_data;
1515
1516 complete(waiting);
1517}
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
/**
 * ata_exec_internal_sg - execute a libata-internal command
 * @dev: device the command targets
 * @tf: taskfile to execute; on return holds the result taskfile
 * @cdb: optional ATAPI CDB (ATAPI_CDB_LEN bytes), NULL for ATA
 * @dma_dir: data transfer direction, DMA_NONE for non-data commands
 * @sgl: scatterlist for the transfer, NULL when @dma_dir is DMA_NONE
 * @n_elem: number of entries in @sgl
 * @timeout: timeout in msecs, 0 selects a per-command default
 *
 * Executes @tf synchronously, bypassing the normal request queue, and
 * returns the resulting err_mask (0 on success).  Any in-flight
 * commands are "preempted": their bookkeeping is saved and restored
 * around the internal command.  Must be called with kernel context
 * and EH context held.
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	int auto_timeout = 0;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while the port is frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* Initialize internal qc.  With an error handler present the
	 * reserved internal tag is used so normal commands can keep
	 * flowing; without one, tag 0 is used.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* save and clear the active-command state so the internal
	 * command runs alone; restored below before returning */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout) {
		if (ata_probe_timeout)
			timeout = ata_probe_timeout * 1000;
		else {
			timeout = ata_internal_cmd_timeout(dev, command);
			auto_timeout = 1;
		}
	}

	/* drop EH exclusion while sleeping so EH can make progress */
	if (ap->ops->error_handler)
		ata_eh_release(ap);

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	if (ap->ops->error_handler)
		ata_eh_acquire(ap);

	ata_sff_flush_pio_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
					     command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis on failed commands */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		/* AC_ERR_OTHER is only meaningful on its own */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	spin_unlock_irqrestore(ap->lock, flags);

	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
		ata_internal_cmd_timed_out(dev, command);

	return err_mask;
}
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720unsigned ata_exec_internal(struct ata_device *dev,
1721 struct ata_taskfile *tf, const u8 *cdb,
1722 int dma_dir, void *buf, unsigned int buflen,
1723 unsigned long timeout)
1724{
1725 struct scatterlist *psg = NULL, sg;
1726 unsigned int n_elem = 0;
1727
1728 if (dma_dir != DMA_NONE) {
1729 WARN_ON(!buf);
1730 sg_init_one(&sg, buf, buflen);
1731 psg = &sg;
1732 n_elem++;
1733 }
1734
1735 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1736 timeout);
1737}
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1754{
1755 struct ata_taskfile tf;
1756
1757 ata_tf_init(dev, &tf);
1758
1759 tf.command = cmd;
1760 tf.flags |= ATA_TFLAG_DEVICE;
1761 tf.protocol = ATA_PROT_NODATA;
1762
1763 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1764}
1765
1766
1767
1768
1769
1770
1771
1772
1773unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1774{
1775
1776
1777
1778
1779 if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1780 return 0;
1781
1782
1783
1784 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1785 return 0;
1786
1787 if (ata_id_is_cfa(adev->id)
1788 && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1789 return 0;
1790
1791 if (adev->pio_mode > XFER_PIO_2)
1792 return 1;
1793
1794 if (ata_id_has_iordy(adev->id))
1795 return 1;
1796 return 0;
1797}
1798
1799
1800
1801
1802
1803
1804
1805
1806static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1807{
1808
1809 if (adev->id[ATA_ID_FIELD_VALID] & 2) {
1810 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1811
1812 if (pio) {
1813
1814 if (pio > 240)
1815 return 3 << ATA_SHIFT_PIO;
1816 return 7 << ATA_SHIFT_PIO;
1817 }
1818 }
1819 return 3 << ATA_SHIFT_PIO;
1820}
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832unsigned int ata_do_dev_read_id(struct ata_device *dev,
1833 struct ata_taskfile *tf, u16 *id)
1834{
1835 return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1836 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1837}
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into (ATA_ID_WORDS words)
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI devices.
 *	On failure, the command may be retried with the other IDENTIFY
 *	opcode once (ATA <-> ATAPI fallback).  On success, *@p_class is
 *	updated to the detected class.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	bool is_semb = class == ATA_DEV_SEMB;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_dbg(dev, "%s: ENTER\n", __func__);

retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_SEMB:
		/* some hard drives report SEMB signature; treat as ATA */
		class = ATA_DEV_ATA;
		/* fall through */
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	if (ap->ops->read_id)
		err_mask = ap->ops->read_id(dev, &tf, id);
	else
		err_mask = ata_do_dev_read_id(dev, &tf, id);

	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			ata_dev_dbg(dev, "NODEV after polling detection\n");
			return -ENOENT;
		}

		if (is_semb) {
			ata_dev_info(dev,
		     "IDENTIFY failed on device w/ SEMB sig, disabled\n");
			/* SEMB is not supported yet */
			*p_class = ATA_DEV_SEMB_UNSUP;
			return 0;
		}

		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			/* Device or controller might have reported
			 * the wrong device class.  Give a shot at the
			 * other IDENTIFY if the current one is
			 * aborted by the device.
			 */
			if (may_fallback) {
				may_fallback = 0;

				if (class == ATA_DEV_ATA)
					class = ATA_DEV_ATAPI;
				else
					class = ATA_DEV_ATA;
				goto retry;
			}

			/* Control reaches here iff the device aborted
			 * both flavors of IDENTIFYs which happens
			 * sometimes with phantom devices.
			 */
			ata_dev_dbg(dev,
				    "both IDENTIFYs aborted, assuming NODEV\n");
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
		ata_dev_dbg(dev, "dumping IDENTIFY data, "
			    "class=%d may_fallback=%d tried_spinup=%d\n",
			    class, may_fallback, tried_spinup);
		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
		if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
							ata_id_is_ata(id)) {
			ata_dev_dbg(dev,
				"host indicates ignore ATA devices, ignored\n");
			return -ENOENT;
		}
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
			     reason, err_mask);
	return rc;
}
2046
2047static int ata_do_link_spd_horkage(struct ata_device *dev)
2048{
2049 struct ata_link *plink = ata_dev_phys_link(dev);
2050 u32 target, target_limit;
2051
2052 if (!sata_scr_valid(plink))
2053 return 0;
2054
2055 if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2056 target = 1;
2057 else
2058 return 0;
2059
2060 target_limit = (1 << target) - 1;
2061
2062
2063 if (plink->sata_spd_limit <= target_limit)
2064 return 0;
2065
2066 plink->sata_spd_limit = target_limit;
2067
2068
2069
2070
2071
2072 if (plink->sata_spd > target) {
2073 ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2074 sata_spd_string(target));
2075 return -EAGAIN;
2076 }
2077 return 0;
2078}
2079
2080static inline u8 ata_dev_knobble(struct ata_device *dev)
2081{
2082 struct ata_port *ap = dev->link->ap;
2083
2084 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2085 return 0;
2086
2087 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2088}
2089
/* Configure NCQ for @dev and format a human-readable description of the
 * result into @desc (at most @desc_sz bytes).  Returns 0 on success or
 * -EIO if enabling FPDMA AutoActivate failed with a non-device error.
 */
static int ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->link->ap;
	/* hdepth: host queue depth, ddepth: device queue depth */
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
	unsigned int err_mask;
	char *aa_desc = "";

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return 0;
	}
	if (dev->horkage & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return 0;
	}
	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	/* Enable FPDMA AutoActivate if both host and device support it
	 * and the device isn't known to be broken with it. */
	if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
		(ap->flags & ATA_FLAG_FPDMA_AA) &&
		ata_id_has_fpdma_aa(dev->id)) {
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
			SATA_FPDMA_AA);
		if (err_mask) {
			ata_dev_err(dev,
				    "failed to enable AA (error_mask=0x%x)\n",
				    err_mask);
			/* A device error just means the device refused;
			 * anything else marks AA broken and aborts. */
			if (err_mask != AC_ERR_DEV) {
				dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
				return -EIO;
			}
		} else
			aa_desc = ", AA";
	}

	/* Report the effective depth; show host/device split if the host
	 * limits the device. */
	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
			ddepth, aa_desc);
	return 0;
}
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned long xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	/* NOTE(review): this early exit fires only when info messages
	 * are enabled; when they are off a disabled device falls
	 * through - confirm this is intentional. */
	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_dbg(dev, "%s: ENTER\n", __func__);

	/* set horkage */
	dev->horkage |= ata_dev_blacklisted(dev);
	ata_force_horkage(dev);

	if (dev->horkage & ATA_HORKAGE_DISABLE) {
		ata_dev_info(dev, "unsupported device, disabling\n");
		ata_dev_disable(dev);
		return 0;
	}

	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
	    dev->class == ATA_DEV_ATAPI) {
		ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
			     atapi_enabled ? "not supported with this driver"
			     : "disabled");
		ata_dev_disable(dev);
		return 0;
	}

	rc = ata_do_link_spd_horkage(dev);
	if (rc)
		return rc;

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_dbg(dev,
			    "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			    "85:%04x 86:%04x 87:%04x 88:%04x\n",
			    __func__,
			    id[49], id[82], id[83], id[84],
			    id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;
	dev->multi_count = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			/* CPRM may make this media unusable */
			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
				ata_dev_warn(dev,
	"supports DRM functions and may not be fully accessible\n");
			snprintf(revbuf, 7, "CFA");
		} else {
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
			/* Warn the user if the device has TPM extensions */
			if (ata_id_has_tpm(id))
				ata_dev_warn(dev,
	"supports DRM functions and may not be fully accessible\n");
		}

		dev->n_sectors = ata_id_n_sectors(id);

		/* get current R/W Multiple count setting */
		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
			unsigned int max = dev->id[47] & 0xff;
			unsigned int cnt = dev->id[59] & 0xff;
			/* only recognize/allow powers of two here */
			if (is_power_of_2(max) && is_power_of_2(cnt))
				if (cnt <= max)
					dev->multi_count = cnt;
		}

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[24];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
			if (rc)
				return rc;

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_info(dev, "%s: %s, %s, max %s\n",
					     revbuf, modelbuf, fwrevbuf,
					     ata_mode_string(xfer_mask));
				ata_dev_info(dev,
					     "%llu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders = id[1];
			dev->heads = id[3];
			dev->sectors = id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads = id[55];
				dev->sectors = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_info(dev, "%s: %s, %s, max %s\n",
					     revbuf, modelbuf, fwrevbuf,
					     ata_mode_string(xfer_mask));
				ata_dev_info(dev,
					     "%llu sectors, multi %u, CHS %u/%u/%u\n",
					     (unsigned long long)dev->n_sectors,
					     dev->multi_count, dev->cylinders,
					     dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		const char *dma_dir_string = "";
		u32 sntf;

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_warn(dev, "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support.  If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if (atapi_an &&
		    (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!sata_pmp_attached(ap) ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
			unsigned int err_mask;

			/* issue SET feature command to turn this on */
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_AN);
			if (err_mask)
				ata_dev_err(dev,
					    "failed to enable ATAPI AN (err_mask=0x%x)\n",
					    err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
			dev->flags |= ATA_DFLAG_DMADIR;
			dma_dir_string = ", DMADIR";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_info(dev,
				     "ATAPI: %s, %s, max %s%s%s%s\n",
				     modelbuf, fwrevbuf,
				     ata_mode_string(xfer_mask),
				     cdb_intr_string, atapi_an_string,
				     dma_dir_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	/* Limit PATA drive on SATA cable bridge transfers to udma5,
	   200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_info(dev, "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if ((dev->class == ATA_DEV_ATAPI) &&
	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
	}

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot. Do this after the dev_config call as some controllers
		   with buggy firmware may want to avoid reporting false device
		   bugs */

		if (print_info) {
			ata_dev_warn(dev,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_warn(dev,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
		ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
		ata_dev_warn(dev, "         contact the vendor or visit http://ata.wiki.kernel.org\n");
	}

	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
	return rc;
}
2441
2442
2443
2444
2445
2446
2447
2448
2449
/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 */
int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}
2454
2455
2456
2457
2458
2459
2460
2461
2462
/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 */
int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}
2467
2468
2469
2470
2471
2472
2473
2474
/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 */
int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}
2479
2480
2481
2482
2483
2484
2485
2486
/**
 *	ata_cable_ignore	-	return ignored PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which don't use cable type to limit
 *	transfer mode.
 */
int ata_cable_ignore(struct ata_port *ap)
{
	return ATA_CBL_PATA_IGN;
}
2491
2492
2493
2494
2495
2496
2497
2498
/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables.
 */
int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514
2515
2516
2517
2518
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on the
 *	bus.  Used only by drivers without a proper error handler; EH
 *	based drivers probe via EH instead.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */
int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	ata_for_each_dev(dev, &ap->link, ALL)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	ata_for_each_dev(dev, &ap->link, ALL) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	ata_for_each_dev(dev, &ap->link, ALL) {
		if (dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	/* read IDENTIFY page and configure devices. We have to do the
	 * identify specific sequence bass-ackwards so that PDIAG- is
	 * released by the slave device before we start probing the master.
	 */
	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of
	 * the reported cable types and sensed types.  When SATA
	 * drives indicate we have a bridge, we don't know which end
	 * of the link the bridge is which is a problem.
	 */
	ata_for_each_dev(dev, &ap->link, ENABLED)
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */
	ata_for_each_dev(dev, &ap->link, ENABLED) {
		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	ata_for_each_dev(dev, &ap->link, ENABLED)
		return 0;

	return -ENODEV;

 fail:
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fall through */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link, 0);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
2639
2640
2641
2642
2643
2644
2645
2646
2647
2648
2649static void sata_print_link_status(struct ata_link *link)
2650{
2651 u32 sstatus, scontrol, tmp;
2652
2653 if (sata_scr_read(link, SCR_STATUS, &sstatus))
2654 return;
2655 sata_scr_read(link, SCR_CONTROL, &scontrol);
2656
2657 if (ata_phys_link_online(link)) {
2658 tmp = (sstatus >> 4) & 0xf;
2659 ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
2660 sata_spd_string(tmp), sstatus, scontrol);
2661 } else {
2662 ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
2663 sstatus, scontrol);
2664 }
2665}
2666
2667
2668
2669
2670
2671
2672
2673
2674
2675struct ata_device *ata_dev_pair(struct ata_device *adev)
2676{
2677 struct ata_link *link = adev->link;
2678 struct ata_device *pair = &link->device[1 - adev->devno];
2679 if (!ata_dev_enabled(pair))
2680 return NULL;
2681 return pair;
2682}
2683
2684
2685
2686
2687
2688
2689
2690
2691
2692
2693
2694
2695
2696
2697
2698
2699
2700
2701
2702
2703
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *	@spd_limit: Additional limit
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	If @spd_limit is non-zero, the speed is limited to equal to or
 *	lower than @spd_limit if such speed is supported.  If
 *	@spd_limit is slower than any supported speed, only the lowest
 *	supported speed is allowed.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
{
	u32 sstatus, spd, mask;
	int rc, bit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0 && ata_sstatus_online(sstatus))
		spd = (sstatus >> 4) & 0xf;
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;

	/* unconditionally mask off the highest bit */
	bit = fls(mask) - 1;
	mask &= ~(1 << bit);

	/* Mask off all speeds higher than or equal to the current
	 * one.  Force 1.5Gbps if current SPD is not available.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		mask &= 1;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	if (spd_limit) {
		if (mask & ((1 << spd_limit) - 1))
			mask &= (1 << spd_limit) - 1;
		else {
			/* @spd_limit is below every supported speed;
			 * keep only the lowest supported one. */
			bit = ffs(mask) - 1;
			mask = 1 << bit;
		}
	}

	link->sata_spd_limit = mask;

	ata_link_warn(link, "limiting SATA link speed to %s\n",
		      sata_spd_string(fls(mask)));

	return 0;
}
2757
2758static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2759{
2760 struct ata_link *host_link = &link->ap->link;
2761 u32 limit, target, spd;
2762
2763 limit = link->sata_spd_limit;
2764
2765
2766
2767
2768
2769 if (!ata_is_host_link(link) && host_link->sata_spd)
2770 limit &= (1 << host_link->sata_spd) - 1;
2771
2772 if (limit == UINT_MAX)
2773 target = 0;
2774 else
2775 target = fls(limit);
2776
2777 spd = (*scontrol >> 4) & 0xf;
2778 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2779
2780 return spd != target;
2781}
2782
2783
2784
2785
2786
2787
2788
2789
2790
2791
2792
2793
2794
2795
2796
2797
2798static int sata_set_spd_needed(struct ata_link *link)
2799{
2800 u32 scontrol;
2801
2802 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2803 return 1;
2804
2805 return __sata_set_spd_needed(link, &scontrol);
2806}
2807
2808
2809
2810
2811
2812
2813
2814
2815
2816
2817
2818
2819
2820
2821int sata_set_spd(struct ata_link *link)
2822{
2823 u32 scontrol;
2824 int rc;
2825
2826 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2827 return rc;
2828
2829 if (!__sata_set_spd_needed(link, &scontrol))
2830 return 0;
2831
2832 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2833 return rc;
2834
2835 return 1;
2836}
2837
2838
2839
2840
2841
2842
2843
2844
2845
2846
2847
2848
2849
2850static const struct ata_timing ata_timing[] = {
2851
2852 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 },
2853 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 },
2854 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 },
2855 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 },
2856 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 },
2857 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 },
2858 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 },
2859
2860 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 },
2861 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 },
2862 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 },
2863
2864 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 },
2865 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 },
2866 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 },
2867 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 },
2868 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 },
2869
2870
2871 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 },
2872 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 },
2873 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 },
2874 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 },
2875 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 },
2876 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 },
2877 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 },
2878
2879 { 0xFF }
2880};
2881
/* ENOUGH: round @v up to a whole number of @unit-sized clocks.
 * EZ: like ENOUGH but preserves 0 as "not applicable". */
#define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
#define EZ(v, unit)		((v)?ENOUGH(v, unit):0)

/* Convert a timing in nanoseconds (scaled by 1000 below) into clock
 * counts: @T is the bus clock period, @UT the UDMA clock period. */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup	= EZ(t->setup      * 1000,  T);
	q->act8b	= EZ(t->act8b      * 1000,  T);
	q->rec8b	= EZ(t->rec8b      * 1000,  T);
	q->cyc8b	= EZ(t->cyc8b      * 1000,  T);
	q->active	= EZ(t->active     * 1000,  T);
	q->recover	= EZ(t->recover    * 1000,  T);
	q->dmack_hold	= EZ(t->dmack_hold * 1000,  T);
	q->cycle	= EZ(t->cycle      * 1000,  T);
	q->udma		= EZ(t->udma       * 1000, UT);
}
2897
2898void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2899 struct ata_timing *m, unsigned int what)
2900{
2901 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2902 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2903 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2904 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2905 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2906 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2907 if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
2908 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2909 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2910}
2911
2912const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
2913{
2914 const struct ata_timing *t = ata_timing;
2915
2916 while (xfer_mode > t->mode)
2917 t++;
2918
2919 if (xfer_mode == t->mode)
2920 return t;
2921 return NULL;
2922}
2923
/* Compute the bus-clock-quantized timing for @speed on @adev into @t.
 * @T is the bus clock period and @UT the UDMA clock period, both in
 * picoseconds-per-1000ns units as used by ata_timing_quantize().
 * Returns 0 on success, -EINVAL if @speed has no timing entry.
 */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const u16 *id = adev->id;
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */
	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */
	if (id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));

		if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2)
				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
			else if ((speed <= XFER_PIO_4) ||
				 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
			p.cycle = id[ATA_ID_EIDE_DMA_MIN];

		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */
	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */
	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */
	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/*
	 * In a few cases quantisation may produce enough errors to
	 * leave t->cycle too low for the sum of active and recovery
	 * if so we must correct this.
	 */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
2999
3000
3001
3002
3003
3004
3005
3006
3007
3008
3009
3010
3011
3012
3013
3014
3015
/**
 *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
 *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
 *	@cycle: cycle duration in ns
 *
 *	Return matching xfer mode for @cycle.  The returned mode is of
 *	the transfer type specified by @xfer_shift.  If @cycle is too
 *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
 *	than the fastest known mode, the fastest mode is returned.
 *
 *	RETURNS:
 *	Matching xfer_mode, 0xff if no match found.
 */
u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
{
	u8 base_mode = 0xff, last_mode = 0xff;
	const struct ata_xfer_ent *ent;
	const struct ata_timing *t;

	/* find the base mode for the given transfer shift */
	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (ent->shift == xfer_shift)
			base_mode = ent->base;

	/* walk progressively faster modes while their cycle time still
	 * fits within @cycle; remember the last one that fit */
	for (t = ata_timing_find_mode(base_mode);
	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
		unsigned short this_cycle;

		switch (xfer_shift) {
		case ATA_SHIFT_PIO:
		case ATA_SHIFT_MWDMA:
			this_cycle = t->cycle;
			break;
		case ATA_SHIFT_UDMA:
			this_cycle = t->udma;
			break;
		default:
			return 0xff;
		}

		if (cycle > this_cycle)
			break;

		last_mode = t->mode;
	}

	return last_mode;
}
3050
3051
3052
3053
3054
3055
3056
3057
3058
3059
3060
3061
3062
3063
3064
3065
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned long orig_mask, xfer_mask;
	unsigned long pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* drop the highest remaining PIO mode */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* drop the highest remaining DMA mode, UDMA first */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		/* cap UDMA at what a 40-wire cable can carry */
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fall through - PIO0 also disables all DMA */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* must keep at least one PIO mode; bail if nothing changed */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_warn(dev, "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
3138
/* Program @dev with its chosen transfer mode via SET FEATURES -
 * XFER MODE, then revalidate the device.  Several classes of device
 * error are deliberately ignored (old CFA, pre-ATA, devices already in
 * the requested mode).  Returns 0 on success, -errno on failure.
 */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
	const char *dev_err_whine = "";
	int ign_dev_err = 0;
	unsigned int err_mask = 0;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	/* NOSETXFER is only safe to honor on real SATA devices. */
	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
		dev_err_whine = " (SET_XFERMODE skipped)";
	else {
		if (nosetxfer)
			ata_dev_warn(dev,
				     "NOSETXFER but PATA detected - can't "
				     "skip SETXFER, might malfunction\n");
		err_mask = ata_dev_set_xfermode(dev);
	}

	if (err_mask & ~AC_ERR_DEV)
		goto fail;

	/* revalidate */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	if (dev->xfer_shift == ATA_SHIFT_PIO) {
		/* Old CFA may refuse this command, which is just fine */
		if (ata_id_is_cfa(dev->id))
			ign_dev_err = 1;
		/* Catch several broken garbage emulations plus some pre
		   ATA devices */
		if (ata_id_major_version(dev->id) == 0 &&
					dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;
		/* Some very old devices and some bad newer ones fail
		   any kind of SET_XFERMODE request but support PIO0-2
		   timings and no IORDY */
		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;
	}

	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
	    dev->dma_mode == XFER_MW_DMA_0 &&
	    (dev->id[63] >> 8) & 1)
		ign_dev_err = 1;

	/* if the device is actually configured correctly, ignore dev err */
	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
		ign_dev_err = 1;

	if (err_mask & AC_ERR_DEV) {
		if (!ign_dev_err)
			goto fail;
		else
			dev_err_whine = " (device error ignored)";
	}

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_info(dev, "configured for %s%s\n",
		     ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
		     dev_err_whine);

	return 0;

 fail:
	ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
	return -EIO;
}
3219
3220
3221
3222
3223
3224
3225
3226
3227
3228
3229
3230
3231
3232
3233
3234
3235
3236
/**
 *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
 *	@link: link on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_dev_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;
	int rc = 0, used_dma = 0, found = 0;

	/* step 1: calculate xfer_mask */
	ata_for_each_dev(dev, link, ENABLED) {
		unsigned long pio_mask, dma_mask;
		unsigned int mode_mask;

		mode_mask = ATA_DMA_MASK_ATA;
		if (dev->class == ATA_DEV_ATAPI)
			mode_mask = ATA_DMA_MASK_ATAPI;
		else if (ata_id_is_cfa(dev->id))
			mode_mask = ATA_DMA_MASK_CFA;

		ata_dev_xfermask(dev);
		ata_force_xfermask(dev);

		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);

		if (libata_dma_mask & mode_mask)
			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
						     dev->udma_mask);
		else
			dma_mask = 0;

		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (ata_dma_enabled(dev))
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	ata_for_each_dev(dev, link, ENABLED) {
		if (dev->pio_mode == 0xff) {
			ata_dev_warn(dev, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	ata_for_each_dev(dev, link, ENABLED) {
		if (!ata_dma_enabled(dev))
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	ata_for_each_dev(dev, link, ENABLED) {
		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status. If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = ap;

 out:
	if (rc)
		*r_failed_dev = dev;
	return rc;
}
3318
3319
3320
3321
3322
3323
3324
3325
3326
3327
3328
3329
3330
3331
3332
3333
3334
3335
3336
3337
3338
/**
 *	ata_wait_ready - wait for link to become ready
 *	@link: link to be waited on
 *	@deadline: deadline jiffies for the operation
 *	@check_ready: callback to check link readiness
 *
 *	Wait for @link to become ready.  @check_ready should return
 *	positive number if @link is ready, 0 if it isn't, -ENODEV if
 *	link doesn't seem to be occupied, other errno for other error
 *	conditions.
 *
 *	Transient -ENODEV conditions are allowed for
 *	ATA_TMOUT_FF_WAIT (or ATA_TMOUT_FF_WAIT_LONG for parallel
 *	scan hosts).
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_ready(struct ata_link *link, unsigned long deadline,
		   int (*check_ready)(struct ata_link *link))
{
	unsigned long start = jiffies;
	unsigned long nodev_deadline;
	int warned = 0;

	/* choose which state machine to use for the 0xff grace period */
	if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
	else
		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);

	/*
	 * Slave readiness can't be tested separately from master.  On
	 * ata_sff hosts, both master and slave links are evaluated
	 * together.  Don't use this function on slave links.
	 */
	WARN_ON(link == link->ap->slave_link);

	if (time_after(nodev_deadline, deadline))
		nodev_deadline = deadline;

	while (1) {
		unsigned long now = jiffies;
		int ready, tmp;

		ready = tmp = check_ready(link);
		if (ready > 0)
			return 0;

		/*
		 * -ENODEV could be transient.  Ignore -ENODEV if link
		 * is online.  Also, some SATA devices take a long
		 * time to clear 0xff after reset.  Wait for
		 * nodev_deadline if the link isn't offline.
		 *
		 * Note that an online link is one on which the phy is
		 * up, while offline is a confirmed-absent phy; a link
		 * that is neither is still being debounced.
		 */
		if (ready == -ENODEV) {
			if (ata_link_online(link))
				ready = 0;
			else if ((link->ap->flags & ATA_FLAG_SATA) &&
				 !ata_link_offline(link) &&
				 time_before(now, nodev_deadline))
				ready = 0;
		}

		if (ready)
			return ready;
		if (time_after(now, deadline))
			return -EBUSY;

		/* be patient but tell the user once after 5s */
		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_link_warn(link,
				"link is slow to respond, please be patient "
				"(ready=%d)\n", tmp);
			warned = 1;
		}

		ata_msleep(link->ap, 50);
	}
}
3405
3406
3407
3408
3409
3410
3411
3412
3413
3414
3415
3416
3417
3418
3419
/**
 *	ata_wait_after_reset - wait for link to become ready after reset
 *	@link: link to be waited on
 *	@deadline: deadline jiffies for the operation
 *	@check_ready: callback to check link readiness
 *
 *	Wait for @link to become ready after reset.  Sleeps
 *	ATA_WAIT_AFTER_RESET ms first so the device has time to settle
 *	before status is polled.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
			 int (*check_ready)(struct ata_link *link))
{
	ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);

	return ata_wait_ready(link, deadline, check_ready);
}
3427
3428
3429
3430
3431
3432
3433
3434
3435
3436
3437
3438
3439
3440
3441
3442
3443
3444
3445
3446
3447
3448
3449
/**
 *	sata_link_debounce - debounce SATA phy status
 *	@link: ATA link to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Make sure SStatus of @link reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constraints the
 *	beginning of the stable state.  Because DET gets stuck at 1 on
 *	some controllers after hot unplugging, this functions waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	@timeout is further limited by @deadline.  The sooner of the
 *	two is used.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval = params[0];
	unsigned long duration = params[1];
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	t = ata_deadline(jiffies, params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;		/* only the DET field is debounced */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		ata_msleep(link->ap, interval);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies,
				       ata_deadline(last_jiffies, duration)))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE to tell upper layer to lower link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}
3497
3498
3499
3500
3501
3502
3503
3504
3505
3506
3507
3508
3509
3510
3511
/**
 *	sata_link_resume - resume SATA link
 *	@link: ATA link to resume SATA
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Resume @link: clear DET in SControl, debounce the link and
 *	clear SError.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_resume(struct ata_link *link, const unsigned long *params,
		     unsigned long deadline)
{
	int tries = ATA_LINK_RESUME_TRIES;
	u32 scontrol, serror;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/*
	 * Writes to SControl sometimes get ignored under certain
	 * controllers (ata_piix SIDPR).  Make sure DET actually is
	 * cleared by retrying.
	 */
	do {
		scontrol = (scontrol & 0x0f0) | 0x300;
		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			return rc;

		/*
		 * Some PHYs react badly if SStatus is pounded
		 * immediately after resuming.  Delay 200ms before
		 * debouncing.
		 */
		ata_msleep(link->ap, 200);

		/* is SControl restored correctly? */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			return rc;
	} while ((scontrol & 0xf0f) != 0x300 && --tries);

	if ((scontrol & 0xf0f) != 0x300) {
		ata_link_warn(link, "failed to resume link (SControl %X)\n",
			     scontrol);
		return 0;
	}

	if (tries < ATA_LINK_RESUME_TRIES)
		ata_link_warn(link, "link resume succeeded after %d retries\n",
			      ATA_LINK_RESUME_TRIES - tries);

	if ((rc = sata_link_debounce(link, params, deadline)))
		return rc;

	/* clear SError, some PHYs require this even for SRST to work */
	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
		rc = sata_scr_write(link, SCR_ERROR, serror);

	return rc != -EINVAL ? rc : 0;
}
3562
3563
3564
3565
3566
3567
3568
3569
3570
3571
3572
3573
3574
3575
3576
3577
3578
3579
3580
/**
 *	sata_link_scr_lpm - manipulate SControl IPM and SPM fields
 *	@link: ATA link to manipulate SControl for
 *	@policy: LPM policy to configure
 *	@spm_wakeup: initiate LPM transition to active state
 *
 *	Manipulate the IPM field of the SControl register of @link
 *	according to @policy.  If @spm_wakeup is %true, the SPM field
 *	is manipulated to wake up the link.  This function also
 *	clears PHYRDY_CHG before returning, which is relevant for
 *	hardreset.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
		      bool spm_wakeup)
{
	struct ata_eh_context *ehc = &link->eh_context;
	bool woken_up = false;
	u32 scontrol;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	switch (policy) {
	case ATA_LPM_MAX_POWER:
		/* disable all LPM transitions */
		scontrol |= (0x3 << 8);
		/* initiate transition to active state */
		if (spm_wakeup) {
			scontrol |= (0x4 << 12);
			woken_up = true;
		}
		break;
	case ATA_LPM_MED_POWER:
		/* allow LPM to PARTIAL */
		scontrol &= ~(0x1 << 8);
		scontrol |= (0x2 << 8);
		break;
	case ATA_LPM_MIN_POWER:
		if (ata_link_nr_enabled(link) > 0)
			/* assume no restrictions, allow all LPM transitions */
			scontrol &= ~(0x3 << 8);
		else {
			/* empty port, power off */
			scontrol &= ~0xf;
			scontrol |= (0x1 << 2);
		}
		break;
	default:
		WARN_ON(1);
	}

	rc = sata_scr_write(link, SCR_CONTROL, scontrol);
	if (rc)
		return rc;

	/* give the link time to transit out of LPM state */
	if (woken_up)
		msleep(10);

	/* clear PHYRDY_CHG from SError */
	ehc->i.serror &= ~SERR_PHYRDY_CHG;
	return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
}
3634
3635
3636
3637
3638
3639
3640
3641
3642
3643
3644
3645
3646
3647
3648
3649
3650
3651
/**
 *	ata_std_prereset - prepare for reset
 *	@link: ATA link to be reset
 *	@deadline: deadline jiffies for the operation
 *
 *	@link is about to be reset.  Initialize it.  Failure from
 *	prereset makes libata abort whole reset sequence and give up
 *	that port, so prereset should be best-effort.  It does its
 *	best to prepare for reset sequence but if things go wrong, it
 *	should just whine, not fail.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume link */
	if (ap->flags & ATA_FLAG_SATA) {
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_warn(link,
				      "failed to resume link for reset (errno=%d)\n",
				      rc);
	}

	/* no point in trying softreset on offline link */
	if (ata_phys_link_offline(link))
		ehc->i.action &= ~ATA_EH_SOFTRESET;

	return 0;
}
3679
3680
3681
3682
3683
3684
3685
3686
3687
3688
3689
3690
3691
3692
3693
3694
3695
3696
3697
3698
3699
3700
3701
3702
3703
/**
 *	sata_link_hardreset - reset link via SATA phy reset
 *	@link: link to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *	@online: optional out parameter indicating link onlineness
 *	@check_ready: optional callback to check link readiness
 *
 *	SATA phy-reset @link using DET bits of SControl register.
 *	After hardreset, link readiness is waited upon using
 *	ata_wait_ready() if @check_ready is specified.  LLDs are
 *	allowed to not specify @check_ready and wait itself after this
 *	function returns.  Device classification is LLD's
 *	responsibility.
 *
 *	*@online is set to one iff reset succeeded and @link is online
 *	after reset.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
			unsigned long deadline,
			bool *online, int (*check_ready)(struct ata_link *))
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (online)
		*online = false;

	if (sata_set_spd_needed(link)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			goto out;

		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(link);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		goto out;

	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	ata_msleep(link->ap, 1);

	/* bring link back */
	rc = sata_link_resume(link, timing, deadline);
	if (rc)
		goto out;
	/* if link is offline nothing more to do */
	if (ata_phys_link_offline(link))
		goto out;

	/* Link is online.  From this point, -ENODEV too is an error. */
	if (online)
		*online = true;

	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
		/* If PMP is supported, we have to do follow-up SRST.
		 * Some PMPs don't send D2H Reg FIS after hardreset if
		 * the first port is empty.  Wait only for
		 * ATA_TMOUT_PMP_SRST_WAIT.
		 */
		if (check_ready) {
			unsigned long pmp_deadline;

			pmp_deadline = ata_deadline(jiffies,
						    ATA_TMOUT_PMP_SRST_WAIT);
			if (time_after(pmp_deadline, deadline))
				pmp_deadline = deadline;
			ata_wait_ready(link, pmp_deadline, check_ready);
		}
		rc = -EAGAIN;
		goto out;
	}

	rc = 0;
	if (check_ready)
		rc = ata_wait_ready(link, deadline, check_ready);
 out:
	if (rc && rc != -EAGAIN) {
		/* online is set iff link is online && reset succeeded */
		if (online)
			*online = false;
		ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
	}
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
3791
3792
3793
3794
3795
3796
3797
3798
3799
3800
3801
3802
3803
3804
3805
3806int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3807 unsigned long deadline)
3808{
3809 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3810 bool online;
3811 int rc;
3812
3813
3814 rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3815 return online ? -EAGAIN : rc;
3816}
3817
3818
3819
3820
3821
3822
3823
3824
3825
3826
3827
3828
3829
/**
 *	ata_std_postreset - standard postreset callback
 *	@link: the target ata_link
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  Note that
 *	the device might have been reset more than once using
 *	different reset methods before postreset is invoked.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_std_postreset(struct ata_link *link, unsigned int *classes)
{
	u32 serror;

	DPRINTK("ENTER\n");

	/* reset complete, clear SError */
	if (!sata_scr_read(link, SCR_ERROR, &serror))
		sata_scr_write(link, SCR_ERROR, serror);

	/* print link status */
	sata_print_link_status(link);

	DPRINTK("EXIT\n");
}
3845
3846
3847
3848
3849
3850
3851
3852
3853
3854
3855
3856
3857
3858
3859
3860
3861
3862static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3863 const u16 *new_id)
3864{
3865 const u16 *old_id = dev->id;
3866 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3867 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3868
3869 if (dev->class != new_class) {
3870 ata_dev_info(dev, "class mismatch %d != %d\n",
3871 dev->class, new_class);
3872 return 0;
3873 }
3874
3875 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3876 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3877 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3878 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3879
3880 if (strcmp(model[0], model[1])) {
3881 ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
3882 model[0], model[1]);
3883 return 0;
3884 }
3885
3886 if (strcmp(serial[0], serial[1])) {
3887 ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
3888 serial[0], serial[1]);
3889 return 0;
3890 }
3891
3892 return 1;
3893}
3894
3895
3896
3897
3898
3899
3900
3901
3902
3903
3904
3905
3906
3907
3908
3909int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3910{
3911 unsigned int class = dev->class;
3912 u16 *id = (void *)dev->link->ap->sector_buf;
3913 int rc;
3914
3915
3916 rc = ata_dev_read_id(dev, &class, readid_flags, id);
3917 if (rc)
3918 return rc;
3919
3920
3921 if (!ata_dev_same_device(dev, class, id))
3922 return -ENODEV;
3923
3924 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3925 return 0;
3926}
3927
3928
3929
3930
3931
3932
3933
3934
3935
3936
3937
3938
3939
3940
3941
3942
/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@new_class: new class code
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page, make sure @dev is still attached to the
 *	port and reconfigure it according to the new IDENTIFY page.
 *	Also handles late HPA lock/unlock detection by comparing
 *	n_sectors/n_native_sectors before and after reconfiguration.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
		       unsigned int readid_flags)
{
	u64 n_sectors = dev->n_sectors;
	u64 n_native_sectors = dev->n_native_sectors;
	int rc;

	if (!ata_dev_enabled(dev))
		return -ENODEV;

	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
	if (ata_class_enabled(new_class) &&
	    new_class != ATA_DEV_ATA &&
	    new_class != ATA_DEV_ATAPI &&
	    new_class != ATA_DEV_SEMB) {
		ata_dev_info(dev, "class mismatch %u != %u\n",
			     dev->class, new_class);
		rc = -ENODEV;
		goto fail;
	}

	/* re-read ID */
	rc = ata_dev_reread_id(dev, readid_flags);
	if (rc)
		goto fail;

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc)
		goto fail;

	/* verify n_sectors hasn't changed */
	if (dev->class != ATA_DEV_ATA || !n_sectors ||
	    dev->n_sectors == n_sectors)
		return 0;

	/* n_sectors has changed */
	ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
		     (unsigned long long)n_sectors,
		     (unsigned long long)dev->n_sectors);

	/*
	 * Something could have caused HPA to be unlocked
	 * involuntarily.  If n_native_sectors hasn't changed and the
	 * new size matches it, most probably the device signals the
	 * full size now.  Accept the larger size.
	 */
	if (dev->n_native_sectors == n_native_sectors &&
	    dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
		ata_dev_warn(dev,
			     "new n_sectors matches native, probably "
			     "late HPA unlock, n_sectors updated\n");
		/* use the larger n_sectors */
		return 0;
	}

	/*
	 * Some BIOSes boot w/o HPA but resume w/ HPA locked.  Try
	 * unlocking HPA in those cases.
	 *
	 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
	 */
	if (dev->n_native_sectors == n_native_sectors &&
	    dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
	    !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
		ata_dev_warn(dev,
			     "old n_sectors matches native, probably "
			     "late HPA lock, will try to unlock HPA\n");
		/* try unlocking HPA */
		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
		rc = -EIO;
	} else
		rc = -ENODEV;

	/* restore original n_[native_]sectors and fail */
	dev->n_native_sectors = n_native_sectors;
	dev->n_sectors = n_sectors;
 fail:
	ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
4023
/* One entry in the device quirk ("blacklist") table below. */
struct ata_blacklist_entry {
	const char *model_num;	/* glob pattern matched against IDENTIFY model */
	const char *model_rev;	/* glob pattern for firmware rev, NULL = any */
	unsigned long horkage;	/* ATA_HORKAGE_* flags to apply on match */
};
4029
static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
	{ "CRD-848[02]B",	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8[34]35",NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
	{ "2GB ATA Flash Disk", "ADMA428M",	ATA_HORKAGE_NODMA },
	/* Odd clown on sil3726/4726 PMPs */
	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA },

	/* Devices we expect to fail diagnostics */

	/* Devices where NCQ should be avoided */
	/* NCQ is slow */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
	/* http://thread.gmane.org/gmane.linux.ide/14907 */
	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
	/* NCQ is broken */
	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
	{ "OCZ CORE_SSD",	"02.10104",	ATA_HORKAGE_NONCQ },

	/* Seagate NCQ + FLUSH CACHE firmware bug */
	{ "ST31500341AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST31000333AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST3640[36]23AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST3320[68]13AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	/* Blacklist entries taken from Silicon Image 3124/3132
	   Windows driver .inf file - also several Linux problem reports */
	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },

	/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
	{ "C300-CTFDDAC128MAG",	"0001",		ATA_HORKAGE_NONCQ, },

	/* Devices with NCQ limits */

	/* Devices which report 1 sector over size HPA */
	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },

	/* this one allows HPA unlocking but fails IOs on the area */
	{ "OCZ-VERTEX",		    "1.30",	ATA_HORKAGE_BROKEN_HPA },

	/* Devices which get the IVB wrong */
	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },

	/* Devices which report IVB incorrectly */
	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
	/* Maybe we should just blacklist TSSTcorp... */
	{ "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]",  ATA_HORKAGE_IVB, },

	/* Devices that do not need bridging limits applied */
	{ "MTRON MSP-SATA*",		NULL,	ATA_HORKAGE_BRIDGE_OK, },

	/* Devices which aren't very happy with higher link speeds */
	{ "WD My Book",			NULL,	ATA_HORKAGE_1_5_GBPS, },

	/*
	 * Devices which choke on SETXFER.  Applies only if both the
	 * device and controller are SATA.
	 */
	{ "PIONEER DVD-RW  DVRTD08",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVRTD08A",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVR-215",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },

	/* End Marker */
	{ }
};
4143
4144
4145
4146
4147
4148
4149
4150
4151
4152
4153
4154
4155
4156
4157
4158
4159
4160
4161
4162
4163
4164
4165
4166
4167
4168
4169
4170
/**
 *	glob_match - match a text string against a glob-style pattern
 *	@text: the string to be examined
 *	@pattern: the glob-style pattern to be matched against
 *
 *	Either/both of text and pattern can be empty strings.
 *
 *	Match text against a glob-style pattern, with wildcards and simple
 *	sets:
 *
 *		?	matches any single character.
 *		*	matches any run of characters.
 *		[xyz]	matches a single character from the set: x, y, or z.
 *		[a-d]	matches a single character from the range: a, b, c, or d.
 *		[a-d0-9] matches a single character from either range.
 *
 *	The special characters ?, [, -, or *, can be matched using a set, eg. [*]
 *	Behaviour with malformed patterns is undefined, though generally
 *	reasonable.
 *
 *	Sample patterns:  "SD1?",  "SD1[0-5]",  "*R0",  "SD*1?[012]*xx"
 *
 *	This function uses one level of recursion per '*' in pattern.
 *	Since it calls _nothing_ else, and has _no_ explicit local variables,
 *	this will not cause stack problems for any reasonable use here.
 *
 *	RETURNS:
 *	0 on match, 1 otherwise.
 */
static int glob_match (const char *text, const char *pattern)
{
	const char *t = text;
	const char *p = pattern;

	do {
		/* literal character or the '?' single-char wildcard */
		if (*t == *p || *p == '?') {
			if (*p == '\0')
				return 0;	/* both ended together: match */
			p++;
		} else {
			/* otherwise only a '[' ... ']' set can save us */
			if (*t == '\0' || *p != '[')
				break;		/* plain mismatch */
			for (p++; *p && *p != ']' && *t != *p; p++) {
				/* handle an 'a-z' style range inside the set */
				if (*p == '-' && p[-1] != '[' &&
				    *t > p[-1] && *t < p[1]) {
					p++;
					break;
				}
			}
			if (*p == '\0' || *p == ']')
				return 1;	/* set exhausted: no match */
			while (*p && *p++ != ']')
				;		/* skip remainder of the set */
		}
	} while (*++t && *p);

	/* '*' wildcard: recursively try every suffix of the remaining text */
	if (*p == '*') {
		if (*++p == '\0')
			return 0;	/* trailing '*' matches the rest */
		while (*t) {
			if (glob_match(t, p) == 0)
				return 0;	/* some suffix matched */
			t++;
		}
	}
	return (*t == '\0' && *p == '\0') ? 0 : 1;
}
4210
4211static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4212{
4213 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4214 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4215 const struct ata_blacklist_entry *ad = ata_device_blacklist;
4216
4217 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4218 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4219
4220 while (ad->model_num) {
4221 if (!glob_match(model_num, ad->model_num)) {
4222 if (ad->model_rev == NULL)
4223 return ad->horkage;
4224 if (!glob_match(model_rev, ad->model_rev))
4225 return ad->horkage;
4226 }
4227 ad++;
4228 }
4229 return 0;
4230}
4231
4232static int ata_dma_blacklisted(const struct ata_device *dev)
4233{
4234
4235
4236
4237
4238 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4239 (dev->flags & ATA_DFLAG_CDB_INTR))
4240 return 1;
4241 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4242}
4243
4244
4245
4246
4247
4248
4249
4250
4251
4252static int ata_is_40wire(struct ata_device *dev)
4253{
4254 if (dev->horkage & ATA_HORKAGE_IVB)
4255 return ata_drive_40wire_relaxed(dev->id);
4256 return ata_drive_40wire(dev->id);
4257}
4258
4259
4260
4261
4262
4263
4264
4265
4266
4267
4268
4269
4270
4271
4272static int cable_is_40wire(struct ata_port *ap)
4273{
4274 struct ata_link *link;
4275 struct ata_device *dev;
4276
4277
4278 if (ap->cbl == ATA_CBL_PATA40)
4279 return 1;
4280
4281
4282 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4283 return 0;
4284
4285
4286
4287
4288
4289 if (ap->cbl == ATA_CBL_PATA40_SHORT)
4290 return 0;
4291
4292
4293
4294
4295
4296
4297
4298
4299
4300
4301 ata_for_each_link(link, ap, EDGE) {
4302 ata_for_each_dev(dev, link, ENABLED) {
4303 if (!ata_is_40wire(dev))
4304 return 0;
4305 }
4306 }
4307 return 1;
4308}
4309
4310
4311
4312
4313
4314
4315
4316
4317
4318
4319
4320
4321
/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, etc...
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 *	CFA Advanced TrueIDE timings are not allowed on a shared
	 *	cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_warn(dev,
			     "device is on DMA blacklist, disabling DMA\n");
	}

	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_warn(dev,
			     "simplex DMA is claimed by other device, disabling DMA\n");
	}

	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 * Check this last so that we know if the transfer rate was
	 * solely limited by the cable.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if (cable_is_40wire(ap)) {
			ata_dev_warn(dev,
				     "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
4387
4388
4389
4390
4391
4392
4393
4394
4395
4396
4397
4398
4399
4400
4401
/**
 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 *	@dev: Device to which command will be sent
 *
 *	Issue SET FEATURES - XFER MODE command to device @dev
 *	on port @ap.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	/* Some controllers and ATAPI devices show flaky interrupt
	 * behavior after setting xfer mode.  Use polling instead.
	 */
	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
	tf.protocol = ATA_PROT_NODATA;
	/* If we are using IORDY we must send the mode setting command */
	if (ata_pio_need_iordy(dev))
		tf.nsect = dev->xfer_mode;
	/* If the device has IORDY and the controller does not - turn it off */
	else if (ata_id_has_iordy(dev->id))
		tf.nsect = 0x01;
	else /* In the ancient relic department - skip all of this */
		return 0;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4432
4433
4434
4435
4436
4437
4438
4439
4440
4441
4442
4443
4444
4445
4446
4447
4448unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
4449{
4450 struct ata_taskfile tf;
4451 unsigned int err_mask;
4452
4453
4454 DPRINTK("set features - SATA features\n");
4455
4456 ata_tf_init(dev, &tf);
4457 tf.command = ATA_CMD_SET_FEATURES;
4458 tf.feature = enable;
4459 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4460 tf.protocol = ATA_PROT_NODATA;
4461 tf.nsect = feature;
4462
4463 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4464
4465 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4466 return err_mask;
4467}
4468
4469
4470
4471
4472
4473
4474
4475
4476
4477
4478
4479
4480
/**
 *	ata_dev_init_params - Issue INIT DEV PARAMS command
 *	@dev: Device to which command will be sent
 *	@heads: Number of heads (taskfile parameter)
 *	@sectors: Number of sectors (taskfile parameter)
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	/* A clean abort indicates an original or just out of spec drive
	   and we should continue as we issue the setup based on the
	   drive reported working geometry */
	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
		err_mask = 0;

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4511
4512
4513
4514
4515
4516
4517
4518
4519
4520
/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->sg;
	int dir = qc->dma_dir;

	WARN_ON_ONCE(sg == NULL);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* unmap using the element count from before dma_map_sg() merging */
	if (qc->n_elem)
		dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->sg = NULL;
}
4537
4538
4539
4540
4541
4542
4543
4544
4545
4546
4547
4548
4549
4550
4551
4552int atapi_check_dma(struct ata_queued_cmd *qc)
4553{
4554 struct ata_port *ap = qc->ap;
4555
4556
4557
4558
4559 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4560 unlikely(qc->nbytes & 15))
4561 return 1;
4562
4563 if (ap->ops->check_atapi_dma)
4564 return ap->ops->check_atapi_dma(qc);
4565
4566 return 0;
4567}
4568
4569
4570
4571
4572
4573
4574
4575
4576
4577
4578
4579
4580
4581
4582
4583
/**
 *	ata_std_qc_defer - Check whether a qc needs to be deferred
 *	@qc: ATA command in question
 *
 *	Non-NCQ commands cannot run with any other command, NCQ or
 *	not.  As upper layer only knows the queue depth, we are
 *	responsible for maintaining exclusion.  This function checks
 *	whether a new command @qc can be issued.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	ATA_DEFER_* if deferring is needed, 0 otherwise.
 */
int ata_std_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;

	if (qc->tf.protocol == ATA_PROT_NCQ) {
		/* NCQ can run alongside other NCQ commands only */
		if (!ata_tag_valid(link->active_tag))
			return 0;
	} else {
		/* non-NCQ requires the link to be completely idle */
		if (!ata_tag_valid(link->active_tag) && !link->sactive)
			return 0;
	}

	return ATA_DEFER_LINK;
}
4598
/* no-op ->qc_prep for controllers that need no per-command preparation */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4600
4601
4602
4603
4604
4605
4606
4607
4608
4609
4610
4611
4612
4613
/**
 *	ata_sg_init - Associate command with scatter-gather table.
 *	@qc: Command to be associated
 *	@sg: Scatter-gather table.
 *	@n_elem: Number of elements in s/g table.
 *
 *	Initialize the data-related elements of queued_cmd @qc
 *	to point to a scatter-gather table @sg, containing @n_elem
 *	elements.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
		 unsigned int n_elem)
{
	qc->sg = sg;
	qc->n_elem = n_elem;
	qc->cursg = qc->sg;
}
4621
4622
4623
4624
4625
4626
4627
4628
4629
4630
4631
4632
4633
4634
/**
 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 *	@qc: Command with scatter-gather table to be mapped.
 *
 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */
static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int n_elem;

	VPRINTK("ENTER, ata%u\n", ap->print_id);

	/* dma_map_sg() returns 0 on mapping failure */
	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
	if (n_elem < 1)
		return -1;

	DPRINTK("%d sg elements mapped\n", n_elem);
	/* keep the pre-mapping count for dma_unmap_sg() in ata_sg_clean() */
	qc->orig_n_elem = qc->n_elem;
	qc->n_elem = n_elem;
	qc->flags |= ATA_QCFLAG_DMAMAP;

	return 0;
}
4653
4654
4655
4656
4657
4658
4659
4660
4661
4662
4663
4664
4665
/**
 *	swap_buf_le16 - swap halves of 16-bit words in place
 *	@buf:  Buffer to swap
 *	@buf_words:  Number of 16-bit words in buffer.
 *
 *	Swap halves of 16-bit words if needed to convert from
 *	little-endian byte order to native cpu byte order, or
 *	vice-versa.  No-op on little-endian machines.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void swap_buf_le16(u16 *buf, unsigned int buf_words)
{
#ifdef __BIG_ENDIAN
	unsigned int i;

	for (i = 0; i < buf_words; i++)
		buf[i] = le16_to_cpu(buf[i]);
#endif /* __BIG_ENDIAN */
}
4675
4676
4677
4678
4679
4680
4681
4682
4683
/**
 *	ata_qc_new - Request an available ATA command, for queueing
 *	@ap: target port
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Pointer to an allocated qc, or NULL if none is available.
 */
static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
{
	struct ata_queued_cmd *qc = NULL;
	unsigned int i;

	/* no command while frozen */
	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
		return NULL;

	/* the last tag is reserved for internal command. */
	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
		if (!test_and_set_bit(i, &ap->qc_allocated)) {
			qc = __ata_qc_from_tag(ap, i);
			break;
		}

	if (qc)
		qc->tag = i;

	return qc;
}
4705
4706
4707
4708
4709
4710
4711
4712
4713
4714struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4715{
4716 struct ata_port *ap = dev->link->ap;
4717 struct ata_queued_cmd *qc;
4718
4719 qc = ata_qc_new(ap);
4720 if (qc) {
4721 qc->scsicmd = NULL;
4722 qc->ap = ap;
4723 qc->dev = dev;
4724
4725 ata_qc_reinit(qc);
4726 }
4727
4728 return qc;
4729}
4730
4731
4732
4733
4734
4735
4736
4737
4738
4739
4740
/**
 *	ata_qc_free - free unused ata_queued_cmd
 *	@qc: Command to complete
 *
 *	Designed to free unused ata_queued_cmd object
 *	in case something prevents using it.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_free(struct ata_queued_cmd *qc)
{
	struct ata_port *ap;
	unsigned int tag;

	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	ap = qc->ap;

	qc->flags = 0;
	tag = qc->tag;
	if (likely(ata_tag_valid(tag))) {
		/* poison the tag before releasing it to the allocator */
		qc->tag = ATA_TAG_POISON;
		clear_bit(tag, &ap->qc_allocated);
	}
}
4756
/**
 *	__ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA command has
 *	completed, with either an ok or not-ok status.
 *
 *	For NCQ commands, callers should use ata_qc_complete_multiple()
 *	instead of calling this repeatedly.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap;
	struct ata_link *link;

	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
	ap = qc->ap;
	link = qc->dev->link;

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		link->sactive &= ~(1 << qc->tag);
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
4795
/* Capture the result taskfile of @qc via the LLD's qc_fill_rtf hook. */
static void fill_result_tf(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	qc->result_tf.flags = qc->tf.flags;
	ap->ops->qc_fill_rtf(qc);
}
4803
/*
 * A data transfer completed successfully after a mode change; clear
 * ATA_DFLAG_DUBIOUS_XFER for the device when the completed command
 * exercised the newly configured transfer mode.
 */
static void ata_verify_xfer(struct ata_queued_cmd *qc)
{
	struct ata_device *dev = qc->dev;

	/* no-data commands don't exercise the data path */
	if (ata_is_nodata(qc->tf.protocol))
		return;

	/* a PIO command doesn't validate a configured DMA mode */
	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
		return;

	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
}
4816
4817
4818
4819
4820
4821
4822
4823
4824
4825
4826
4827
4828
4829
4830
4831
/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA command has
 *	completed, with either an ok or not-ok status.
 *
 *	Refrain from calling this function multiple times when
 *	successfully completing multiple NCQ commands.
 *	ata_qc_complete_multiple() should be used instead, which will
 *	properly update IRQ expect state.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc.  libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
	 * not synchronize with interrupt handler.  Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		struct ata_device *dev = qc->dev;
		struct ata_eh_info *ehi = &dev->link->eh_info;

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		/*
		 * Finish internal commands without any further
		 * processing and always with the result TF filled.
		 */
		if (unlikely(ata_tag_internal(qc->tag))) {
			fill_result_tf(qc);
			__ata_qc_complete(qc);
			return;
		}

		/*
		 * Non-internal qc has failed.  Fill the result TF and
		 * summon EH.
		 */
		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			fill_result_tf(qc);
			ata_qc_schedule_eh(qc);
			return;
		}

		WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		/* Some commands invalidate cached device properties;
		 * have EH revalidate after they complete.
		 */
		switch (qc->tf.command) {
		case ATA_CMD_SET_FEATURES:
			if (qc->tf.feature != SETFEATURES_WC_ON &&
			    qc->tf.feature != SETFEATURES_WC_OFF)
				break;
			/* fall through - WC change needs revalidation */
		case ATA_CMD_INIT_DEV_PARAMS:
		case ATA_CMD_SET_MULTI:
			/* revalidate device */
			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
			ata_port_schedule_eh(ap);
			break;

		case ATA_CMD_SLEEP:
			dev->flags |= ATA_DFLAG_SLEEPING;
			break;
		}

		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
			ata_verify_xfer(qc);

		__ata_qc_complete(qc);
	} else {
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
4918
4919
4920
4921
4922
4923
4924
4925
4926
4927
4928
4929
4930
4931
4932
4933
4934
4935
4936
4937
4938
4939int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
4940{
4941 int nr_done = 0;
4942 u32 done_mask;
4943
4944 done_mask = ap->qc_active ^ qc_active;
4945
4946 if (unlikely(done_mask & qc_active)) {
4947 ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n",
4948 ap->qc_active, qc_active);
4949 return -EINVAL;
4950 }
4951
4952 while (done_mask) {
4953 struct ata_queued_cmd *qc;
4954 unsigned int tag = __ffs(done_mask);
4955
4956 qc = ata_qc_from_tag(ap, tag);
4957 if (qc) {
4958 ata_qc_complete(qc);
4959 nr_done++;
4960 }
4961 done_mask &= ~(1 << tag);
4962 }
4963
4964 return nr_done;
4965}
4966
4967
4968
4969
4970
4971
4972
4973
4974
4975
4976
4977
4978
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Account the qc as active, set up scatter/gather when needed and
 *	hand it to the port's ->qc_issue.  Any failure completes the qc
 *	with an error mask set.
 *
 *	LOCKING: caller holds the host lock.
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;
	u8 prot = qc->tf.protocol;

	/* Only one non-NCQ command may be outstanding; the check is
	 * skipped for old EH, which reuses active qc for ATAPI sense. */
	WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	if (ata_is_ncq(prot)) {
		WARN_ON_ONCE(link->sactive & (1 << qc->tag));

		/* first NCQ tag makes this link active */
		if (!link->sactive)
			ap->nr_active_links++;
		link->sactive |= 1 << qc->tag;
	} else {
		WARN_ON_ONCE(link->sactive);

		ap->nr_active_links++;
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	/* data commands must carry a non-empty sg table */
	if (WARN_ON_ONCE(ata_is_data(prot) &&
			 (!qc->sg || !qc->n_elem || !qc->nbytes)))
		goto sys_err;

	/* map sg for DMA, and for PIO when the LLD does PIO via DMA */
	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
				 (ap->flags & ATA_FLAG_PIO_DMA)))
		if (ata_sg_setup(qc))
			goto sys_err;

	/* a sleeping device must be reset before it can take commands */
	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
		link->eh_info.action |= ATA_EH_RESET;
		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
		ata_link_abort(link);
		return;
	}

	ap->ops->qc_prep(qc);

	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sys_err:
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	ata_qc_complete(qc);
}
5040
5041
5042
5043
5044
5045
5046
5047
5048
5049
5050
5051
5052
5053int sata_scr_valid(struct ata_link *link)
5054{
5055 struct ata_port *ap = link->ap;
5056
5057 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5058}
5059
5060
5061
5062
5063
5064
5065
5066
5067
5068
5069
5070
5071
5072
5073
5074
5075
5076int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5077{
5078 if (ata_is_host_link(link)) {
5079 if (sata_scr_valid(link))
5080 return link->ap->ops->scr_read(link, reg, val);
5081 return -EOPNOTSUPP;
5082 }
5083
5084 return sata_pmp_scr_read(link, reg, val);
5085}
5086
5087
5088
5089
5090
5091
5092
5093
5094
5095
5096
5097
5098
5099
5100
5101
5102
5103int sata_scr_write(struct ata_link *link, int reg, u32 val)
5104{
5105 if (ata_is_host_link(link)) {
5106 if (sata_scr_valid(link))
5107 return link->ap->ops->scr_write(link, reg, val);
5108 return -EOPNOTSUPP;
5109 }
5110
5111 return sata_pmp_scr_write(link, reg, val);
5112}
5113
5114
5115
5116
5117
5118
5119
5120
5121
5122
5123
5124
5125
5126
5127
5128
5129int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5130{
5131 if (ata_is_host_link(link)) {
5132 int rc;
5133
5134 if (sata_scr_valid(link)) {
5135 rc = link->ap->ops->scr_write(link, reg, val);
5136 if (rc == 0)
5137 rc = link->ap->ops->scr_read(link, reg, &val);
5138 return rc;
5139 }
5140 return -EOPNOTSUPP;
5141 }
5142
5143 return sata_pmp_scr_write(link, reg, val);
5144}
5145
5146
5147
5148
5149
5150
5151
5152
5153
5154
5155
5156
5157
5158
5159
5160bool ata_phys_link_online(struct ata_link *link)
5161{
5162 u32 sstatus;
5163
5164 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5165 ata_sstatus_online(sstatus))
5166 return true;
5167 return false;
5168}
5169
5170
5171
5172
5173
5174
5175
5176
5177
5178
5179
5180
5181
5182
5183
5184bool ata_phys_link_offline(struct ata_link *link)
5185{
5186 u32 sstatus;
5187
5188 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5189 !ata_sstatus_online(sstatus))
5190 return true;
5191 return false;
5192}
5193
5194
5195
5196
5197
5198
5199
5200
5201
5202
5203
5204
5205
5206
5207
5208
5209
5210bool ata_link_online(struct ata_link *link)
5211{
5212 struct ata_link *slave = link->ap->slave_link;
5213
5214 WARN_ON(link == slave);
5215
5216 return ata_phys_link_online(link) ||
5217 (slave && ata_phys_link_online(slave));
5218}
5219
5220
5221
5222
5223
5224
5225
5226
5227
5228
5229
5230
5231
5232
5233
5234
5235
5236bool ata_link_offline(struct ata_link *link)
5237{
5238 struct ata_link *slave = link->ap->slave_link;
5239
5240 WARN_ON(link == slave);
5241
5242 return ata_phys_link_offline(link) &&
5243 (!slave || ata_phys_link_offline(slave));
5244}
5245
5246#ifdef CONFIG_PM
5247static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
5248 unsigned int action, unsigned int ehi_flags,
5249 int wait)
5250{
5251 struct ata_link *link;
5252 unsigned long flags;
5253 int rc;
5254
5255
5256
5257
5258 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5259 ata_port_wait_eh(ap);
5260 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5261 }
5262
5263
5264 spin_lock_irqsave(ap->lock, flags);
5265
5266 ap->pm_mesg = mesg;
5267 if (wait) {
5268 rc = 0;
5269 ap->pm_result = &rc;
5270 }
5271
5272 ap->pflags |= ATA_PFLAG_PM_PENDING;
5273 ata_for_each_link(link, ap, HOST_FIRST) {
5274 link->eh_info.action |= action;
5275 link->eh_info.flags |= ehi_flags;
5276 }
5277
5278 ata_port_schedule_eh(ap);
5279
5280 spin_unlock_irqrestore(ap->lock, flags);
5281
5282
5283 if (wait) {
5284 ata_port_wait_eh(ap);
5285 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5286 }
5287
5288 return rc;
5289}
5290
/* map a struct device embedded as ata_port.tdev back to its port */
#define to_ata_port(d) container_of(d, struct ata_port, tdev)
5292
5293static int ata_port_suspend_common(struct device *dev, pm_message_t mesg)
5294{
5295 struct ata_port *ap = to_ata_port(dev);
5296 unsigned int ehi_flags = ATA_EHI_QUIET;
5297 int rc;
5298
5299
5300
5301
5302
5303
5304
5305
5306
5307 if (mesg.event == PM_EVENT_SUSPEND)
5308 ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
5309
5310 rc = ata_port_request_pm(ap, mesg, 0, ehi_flags, 1);
5311 return rc;
5312}
5313
5314static int ata_port_suspend(struct device *dev)
5315{
5316 if (pm_runtime_suspended(dev))
5317 return 0;
5318
5319 return ata_port_suspend_common(dev, PMSG_SUSPEND);
5320}
5321
5322static int ata_port_do_freeze(struct device *dev)
5323{
5324 if (pm_runtime_suspended(dev))
5325 pm_runtime_resume(dev);
5326
5327 return ata_port_suspend_common(dev, PMSG_FREEZE);
5328}
5329
5330static int ata_port_poweroff(struct device *dev)
5331{
5332 if (pm_runtime_suspended(dev))
5333 return 0;
5334
5335 return ata_port_suspend_common(dev, PMSG_HIBERNATE);
5336}
5337
5338static int ata_port_resume_common(struct device *dev)
5339{
5340 struct ata_port *ap = to_ata_port(dev);
5341 int rc;
5342
5343 rc = ata_port_request_pm(ap, PMSG_ON, ATA_EH_RESET,
5344 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 1);
5345 return rc;
5346}
5347
/*
 * System resume: perform the common resume and, on success, re-sync
 * the runtime-PM core so it knows the port is active again.
 */
static int ata_port_resume(struct device *dev)
{
	int ret = ata_port_resume_common(dev);

	if (ret == 0) {
		pm_runtime_disable(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
	}

	return ret;
}
5361
/* Runtime idle callback: suspend an idle port immediately. */
static int ata_port_runtime_idle(struct device *dev)
{
	return pm_runtime_suspend(dev);
}
5366
/* PM callbacks for ATA port devices; resume doubles as thaw/restore */
static const struct dev_pm_ops ata_port_pm_ops = {
	.suspend = ata_port_suspend,
	.resume = ata_port_resume,
	.freeze = ata_port_do_freeze,
	.thaw = ata_port_resume,
	.poweroff = ata_port_poweroff,
	.restore = ata_port_resume,

	/* runtime resume skips the runtime-PM re-sync done by resume */
	.runtime_suspend = ata_port_suspend,
	.runtime_resume = ata_port_resume_common,
	.runtime_idle = ata_port_runtime_idle,
};
5379
5380
5381
5382
5383
5384
5385
5386
5387int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5388{
5389 host->dev->power.power_state = mesg;
5390 return 0;
5391}
5392
5393
5394
5395
5396
5397
5398
5399void ata_host_resume(struct ata_host *host)
5400{
5401 host->dev->power.power_state = PMSG_ON;
5402}
5403#endif
5404
/* device_type shared by all ata_port transport devices */
struct device_type ata_port_type = {
	.name = "ata_port",
#ifdef CONFIG_PM
	.pm = &ata_port_pm_ops,
#endif
};
5411
5412
5413
5414
5415
5416
5417
5418
5419
5420
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = ata_dev_phys_link(dev);
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the attached device, reset it */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/* The INIT_MASK flags and horkage are read by EH without the
	 * host lock, so clear them under the lock to avoid torn reads
	 * racing with EH. */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	/* clear only the probe-volatile region of the structure */
	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
5446
5447
5448
5449
5450
5451
5452
5453
5454
5455
5456
5457
/**
 *	ata_link_init - Initialize an ata_link structure
 *	@ap: port the link belongs to
 *	@link: link structure to initialize
 *	@pmp: port multiplier port number this link represents
 *
 *	Initialize @link and its devices for @ap.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
{
	int i;

	/* clear everything except for devices (preserved region) */
	memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
	       ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);

	link->ap = ap;
	link->pmp = pmp;
	link->active_tag = ATA_TAG_POISON;	/* no command active yet */
	link->hw_sata_spd_limit = UINT_MAX;	/* unrestricted until probed */

	/* initialize each device slot on the link */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &link->device[i];

		dev->link = link;
		dev->devno = dev - link->device;
#ifdef CONFIG_ATA_ACPI
		dev->gtf_filter = ata_acpi_gtf_filter;
#endif
		ata_dev_init(dev);
	}
}
5483
5484
5485
5486
5487
5488
5489
5490
5491
5492
5493
5494
5495
5496
5497int sata_link_init_spd(struct ata_link *link)
5498{
5499 u8 spd;
5500 int rc;
5501
5502 rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5503 if (rc)
5504 return rc;
5505
5506 spd = (link->saved_scontrol >> 4) & 0xf;
5507 if (spd)
5508 link->hw_sata_spd_limit &= (1 << spd) - 1;
5509
5510 ata_force_link_limits(link);
5511
5512 link->sata_spd_limit = link->hw_sata_spd_limit;
5513
5514 return 0;
5515}
5516
5517
5518
5519
5520
5521
5522
5523
5524
5525
5526
5527
5528
/**
 *	ata_port_alloc - allocate and initialize basic ATA port resources
 *	@host: ATA host this allocated port belongs to
 *
 *	RETURNS:
 *	Allocated ATA port on success, NULL on allocation failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	/* ports start frozen; EH thaws them during initial probe */
	ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
	ap->lock = &host->lock;
	ap->print_id = -1;	/* assigned at registration time */
	ap->host = host;
	ap->dev = host->dev;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	mutex_init(&ap->scsi_scan_mutex);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);
	init_completion(&ap->park_req_pending);
	/* deferrable: fast-drain timing need not be exact */
	init_timer_deferrable(&ap->fastdrain_timer);
	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
	ap->fastdrain_timer.data = (unsigned long)ap;

	ap->cbl = ATA_CBL_NONE;

	ata_link_init(ap, &ap->link, 0);

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	ata_sff_port_init(ap);

	return ap;
}
5576
/* devres release callback: free every port and its per-port data */
static void ata_host_release(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if (ap->scsi_host)
			scsi_host_put(ap->scsi_host);

		kfree(ap->pmp_link);
		kfree(ap->slave_link);
		kfree(ap);
		host->ports[i] = NULL;
	}

	dev_set_drvdata(gendev, NULL);
}
5599
5600
5601
5602
5603
5604
5605
5606
5607
5608
5609
5610
5611
5612
5613
5614
5615
5616
5617
5618
5619
/**
 *	ata_host_alloc - allocate and init basic ATA host resources
 *	@dev: generic device this host is associated with
 *	@max_ports: maximum number of ATA ports associated with this host
 *
 *	Allocate and initialize a host along with @max_ports ports.  All
 *	resources are devres-managed: on failure everything allocated in
 *	this function is released via the devres group.
 *
 *	RETURNS:
 *	Allocated host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;

	DPRINTK("ENTER\n");

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return NULL;

	/* alloc a container for our list of ATA ports (buses);
	 * +1 leaves a NULL sentinel slot after the last port */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);

	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
	if (!host)
		goto err_out;

	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	mutex_init(&host->eh_mutex);
	host->dev = dev;
	host->n_ports = max_ports;

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

 err_out:
	devres_release_group(dev, NULL);
	return NULL;
}
5665
5666
5667
5668
5669
5670
5671
5672
5673
5674
5675
5676
5677
5678
5679
5680
5681
/**
 *	ata_host_alloc_pinfo - alloc host and init with port_info array
 *	@dev: generic device this host is associated with
 *	@ppi: array of ATA port_info to initialize host with
 *	@n_ports: number of ATA ports attached to this host
 *
 *	Allocate a host and initialize each port with the corresponding
 *	entry of @ppi.  A NULL entry reuses the last non-NULL one.
 *	NOTE(review): assumes ppi[0] is non-NULL — otherwise pi stays
 *	NULL and is dereferenced below; confirm callers guarantee this.
 *
 *	RETURNS:
 *	Allocated host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc_pinfo(struct device *dev,
				      const struct ata_port_info * const * ppi,
				      int n_ports)
{
	const struct ata_port_info *pi;
	struct ata_host *host;
	int i, j;

	host = ata_host_alloc(dev, n_ports);
	if (!host)
		return NULL;

	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ppi[j])
			pi = ppi[j++];

		ap->pio_mask = pi->pio_mask;
		ap->mwdma_mask = pi->mwdma_mask;
		ap->udma_mask = pi->udma_mask;
		ap->flags |= pi->flags;
		ap->link.flags |= pi->link_flags;
		ap->ops = pi->port_ops;

		/* first non-dummy port ops provide the host ops */
		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
			host->ops = pi->port_ops;
	}

	return host;
}
5713
5714
5715
5716
5717
5718
5719
5720
5721
5722
5723
5724
5725
5726
5727
5728
5729
5730
5731
5732
5733
5734
5735
5736
5737
5738
5739
5740
5741
5742
5743
5744
5745
5746
5747
5748
5749
5750
5751
5752
5753
5754
5755
5756
5757
5758
5759
5760int ata_slave_link_init(struct ata_port *ap)
5761{
5762 struct ata_link *link;
5763
5764 WARN_ON(ap->slave_link);
5765 WARN_ON(ap->flags & ATA_FLAG_PMP);
5766
5767 link = kzalloc(sizeof(*link), GFP_KERNEL);
5768 if (!link)
5769 return -ENOMEM;
5770
5771 ata_link_init(ap, link, 1);
5772 ap->slave_link = link;
5773 return 0;
5774}
5775
/* devres release callback: stop all ports, then the host itself */
static void ata_host_stop(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	WARN_ON(!(host->flags & ATA_HOST_STARTED));

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}

	if (host->ops->host_stop)
		host->ops->host_stop(host);
}
5793
5794
5795
5796
5797
5798
5799
5800
5801
5802
5803
5804
5805
5806
5807
5808
5809
5810
5811
5812
5813
/**
 *	ata_finalize_port_ops - finalize ata_port_operations
 *	@ops: ata_port_operations to finalize
 *
 *	An ata_port_operations can inherit from another via ->inherits.
 *	This walks the inheritance chain and, treating the struct as an
 *	array of function pointers (every member before ->inherits),
 *	fills each unset slot from the nearest ancestor that sets it.
 *	Slots explicitly set to an ERR_PTR sentinel mean "force NULL"
 *	and are cleared afterwards.  ->inherits is NULLed so the chain
 *	is only walked once; the spinlock serializes concurrent
 *	finalization of shared ops tables.
 *
 *	NOTE: relies on every member before ->inherits being a pointer
 *	of the same size — a layout invariant of the struct.
 */
static void ata_finalize_port_ops(struct ata_port_operations *ops)
{
	static DEFINE_SPINLOCK(lock);
	const struct ata_port_operations *cur;
	void **begin = (void **)ops;
	void **end = (void **)&ops->inherits;
	void **pp;

	if (!ops || !ops->inherits)
		return;

	spin_lock(&lock);

	/* inherit unset slots from each ancestor in order */
	for (cur = ops->inherits; cur; cur = cur->inherits) {
		void **inherit = (void **)cur;

		for (pp = begin; pp < end; pp++, inherit++)
			if (!*pp)
				*pp = *inherit;
	}

	/* ERR_PTR sentinels mean "explicitly not implemented" */
	for (pp = begin; pp < end; pp++)
		if (IS_ERR(*pp))
			*pp = NULL;

	ops->inherits = NULL;

	spin_unlock(&lock);
}
5843
5844
5845
5846
5847
5848
5849
5850
5851
5852
5853
5854
5855
5856
5857
5858
5859
/**
 *	ata_host_start - start and freeze ports of an ATA host
 *	@host: ATA host to start ports for
 *
 *	Finalize port ops, call each port's ->port_start, and freeze
 *	the ports.  If any port or the host has a stop callback, a
 *	devres entry is registered so ata_host_stop runs on release.
 *	On failure, already-started ports are stopped in reverse order.
 *
 *	RETURNS:
 *	0 if all ports are started successfully, -errno otherwise.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
int ata_host_start(struct ata_host *host)
{
	int have_stop = 0;
	void *start_dr = NULL;
	int i, rc;

	if (host->flags & ATA_HOST_STARTED)
		return 0;

	ata_finalize_port_ops(host->ops);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_finalize_port_ops(ap->ops);

		/* first non-dummy port supplies host ops if unset */
		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_stop)
			have_stop = 1;
	}

	if (host->ops->host_stop)
		have_stop = 1;

	/* register the stop callback before starting anything so a
	 * later devres release can always undo the starts */
	if (have_stop) {
		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
		if (!start_dr)
			return -ENOMEM;
	}

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				if (rc != -ENODEV)
					dev_err(host->dev,
						"failed to start port %d (errno=%d)\n",
						i, rc);
				goto err_out;
			}
		}
		ata_eh_freeze_port(ap);
	}

	if (start_dr)
		devres_add(host->dev, start_dr);
	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	/* stop ports that were already started, in reverse order */
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	devres_free(start_dr);
	return rc;
}
5923
5924
5925
5926
5927
5928
5929
5930
5931
5932
5933
5934
5935
/**
 *	ata_host_init - initialize a caller-allocated ata_host
 *	@host: host to initialize
 *	@dev: device host is attached to
 *	@flags: host flags
 *	@ops: port_ops
 *
 *	For hosts not allocated via ata_host_alloc() (e.g. legacy SAS).
 */
void ata_host_init(struct ata_host *host, struct device *dev,
		   unsigned long flags, struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	mutex_init(&host->eh_mutex);
	host->dev = dev;
	host->flags = flags;
	host->ops = ops;
}
5945
/* Kick EH to probe all devices on @ap (new-style EH, asynchronous). */
void __ata_port_probe(struct ata_port *ap)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	unsigned long flags;

	/* kick EH for boot probing */
	spin_lock_irqsave(ap->lock, flags);

	ehi->probe_mask |= ATA_ALL_DEVICES;
	ehi->action |= ATA_EH_RESET;
	ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

	ap->pflags &= ~ATA_PFLAG_INITIALIZING;
	ap->pflags |= ATA_PFLAG_LOADING;
	ata_port_schedule_eh(ap);

	spin_unlock_irqrestore(ap->lock, flags);
}
5964
5965int ata_port_probe(struct ata_port *ap)
5966{
5967 int rc = 0;
5968
5969 if (ap->ops->error_handler) {
5970 __ata_port_probe(ap);
5971 ata_port_wait_eh(ap);
5972 } else {
5973 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
5974 rc = ata_bus_probe(ap);
5975 DPRINTK("ata%u: bus probe end\n", ap->print_id);
5976 }
5977 return rc;
5978}
5979
5980
/* async_schedule callback: probe a port, then scan its SCSI host */
static void async_port_probe(void *data, async_cookie_t cookie)
{
	struct ata_port *ap = data;

	/*
	 * If we're not allowed to scan this host in parallel,
	 * we need to wait until all previous scans have completed
	 * before going further.  Port 0 always probes first so it
	 * never needs to wait.
	 */
	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
		async_synchronize_cookie(cookie);

	(void)ata_port_probe(ap);

	/* SCSI scanning is always ordered across ports for stable
	 * device naming */
	async_synchronize_cookie(cookie);

	ata_scsi_scan_host(ap, 1);
}
6002
6003
6004
6005
6006
6007
6008
6009
6010
6011
6012
6013
6014
6015
6016
6017
6018
/**
 *	ata_host_register - register an ATA host with the SCSI layer
 *	@host: ATA host to register
 *	@sht: template for SCSI host
 *
 *	Assign print IDs, add transport ports, attach SCSI hosts, set
 *	cable/SPD defaults, then kick asynchronous probing on every
 *	port.  The host must have been started (ata_host_start()).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_err(host->dev, "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Blow away unused ports.  The host was over-allocated if the
	 * caller reduced n_ports after ata_host_alloc(); free any
	 * ports past n_ports up to the NULL sentinel. */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++)
		host->ports[i]->print_id = atomic_inc_return(&ata_print_id);

	/* Create associated sysfs transport objects */
	for (i = 0; i < host->n_ports; i++) {
		rc = ata_tport_add(host->dev,host->ports[i]);
		if (rc) {
			goto err_tadd;
		}
	}

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		goto err_tadd;

	/* associate with ACPI nodes */
	ata_acpi_associate(host);

	/* set cable, sata_spd_limit and report per-port config */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned long xfer_mask;

		/* SATA ports default to a SATA "cable" */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);
		if (ap->slave_link)
			sata_link_init_spd(ap->slave_link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap)) {
			ata_port_info(ap, "%cATA max %s %s\n",
				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
				      ata_mode_string(xfer_mask),
				      ap->link.eh_info.desc);
			ata_ehi_clear_desc(&ap->link.eh_info);
		} else
			ata_port_info(ap, "DUMMY\n");
	}

	/* perform each probe asynchronously */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		async_schedule(async_port_probe, ap);
	}

	return 0;

 err_tadd:
	/* unwind transport ports added so far */
	while (--i >= 0) {
		ata_tport_delete(host->ports[i]);
	}
	return rc;

}
6100
6101
6102
6103
6104
6105
6106
6107
6108
6109
6110
6111
6112
6113
6114
6115
6116
6117
6118
6119
6120
6121
6122
6123
/**
 *	ata_host_activate - start host, request IRQ and register it
 *	@host: target ATA host
 *	@irq: IRQ to request
 *	@irq_handler: irq_handler used when requesting IRQ
 *	@irq_flags: irq_flags used when requesting IRQ
 *	@sht: scsi_host_template to use when registering the host
 *
 *	Convenience wrapper: start the host, grab the (devres-managed)
 *	IRQ and register.  An @irq of 0 means the LLD handles IRQs
 *	itself and only start+register are performed.
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
int ata_host_activate(struct ata_host *host, int irq,
		      irq_handler_t irq_handler, unsigned long irq_flags,
		      struct scsi_host_template *sht)
{
	int i, rc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	/* Special case for polling mode */
	if (!irq) {
		WARN_ON(irq_handler);
		return ata_host_register(host, sht);
	}

	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
			      dev_driver_string(host->dev), host);
	if (rc)
		return rc;

	for (i = 0; i < host->n_ports; i++)
		ata_port_desc(host->ports[i], "irq %d", irq);

	rc = ata_host_register(host, sht);
	/* if failed, just free the IRQ and leave ports alone */
	if (rc)
		devm_free_irq(host->dev, irq, host);

	return rc;
}
6155
6156
6157
6158
6159
6160
6161
6162
6163
6164
6165
6166
/**
 *	ata_port_detach - detach an ATA port
 *	@ap: ATA port to be detached
 *
 *	Tell EH to unload the port (detaching all devices), wait for it
 *	to finish, then tear down transport objects and the SCSI host.
 *	Ports without new-style EH skip the EH handshake.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	ata_port_schedule_eh(ap);
	spin_unlock_irqrestore(ap->lock, flags);

	/* wait till EH commits suicide */
	ata_port_wait_eh(ap);

	/* it better be dead now */
	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));

	cancel_delayed_work_sync(&ap->hotplug_task);

 skip_eh:
	/* remove transport links before the SCSI host goes away */
	if (ap->pmp_link) {
		int i;
		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
			ata_tlink_delete(&ap->pmp_link[i]);
	}
	ata_tport_delete(ap);

	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
6199
6200
6201
6202
6203
6204
6205
6206
6207
6208
/**
 *	ata_host_detach - Detach all ports of an ATA host
 *	@host: Host to detach
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_detach(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++)
		ata_port_detach(host->ports[i]);

	/* the host is dead now, dissociate ACPI */
	ata_acpi_dissociate(host);
}
6219
6220#ifdef CONFIG_PCI
6221
6222
6223
6224
6225
6226
6227
6228
6229
6230
6231
6232
6233void ata_pci_remove_one(struct pci_dev *pdev)
6234{
6235 struct device *dev = &pdev->dev;
6236 struct ata_host *host = dev_get_drvdata(dev);
6237
6238 ata_host_detach(host);
6239}
6240
6241
6242int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6243{
6244 unsigned long tmp = 0;
6245
6246 switch (bits->width) {
6247 case 1: {
6248 u8 tmp8 = 0;
6249 pci_read_config_byte(pdev, bits->reg, &tmp8);
6250 tmp = tmp8;
6251 break;
6252 }
6253 case 2: {
6254 u16 tmp16 = 0;
6255 pci_read_config_word(pdev, bits->reg, &tmp16);
6256 tmp = tmp16;
6257 break;
6258 }
6259 case 4: {
6260 u32 tmp32 = 0;
6261 pci_read_config_dword(pdev, bits->reg, &tmp32);
6262 tmp = tmp32;
6263 break;
6264 }
6265
6266 default:
6267 return -EINVAL;
6268 }
6269
6270 tmp &= bits->mask;
6271
6272 return (tmp == bits->val) ? 1 : 0;
6273}
6274
6275#ifdef CONFIG_PM
/* Save PCI state and power the device down for suspend. */
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	/* enter D3hot only for states that actually sleep */
	if (mesg.event & PM_EVENT_SLEEP)
		pci_set_power_state(pdev, PCI_D3hot);
}
6284
/* Restore PCI state and re-enable the device after resume. */
int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev,
			"failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}
6302
6303int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6304{
6305 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6306 int rc = 0;
6307
6308 rc = ata_host_suspend(host, mesg);
6309 if (rc)
6310 return rc;
6311
6312 ata_pci_device_do_suspend(pdev, mesg);
6313
6314 return 0;
6315}
6316
6317int ata_pci_device_resume(struct pci_dev *pdev)
6318{
6319 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6320 int rc;
6321
6322 rc = ata_pci_device_do_resume(pdev);
6323 if (rc == 0)
6324 ata_host_resume(host);
6325 return rc;
6326}
6327#endif
6328
6329#endif
6330
6331static int __init ata_parse_force_one(char **cur,
6332 struct ata_force_ent *force_ent,
6333 const char **reason)
6334{
6335
6336
6337
6338
6339
6340 static struct ata_force_param force_tbl[] __initdata = {
6341 { "40c", .cbl = ATA_CBL_PATA40 },
6342 { "80c", .cbl = ATA_CBL_PATA80 },
6343 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
6344 { "unk", .cbl = ATA_CBL_PATA_UNK },
6345 { "ign", .cbl = ATA_CBL_PATA_IGN },
6346 { "sata", .cbl = ATA_CBL_SATA },
6347 { "1.5Gbps", .spd_limit = 1 },
6348 { "3.0Gbps", .spd_limit = 2 },
6349 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
6350 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
6351 { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID },
6352 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
6353 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
6354 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
6355 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
6356 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
6357 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
6358 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
6359 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
6360 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
6361 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
6362 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
6363 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
6364 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6365 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6366 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6367 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6368 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6369 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6370 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6371 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6372 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6373 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6374 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6375 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6376 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6377 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6378 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6379 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6380 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6381 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6382 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6383 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6384 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6385 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
6386 { "nohrst", .lflags = ATA_LFLAG_NO_HRST },
6387 { "nosrst", .lflags = ATA_LFLAG_NO_SRST },
6388 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6389 };
6390 char *start = *cur, *p = *cur;
6391 char *id, *val, *endp;
6392 const struct ata_force_param *match_fp = NULL;
6393 int nr_matches = 0, i;
6394
6395
6396 while (*p != '\0' && *p != ',')
6397 p++;
6398
6399 if (*p == '\0')
6400 *cur = p;
6401 else
6402 *cur = p + 1;
6403
6404 *p = '\0';
6405
6406
6407 p = strchr(start, ':');
6408 if (!p) {
6409 val = strstrip(start);
6410 goto parse_val;
6411 }
6412 *p = '\0';
6413
6414 id = strstrip(start);
6415 val = strstrip(p + 1);
6416
6417
6418 p = strchr(id, '.');
6419 if (p) {
6420 *p++ = '\0';
6421 force_ent->device = simple_strtoul(p, &endp, 10);
6422 if (p == endp || *endp != '\0') {
6423 *reason = "invalid device";
6424 return -EINVAL;
6425 }
6426 }
6427
6428 force_ent->port = simple_strtoul(id, &endp, 10);
6429 if (p == endp || *endp != '\0') {
6430 *reason = "invalid port/link";
6431 return -EINVAL;
6432 }
6433
6434 parse_val:
6435
6436 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6437 const struct ata_force_param *fp = &force_tbl[i];
6438
6439 if (strncasecmp(val, fp->name, strlen(val)))
6440 continue;
6441
6442 nr_matches++;
6443 match_fp = fp;
6444
6445 if (strcasecmp(val, fp->name) == 0) {
6446 nr_matches = 1;
6447 break;
6448 }
6449 }
6450
6451 if (!nr_matches) {
6452 *reason = "unknown value";
6453 return -EINVAL;
6454 }
6455 if (nr_matches > 1) {
6456 *reason = "ambigious value";
6457 return -EINVAL;
6458 }
6459
6460 force_ent->param = *match_fp;
6461
6462 return 0;
6463}
6464
/* Parse the whole libata.force string into ata_force_tbl. */
static void __init ata_parse_force_param(void)
{
	int idx = 0, size = 1;
	int last_port = -1, last_device = -1;
	char *p, *cur, *next;

	/* calculate maximum number of params and allocate force_tbl */
	for (p = ata_force_param_buf; *p; p++)
		if (*p == ',')
			size++;

	ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
	if (!ata_force_tbl) {
		printk(KERN_WARNING "ata: failed to extend force table, "
		       "libata.force ignored\n");
		return;
	}

	/* parse and populate the table; bad entries are skipped */
	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
		const char *reason = "";
		struct ata_force_ent te = { .port = -1, .device = -1 };

		next = cur;
		if (ata_parse_force_one(&next, &te, &reason)) {
			printk(KERN_WARNING "ata: failed to parse force "
			       "parameter \"%s\" (%s)\n",
			       cur, reason);
			continue;
		}

		/* an entry without a port inherits the previous one's
		 * port/device, letting values be listed in shorthand */
		if (te.port == -1) {
			te.port = last_port;
			te.device = last_device;
		}

		ata_force_tbl[idx++] = te;

		last_port = te.port;
		last_device = te.device;
	}

	ata_force_tbl_size = idx;
}
6509
6510static int __init ata_init(void)
6511{
6512 int rc;
6513
6514 ata_parse_force_param();
6515
6516 rc = ata_sff_init();
6517 if (rc) {
6518 kfree(ata_force_tbl);
6519 return rc;
6520 }
6521
6522 libata_transport_init();
6523 ata_scsi_transport_template = ata_attach_transport();
6524 if (!ata_scsi_transport_template) {
6525 ata_sff_exit();
6526 rc = -ENOMEM;
6527 goto err_out;
6528 }
6529
6530 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6531 return 0;
6532
6533err_out:
6534 return rc;
6535}
6536
/* Module exit: tear down in reverse order of ata_init(). */
static void __exit ata_exit(void)
{
	ata_release_transport(ata_scsi_transport_template);
	libata_transport_exit();
	ata_sff_exit();
	kfree(ata_force_tbl);
}
6544
6545subsys_initcall(ata_init);
6546module_exit(ata_exit);
6547
/* global rate limiter for libata messages: 1 burst per HZ/5 jiffies */
static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);

/* Nonzero when a rate-limited message may be emitted right now. */
int ata_ratelimit(void)
{
	return __ratelimit(&ratelimit);
}
6554
6555
6556
6557
6558
6559
6560
6561
6562
6563
6564
6565
6566
6567
6568
/**
 *	ata_msleep - ATA EH owner aware msleep
 *	@ap: ATA port to attribute the sleep to (may be NULL)
 *	@msecs: duration to sleep in milliseconds
 *
 *	Sleep @msecs.  If the current task owns @ap's EH, ownership is
 *	released before sleeping and reacquired afterwards so other
 *	ports sharing the host can make EH progress meanwhile.
 *
 *	LOCKING:
 *	Might sleep.
 */
void ata_msleep(struct ata_port *ap, unsigned int msecs)
{
	bool owns_eh = ap && ap->host->eh_owner == current;

	if (owns_eh)
		ata_eh_release(ap);

	msleep(msecs);

	if (owns_eh)
		ata_eh_acquire(ap);
}
6581
6582
6583
6584
6585
6586
6587
6588
6589
6590
6591
6592
6593
6594
6595
6596
6597
6598
6599
6600
6601
6602
6603
6604
6605
/**
 *	ata_wait_register - wait until register value changes
 *	@ap: ATA port to wait register for, can be NULL
 *	@reg: IO-mapped register
 *	@mask: Mask to apply to read register value
 *	@val: Wait condition
 *	@interval: polling interval in milliseconds
 *	@timeout: timeout in milliseconds
 *
 *	Poll *@reg every @interval ms until (*@reg & @mask) != @val or
 *	@timeout elapses.  The register is read at least twice, so a
 *	nonzero @timeout always means at least two reads.
 *
 *	RETURNS:
 *	The final register value.
 */
u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval, unsigned long timeout)
{
	unsigned long deadline;
	u32 tmp;

	tmp = ioread32(reg);

	/* Calculate timeout _after_ the first read to make sure
	 * preceding writes reach the controller before starting to
	 * eat away the timeout.
	 */
	deadline = ata_deadline(jiffies, timeout);

	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
		ata_msleep(ap, interval);
		tmp = ioread32(reg);
	}

	return tmp;
}
6627
6628
6629
6630
/* qc_issue for the dummy port: reject every command immediately with a
 * system error -- a dummy port cannot execute anything. */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}
6635
/* error_handler for the dummy port: intentionally a no-op, there is no
 * hardware to recover. */
static void ata_dummy_error_handler(struct ata_port *ap)
{
	/* nothing to do */
}
6640
/* Minimal port_ops for ports without usable hardware: qc_prep is a
 * no-op, qc_issue always fails with AC_ERR_SYSTEM and EH does nothing.
 * Presumably used for ports that should be registered but ignored --
 * verify against users of ata_dummy_port_info. */
struct ata_port_operations ata_dummy_port_ops = {
	.qc_prep = ata_noop_qc_prep,
	.qc_issue = ata_dummy_qc_issue,
	.error_handler = ata_dummy_error_handler,
};
6646
/* Port info wrapping ata_dummy_port_ops; all other fields zeroed. */
const struct ata_port_info ata_dummy_port_info = {
	.port_ops = &ata_dummy_port_ops,
};
6650
6651
6652
6653
6654int ata_port_printk(const struct ata_port *ap, const char *level,
6655 const char *fmt, ...)
6656{
6657 struct va_format vaf;
6658 va_list args;
6659 int r;
6660
6661 va_start(args, fmt);
6662
6663 vaf.fmt = fmt;
6664 vaf.va = &args;
6665
6666 r = printk("%sata%u: %pV", level, ap->print_id, &vaf);
6667
6668 va_end(args);
6669
6670 return r;
6671}
6672EXPORT_SYMBOL(ata_port_printk);
6673
6674int ata_link_printk(const struct ata_link *link, const char *level,
6675 const char *fmt, ...)
6676{
6677 struct va_format vaf;
6678 va_list args;
6679 int r;
6680
6681 va_start(args, fmt);
6682
6683 vaf.fmt = fmt;
6684 vaf.va = &args;
6685
6686 if (sata_pmp_attached(link->ap) || link->ap->slave_link)
6687 r = printk("%sata%u.%02u: %pV",
6688 level, link->ap->print_id, link->pmp, &vaf);
6689 else
6690 r = printk("%sata%u: %pV",
6691 level, link->ap->print_id, &vaf);
6692
6693 va_end(args);
6694
6695 return r;
6696}
6697EXPORT_SYMBOL(ata_link_printk);
6698
6699int ata_dev_printk(const struct ata_device *dev, const char *level,
6700 const char *fmt, ...)
6701{
6702 struct va_format vaf;
6703 va_list args;
6704 int r;
6705
6706 va_start(args, fmt);
6707
6708 vaf.fmt = fmt;
6709 vaf.va = &args;
6710
6711 r = printk("%sata%u.%02u: %pV",
6712 level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
6713 &vaf);
6714
6715 va_end(args);
6716
6717 return r;
6718}
6719EXPORT_SYMBOL(ata_dev_printk);
6720
/**
 *	ata_print_version - print a driver version string via dev_printk
 *	@dev: device the message is attributed to
 *	@version: version string to print (KERN_DEBUG level)
 */
void ata_print_version(const struct device *dev, const char *version)
{
	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
}
EXPORT_SYMBOL(ata_print_version);
6726
6727
6728
6729
6730
6731
6732
/* SATA PHY debounce timing tables and base port_ops templates */
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_base_port_ops);
EXPORT_SYMBOL_GPL(sata_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
/* host/link/device iteration, host lifecycle and command completion */
EXPORT_SYMBOL_GPL(ata_link_next);
EXPORT_SYMBOL_GPL(ata_dev_next);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_host_alloc);
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
EXPORT_SYMBOL_GPL(ata_slave_link_init);
EXPORT_SYMBOL_GPL(ata_host_start);
EXPORT_SYMBOL_GPL(ata_host_register);
EXPORT_SYMBOL_GPL(ata_host_activate);
EXPORT_SYMBOL_GPL(ata_host_detach);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
/* taskfile/FIS conversion and transfer-mode helpers */
EXPORT_SYMBOL_GPL(atapi_cmd_type);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_pack_xfermask);
EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
EXPORT_SYMBOL_GPL(ata_mode_string);
EXPORT_SYMBOL_GPL(ata_id_xfermask);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_std_qc_defer);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
/* link reset/resume and misc utilities */
EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(ata_wait_after_reset);
EXPORT_SYMBOL_GPL(sata_link_debounce);
EXPORT_SYMBOL_GPL(sata_link_resume);
EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(sata_link_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_msleep);
EXPORT_SYMBOL_GPL(ata_wait_register);
/* SCSI glue and SCR register access */
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_link_online);
EXPORT_SYMBOL_GPL(ata_link_offline);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

/* PATA timing computation helpers */
EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_find_mode);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);
EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);

/* PCI helpers (libata-sff / libata-pci glue) */
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif
#endif

/* error handling (EH) interface */
EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
EXPORT_SYMBOL_GPL(ata_port_desc);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
#endif
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_link_abort);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(sata_async_notification);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
EXPORT_SYMBOL_GPL(ata_do_eh);
EXPORT_SYMBOL_GPL(ata_std_error_handler);

/* cable type detection callbacks */
EXPORT_SYMBOL_GPL(ata_cable_40wire);
EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_ignore);
EXPORT_SYMBOL_GPL(ata_cable_sata);
6845