1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43#include <linux/kernel.h>
44#include <linux/module.h>
45#include <linux/pci.h>
46#include <linux/init.h>
47#include <linux/list.h>
48#include <linux/mm.h>
49#include <linux/spinlock.h>
50#include <linux/blkdev.h>
51#include <linux/delay.h>
52#include <linux/timer.h>
53#include <linux/interrupt.h>
54#include <linux/completion.h>
55#include <linux/suspend.h>
56#include <linux/workqueue.h>
57#include <linux/scatterlist.h>
58#include <linux/io.h>
59#include <linux/async.h>
60#include <linux/log2.h>
61#include <linux/slab.h>
62#include <scsi/scsi.h>
63#include <scsi/scsi_cmnd.h>
64#include <scsi/scsi_host.h>
65#include <linux/libata.h>
66#include <asm/byteorder.h>
67#include <linux/cdrom.h>
68#include <linux/ratelimit.h>
69
70#include "libata.h"
71#include "libata-transport.h"
72
73
/* SATA link debounce timing parameter sets, in milliseconds.
 * NOTE(review): field order appears to be { interval, duration, timeout } —
 * confirm against the sata_link_debounce() consumer. */
const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
77
/* Base port operations inherited by all libata drivers: standard
 * prereset/postreset hooks and the standard error handler. */
const struct ata_port_operations ata_base_port_ops = {
	.prereset = ata_std_prereset,
	.postreset = ata_std_postreset,
	.error_handler = ata_std_error_handler,
};
83
/* Port operations for SATA ports: base ops plus standard qc deferral
 * and SATA hardreset. */
const struct ata_port_operations sata_port_ops = {
	.inherits = &ata_base_port_ops,

	.qc_defer = ata_std_qc_defer,
	.hardreset = sata_std_hardreset,
};
90
/* Forward declarations for helpers defined later in this file. */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

/* Next print ID to hand out; compared against the "force" table's port
 * field below.  NOTE(review): presumably incremented at port
 * registration — confirm at the registration site (outside this chunk). */
unsigned int ata_print_id = 1;
98
/* A single parsed value from the libata.force module parameter. */
struct ata_force_param {
	const char	*name;		/* token name, echoed in log messages */
	unsigned int	cbl;		/* cable type to force (ATA_CBL_NONE = unset) */
	int		spd_limit;	/* SATA PHY speed limit (0 = unset) */
	unsigned long	xfer_mask;	/* packed transfer-mode mask to force */
	unsigned int	horkage_on;	/* horkage flags to turn on */
	unsigned int	horkage_off;	/* horkage flags to turn off */
	unsigned int	lflags;		/* link flags to OR in */
};
108
/* One entry of the parsed force table: which port and device/link number
 * the parameter applies to (-1 matches any), plus the parameter itself. */
struct ata_force_ent {
	int			port;
	int			device;
	struct ata_force_param	param;
};
114
/* Parsed force table and its length, consulted by the ata_force_*()
 * helpers below. */
static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

/* Raw "force" parameter string; __initdata, so it is only available
 * during early init when the table is parsed. */
static char ata_force_param_buf[PAGE_SIZE] __initdata;

module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
122
123static int atapi_enabled = 1;
124module_param(atapi_enabled, int, 0444);
125MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
126
127static int atapi_dmadir = 0;
128module_param(atapi_dmadir, int, 0444);
129MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
130
131int atapi_passthru16 = 1;
132module_param(atapi_passthru16, int, 0444);
133MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
134
135int libata_fua = 0;
136module_param_named(fua, libata_fua, int, 0444);
137MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
138
139static int ata_ignore_hpa;
140module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
141MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
142
143static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
144module_param_named(dma, libata_dma_mask, int, 0444);
145MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
146
147static int ata_probe_timeout;
148module_param(ata_probe_timeout, int, 0444);
149MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
150
151int libata_noacpi = 0;
152module_param_named(noacpi, libata_noacpi, int, 0444);
153MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
154
155int libata_allow_tpm = 0;
156module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
157MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
158
159static int atapi_an;
160module_param(atapi_an, int, 0444);
161MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=0ff [default], 1=on)");
162
163MODULE_AUTHOR("Jeff Garzik");
164MODULE_DESCRIPTION("Library module for ATA devices");
165MODULE_LICENSE("GPL");
166MODULE_VERSION(DRV_VERSION);
167
168
169static bool ata_sstatus_online(u32 sstatus)
170{
171 return (sstatus & 0xf) == 0x3;
172}
173
174
175
176
177
178
179
180
181
182
183
184
185
/**
 *	ata_link_next - link iteration helper
 *	@link: the previous link, NULL to start
 *	@ap: ATA port containing links to iterate
 *	@mode: iteration mode, one of ATA_LITER_*
 *
 *	Returns the next link (or the first one when @link is NULL)
 *	on @ap for the given @mode, or NULL when iteration is complete.
 *	The switch cases below deliberately fall through to implement
 *	the different orderings.
 */
struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
			       enum ata_link_iter_mode mode)
{
	BUG_ON(mode != ATA_LITER_EDGE &&
	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);

	/* NULL link indicates start of iteration */
	if (!link)
		switch (mode) {
		case ATA_LITER_EDGE:
		case ATA_LITER_PMP_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_HOST_FIRST:
			return &ap->link;
		}

	/* we just iterated over the host link, what's next? */
	if (link == &ap->link)
		switch (mode) {
		case ATA_LITER_HOST_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_PMP_FIRST:
			if (unlikely(ap->slave_link))
				return ap->slave_link;
			/* fall through */
		case ATA_LITER_EDGE:
			return NULL;
		}

	/* slave_link is the end of iteration when present */
	if (unlikely(link == ap->slave_link))
		return NULL;

	/* we were over a PMP link; advance within the PMP link array */
	if (++link < ap->pmp_link + ap->nr_pmp_links)
		return link;

	/* PMP_FIRST visits the host link after the PMP links */
	if (mode == ATA_LITER_PMP_FIRST)
		return &ap->link;

	return NULL;
}
232
233
234
235
236
237
238
239
240
241
242
243
244
/**
 *	ata_dev_next - device iteration helper
 *	@dev: the previous device, NULL to start
 *	@link: ATA link containing devices to iterate
 *	@mode: iteration mode, one of ATA_DITER_*
 *
 *	Returns the next device (or the first one when @dev is NULL) on
 *	@link for the given @mode, or NULL when iteration is complete.
 *	*_REVERSE modes walk the device array backwards; ENABLED modes
 *	skip devices that are not enabled.
 */
struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
				enum ata_dev_iter_mode mode)
{
	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);

	/* NULL dev indicates start of iteration */
	if (!dev)
		switch (mode) {
		case ATA_DITER_ENABLED:
		case ATA_DITER_ALL:
			dev = link->device;
			goto check;
		case ATA_DITER_ENABLED_REVERSE:
		case ATA_DITER_ALL_REVERSE:
			dev = link->device + ata_link_max_devices(link) - 1;
			goto check;
		}

 next:
	/* move to the next device in the chosen direction */
	switch (mode) {
	case ATA_DITER_ENABLED:
	case ATA_DITER_ALL:
		if (++dev < link->device + ata_link_max_devices(link))
			goto check;
		return NULL;
	case ATA_DITER_ENABLED_REVERSE:
	case ATA_DITER_ALL_REVERSE:
		if (--dev >= link->device)
			goto check;
		return NULL;
	}

 check:
	/* for ENABLED modes, skip over disabled devices */
	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
	    !ata_dev_enabled(dev))
		goto next;
	return dev;
}
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300struct ata_link *ata_dev_phys_link(struct ata_device *dev)
301{
302 struct ata_port *ap = dev->link->ap;
303
304 if (!ap->slave_link)
305 return dev->link;
306 if (!dev->devno)
307 return &ap->link;
308 return ap->slave_link;
309}
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324void ata_force_cbl(struct ata_port *ap)
325{
326 int i;
327
328 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
329 const struct ata_force_ent *fe = &ata_force_tbl[i];
330
331 if (fe->port != -1 && fe->port != ap->print_id)
332 continue;
333
334 if (fe->param.cbl == ATA_CBL_NONE)
335 continue;
336
337 ap->cbl = fe->param.cbl;
338 ata_port_printk(ap, KERN_NOTICE,
339 "FORCE: cable set to %s\n", fe->param.name);
340 return;
341 }
342}
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
/**
 *	ata_force_link_limits - force link limits according to libata.force
 *	@link: ATA link of interest
 *
 *	Apply forced SPD limit and link flags to @link.  The table is
 *	scanned from the end, so for the SPD limit the most recently
 *	specified entry wins (applied once); link flags from all matching
 *	entries are ORed in.  The host link is matched using link number
 *	link->pmp + 15 so it can be distinguished from PMP links.
 */
static void ata_force_link_limits(struct ata_link *link)
{
	bool did_spd = false;
	int linkno = link->pmp;
	int i;

	if (ata_is_host_link(link))
		linkno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != linkno)
			continue;

		/* only honor the first (i.e. latest) SPD limit */
		if (!did_spd && fe->param.spd_limit) {
			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
			ata_link_printk(link, KERN_NOTICE,
					"FORCE: PHY spd limit set to %s\n",
					fe->param.name);
			did_spd = true;
		}

		/* let lflags stack */
		if (fe->param.lflags) {
			link->flags |= fe->param.lflags;
			ata_link_printk(link, KERN_NOTICE,
					"FORCE: link flag 0x%x forced -> 0x%x\n",
					fe->param.lflags, link->flags);
		}
	}
}
396
397
398
399
400
401
402
403
404
405
406
407
/**
 *	ata_force_xfermask - force xfermask according to libata.force
 *	@dev: ATA device of interest
 *
 *	Apply a forced transfer-mode mask to @dev.  The table is scanned
 *	from the end so the most recently specified entry wins; only the
 *	first match is applied.  Devices on the host link also match the
 *	alternate device number devno + 15.
 */
static void ata_force_xfermask(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];
		unsigned long pio_mask, mwdma_mask, udma_mask;

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!fe->param.xfer_mask)
			continue;

		/* Force the highest class that has any bits set; lower
		 * classes are cleared so the forced mode is used. */
		ata_unpack_xfermask(fe->param.xfer_mask,
				    &pio_mask, &mwdma_mask, &udma_mask);
		if (udma_mask)
			dev->udma_mask = udma_mask;
		else if (mwdma_mask) {
			dev->udma_mask = 0;
			dev->mwdma_mask = mwdma_mask;
		} else {
			dev->udma_mask = 0;
			dev->mwdma_mask = 0;
			dev->pio_mask = pio_mask;
		}

		ata_dev_printk(dev, KERN_NOTICE,
			"FORCE: xfer_mask set to %s\n", fe->param.name);
		return;
	}
}
450
451
452
453
454
455
456
457
458
459
460
461
/**
 *	ata_force_horkage - force horkage according to libata.force
 *	@dev: ATA device of interest
 *
 *	Apply forced horkage flags to @dev.  Unlike the other force
 *	helpers, the table is scanned front to back and every matching
 *	entry is applied, so later entries can override earlier ones.
 *	Devices on the host link also match devno + 15.
 */
static void ata_force_horkage(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = 0; i < ata_force_tbl_size; i++) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		/* skip entries that would change nothing: all on-bits
		 * already set and no off-bits currently set */
		if (!(~dev->horkage & fe->param.horkage_on) &&
		    !(dev->horkage & fe->param.horkage_off))
			continue;

		dev->horkage |= fe->param.horkage_on;
		dev->horkage &= ~fe->param.horkage_off;

		ata_dev_printk(dev, KERN_NOTICE,
			"FORCE: horkage modified (%s)\n", fe->param.name);
	}
}
493
494
495
496
497
498
499
500
501
502
503
504
505
506int atapi_cmd_type(u8 opcode)
507{
508 switch (opcode) {
509 case GPCMD_READ_10:
510 case GPCMD_READ_12:
511 return ATAPI_READ;
512
513 case GPCMD_WRITE_10:
514 case GPCMD_WRITE_12:
515 case GPCMD_WRITE_AND_VERIFY_10:
516 return ATAPI_WRITE;
517
518 case GPCMD_READ_CD:
519 case GPCMD_READ_CD_MSF:
520 return ATAPI_READ_CD;
521
522 case ATA_16:
523 case ATA_12:
524 if (atapi_passthru16)
525 return ATAPI_PASS_THRU;
526
527 default:
528 return ATAPI_MISC;
529 }
530}
531
532
533
534
535
536
537
538
539
540
541
542
543
544
/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@pmp: Port multiplier port
 *	@is_cmd: This FIS is for command
 *	@fis: Buffer into which data will output
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	Register - Host to Device FIS (20 bytes).
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = pmp & 0xf;		/* port multiplier number */
	if (is_cmd)
		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */

	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	/* reserved tail of the FIS */
	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}
575
576
577
578
579
580
581
582
583
584
585
586
/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *	This is the inverse of ata_tf_to_fis() for the fields a device
 *	reports back (ctl and hob_feature are not read).
 */
void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command = fis[2];
	tf->feature = fis[3];

	tf->lbal = fis[4];
	tf->lbam = fis[5];
	tf->lbah = fis[6];
	tf->device = fis[7];

	tf->hob_lbal = fis[8];
	tf->hob_lbam = fis[9];
	tf->hob_lbah = fis[10];

	tf->nsect = fis[12];
	tf->hob_nsect = fis[13];
}
604
/* Read/write command table indexed by ata_rwcmd_protocol():
 * base (0 = multi-PIO, 8 = PIO, 16 = DMA) + fua(4) + lba48(2) + write(1).
 * Zero entries mean the combination has no command. */
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
634
635
636
637
638
639
640
641
642
643
644
645
/**
 *	ata_rwcmd_protocol - set taskfile r/w command and protocol
 *	@tf: command to examine and configure
 *	@dev: device tf belongs to
 *
 *	Examine the device configuration and tf->flags to select the
 *	proper read/write command and protocol, via the ata_rw_cmds[]
 *	table above.
 *
 *	RETURNS:
 *	0 on success, -1 if no command exists for the combination.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	/* offsets into ata_rw_cmds[]: fua=4, lba48=2, write=1 */
	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to (used for CHS geometry)
 *
 *	Read the block address from @tf.  Handles all three address
 *	formats - LBA, LBA48 and CHS - selected by tf->flags.
 *
 *	RETURNS:
 *	Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= (u64)tf->hob_lbal << 24;
		} else
			/* LBA28: bits 27:24 live in the device register */
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		/* CHS sectors are 1-based; 0 is invalid, clamp to 1 */
		if (!sect) {
			ata_dev_printk(dev, KERN_WARNING, "device reported "
				       "invalid CHS sector 0\n");
			sect = 1;
		}

		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
	}

	return block;
}
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag (for NCQ)
 *
 *	Build ATA taskfile @tf for the read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.  Chooses NCQ,
 *	LBA28, LBA48 or CHS addressing as appropriate.
 *
 *	RETURNS:
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if transfer mode can't be determined.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* NCQ: always LBA48 addressing */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		/* for NCQ the tag goes in nsect and the sector count
		 * in the feature registers */
		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28; bits 27:24 go in the device register */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* the request may be too large for CHS addressing */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl = track / dev->heads;
		head = track % dev->heads;
		sect = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit:
		 *   Cylinder: 0-65535, Head: 0-15, Sector: 1-255 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff;	/* sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863unsigned long ata_pack_xfermask(unsigned long pio_mask,
864 unsigned long mwdma_mask,
865 unsigned long udma_mask)
866{
867 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
868 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
869 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
870}
871
872
873
874
875
876
877
878
879
880
881
882void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
883 unsigned long *mwdma_mask, unsigned long *udma_mask)
884{
885 if (pio_mask)
886 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
887 if (mwdma_mask)
888 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
889 if (udma_mask)
890 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
891}
892
/* Mapping between xfer_mask bit ranges and XFER_* mode codes: each row
 * maps @bits mask bits starting at @shift onto consecutive mode codes
 * starting at @base.  Terminated by shift == -1. */
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916u8 ata_xfer_mask2mode(unsigned long xfer_mask)
917{
918 int highbit = fls(xfer_mask) - 1;
919 const struct ata_xfer_ent *ent;
920
921 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
922 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
923 return ent->base + highbit - ent->shift;
924 return 0xff;
925}
926
927
928
929
930
931
932
933
934
935
936
937
938
939unsigned long ata_xfer_mode2mask(u8 xfer_mode)
940{
941 const struct ata_xfer_ent *ent;
942
943 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
944 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
945 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
946 & ~((1 << ent->shift) - 1);
947 return 0;
948}
949
950
951
952
953
954
955
956
957
958
959
960
961
962int ata_xfer_mode2shift(unsigned long xfer_mode)
963{
964 const struct ata_xfer_ent *ent;
965
966 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
967 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
968 return ent->shift;
969 return -1;
970}
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only the highest bit counts
 *
 *	Determine a string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	RETURNS:
 *	Constant C string representing the highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int bit = fls(xfer_mask) - 1;

	if (bit < 0 || bit >= ARRAY_SIZE(xfer_mode_str))
		return "<n/a>";

	return xfer_mode_str[bit];
}
1017
/* Map a SATA link speed number (1-based) to a human-readable string;
 * returns "<unknown>" for 0 or out-of-range values. */
const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
		"6.0 Gbps",
	};
	unsigned int idx = spd - 1;

	if (spd == 0 || idx >= ARRAY_SIZE(spd_str))
		return "<unknown>";

	return spd_str[idx];
}
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is ATA
 *	or ATAPI, as per "Signature and persistence" section of ATA/PI
 *	spec (volume 1, sect 5.14).  Classification is based solely on
 *	the LBA mid (lbam) and LBA high (lbah) signature bytes.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
 *	%ATA_DEV_SEMB.  %ATA_DEV_UNKNOWN if the signature is not
 *	recognized.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* ATA signature: 0x00/0x00 */
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	/* ATAPI signature: 0x14/0xeb */
	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	/* port multiplier signature: 0x69/0x96 */
	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");
		return ATA_DEV_PMP;
	}

	/* SEMB signature: 0x3c/0xc3 */
	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		DPRINTK("found SEMB device by sig (could be ATA device)\n");
		return ATA_DEV_SEMB;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108void ata_id_string(const u16 *id, unsigned char *s,
1109 unsigned int ofs, unsigned int len)
1110{
1111 unsigned int c;
1112
1113 BUG_ON(len & 1);
1114
1115 while (len > 0) {
1116 c = id[ofs] >> 8;
1117 *s = c;
1118 s++;
1119
1120 c = id[ofs] & 0xff;
1121 *s = c;
1122 s++;
1123
1124 ofs++;
1125 len -= 2;
1126 }
1127}
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143void ata_id_c_string(const u16 *id, unsigned char *s,
1144 unsigned int ofs, unsigned int len)
1145{
1146 unsigned char *p;
1147
1148 ata_id_string(id, s, ofs, len - 1);
1149
1150 p = s + strnlen(s, len - 1);
1151 while (p > s && p[-1] == ' ')
1152 p--;
1153 *p = '\0';
1154}
1155
/* Return the capacity in sectors reported by IDENTIFY data @id, using
 * the LBA48, LBA28 or CHS fields as appropriate. */
static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
		else
			return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
	} else {
		/* CHS: prefer the current (possibly translated) geometry
		 * when valid, else the default geometry */
		if (ata_id_current_chs_valid(id))
			return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
			       id[ATA_ID_CUR_SECTORS];
		else
			return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
			       id[ATA_ID_SECTORS];
	}
}
1172
1173u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1174{
1175 u64 sectors = 0;
1176
1177 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1178 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1179 sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1180 sectors |= (tf->lbah & 0xff) << 16;
1181 sectors |= (tf->lbam & 0xff) << 8;
1182 sectors |= (tf->lbal & 0xff);
1183
1184 return sectors;
1185}
1186
1187u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1188{
1189 u64 sectors = 0;
1190
1191 sectors |= (tf->device & 0x0f) << 24;
1192 sectors |= (tf->lbah & 0xff) << 16;
1193 sectors |= (tf->lbam & 0xff) << 8;
1194 sectors |= (tf->lbal & 0xff);
1195
1196 return sectors;
1197}
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
/**
 *	ata_read_native_max_address - Read native max address
 *	@dev: target device
 *	@max_sectors: out parameter for the result
 *
 *	Perform an LBA48 or LBA28 native size query upon the device in
 *	question.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted by the drive.
 *	-EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
			       "max address (err_mask=0x%x)\n", err_mask);
		/* device abort is distinguished so callers can tell
		 * "unsupported" from real I/O failure */
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	/* result taskfile holds the highest addressable LBA; +1 for count */
	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf) + 1;
	else
		*max_sectors = ata_tf_to_lba(&tf) + 1;
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors using the SET MAX
 *	(EXT) command.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	/* the command takes the highest addressable LBA, not the count */
	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		/* LBA28: bits 27:24 go in the device register */
		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to set "
			       "max address (err_mask=0x%x)\n", err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
/**
 *	ata_hpa_resize - Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and
 *	resize it if required to the full size of the media.  The caller
 *	must check the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA resizing from now on.
		 */
		if (rc == -EACCES || !unlock_hpa) {
			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
				       "broken, skipping HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}
	dev->n_native_sectors = native_sectors;

	/* nothing to do? */
	if (native_sectors <= sectors || !unlock_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_printk(dev, KERN_INFO,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_printk(dev, KERN_WARNING,
				"native sectors (%llu) is smaller than "
				"sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
			       "(%llu -> %llu), skipping HPA handling\n",
			       (unsigned long long)sectors,
			       (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data to pick up the new capacity */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
			       "data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_printk(dev, KERN_INFO,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page (debug builds only, via DPRINTK).
 */
static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x "
		"53==0x%04x "
		"63==0x%04x "
		"64==0x%04x "
		"75==0x%04x \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x "
		"81==0x%04x "
		"82==0x%04x "
		"83==0x%04x "
		"84==0x%04x \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device from the IDENTIFY page,
 *	handling both the modern validity-flagged fields and the legacy
 *	PIO timing field, plus CFA advanced modes.
 *
 *	RETURNS:
 *	Computed xfermask
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* usual case: the PIO modes word is flagged valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;	/* PIO0-2 are always supported */
	} else {
		/* legacy field: high byte holds the maximum PIO timing
		 * number; turn it into a mask */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/* CFA advanced modes word adds PIO5/6 and MWDMA3/4 */
		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
1511
/* Completion callback for internal commands: wakes the waiter in
 * ata_exec_internal_sg(), which stashed the completion in private_data. */
static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sgl: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	the command on entry and the result on return.  Timeout and
 *	error conditions are reported via the return value.  No recovery
 *	action is taken after a command times out.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	int auto_timeout = 0;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while the port is frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* Initialize internal qc.  The internal tag is only used with
	 * new-style EH; legacy-EH drivers get tag 0. */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* save and clear the active command state so the internal
	 * command has the port to itself; restored at "finish up" */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	/* 0 means pick a default: module parameter if set, otherwise
	 * per-command timeout (tracked for the timed-out notification) */
	if (!timeout) {
		if (ata_probe_timeout)
			timeout = ata_probe_timeout * 1000;
		else {
			timeout = ata_internal_cmd_timeout(dev, command);
			auto_timeout = 1;
		}
	}

	if (ap->ops->error_handler)
		ata_eh_release(ap);

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	if (ap->ops->error_handler)
		ata_eh_acquire(ap);

	ata_sff_flush_pio_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		/* drop the catch-all bit when a specific cause is known */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	spin_unlock_irqrestore(ap->lock, flags);

	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
		ata_internal_cmd_timed_out(dev, command);

	return err_mask;
}
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720unsigned ata_exec_internal(struct ata_device *dev,
1721 struct ata_taskfile *tf, const u8 *cdb,
1722 int dma_dir, void *buf, unsigned int buflen,
1723 unsigned long timeout)
1724{
1725 struct scatterlist *psg = NULL, sg;
1726 unsigned int n_elem = 0;
1727
1728 if (dma_dir != DMA_NONE) {
1729 WARN_ON(!buf);
1730 sg_init_one(&sg, buf, buflen);
1731 psg = &sg;
1732 n_elem++;
1733 }
1734
1735 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1736 timeout);
1737}
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1754{
1755 struct ata_taskfile tf;
1756
1757 ata_tf_init(dev, &tf);
1758
1759 tf.command = cmd;
1760 tf.flags |= ATA_TFLAG_DEVICE;
1761 tf.protocol = ATA_PROT_NODATA;
1762
1763 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1764}
1765
1766
1767
1768
1769
1770
1771
1772
1773unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1774{
1775
1776
1777
1778
1779 if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1780 return 0;
1781
1782
1783
1784 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1785 return 0;
1786
1787 if (ata_id_is_cfa(adev->id)
1788 && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1789 return 0;
1790
1791 if (adev->pio_mode > XFER_PIO_2)
1792 return 1;
1793
1794 if (ata_id_has_iordy(adev->id))
1795 return 1;
1796 return 0;
1797}
1798
1799
1800
1801
1802
1803
1804
1805
/**
 *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
 *	@adev: ATA device
 *
 *	Compute the highest PIO mode mask that can be used on a drive
 *	when IORDY is not available.
 */
static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
	/* If we have no drive specific rule, then PIO 2 is non IORDY */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		u16 pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 3 << ATA_SHIFT_PIO;
			return 7 << ATA_SHIFT_PIO;
		}
	}
	return 3 << ATA_SHIFT_PIO;
}
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
/**
 *	ata_do_dev_read_id		-	default ID read method
 *	@dev: device
 *	@tf: proposed taskfile
 *	@id: data buffer
 *
 *	Issue the identify taskfile and hand the buffer back.
 *	Returns the error mask from ata_exec_internal().  This is the
 *	default method used by drivers that do not override ->read_id.
 */
unsigned int ata_do_dev_read_id(struct ata_device *dev,
				struct ata_taskfile *tf, u16 *id)
{
	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
				 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
}
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	bool is_semb = class == ATA_DEV_SEMB;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);

retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_SEMB:
		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
		/* fallthrough - probe as ATA first */
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	if (ap->ops->read_id)
		err_mask = ap->ops->read_id(dev, &tf, id);
	else
		err_mask = ata_do_dev_read_id(dev, &tf, id);

	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			ata_dev_printk(dev, KERN_DEBUG,
				       "NODEV after polling detection\n");
			return -ENOENT;
		}

		if (is_semb) {
			ata_dev_printk(dev, KERN_INFO, "IDENTIFY failed on "
				       "device w/ SEMB sig, disabled\n");
			/* SEMB is not supported yet */
			*p_class = ATA_DEV_SEMB_UNSUP;
			return 0;
		}

		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			/* Device or controller might have reported
			 * the wrong device class.  Give a shot at the
			 * other IDENTIFY if the current one is
			 * aborted by the device.
			 */
			if (may_fallback) {
				may_fallback = 0;

				if (class == ATA_DEV_ATA)
					class = ATA_DEV_ATAPI;
				else
					class = ATA_DEV_ATA;
				goto retry;
			}

			/* Control reaches here iff the device aborted
			 * both flavors of IDENTIFYs which happens
			 * sometimes with phantom devices.
			 */
			ata_dev_printk(dev, KERN_DEBUG,
				"both IDENTIFYs aborted, assuming NODEV\n");
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
		ata_dev_printk(dev, KERN_DEBUG, "dumping IDENTIFY data, "
			       "class=%d may_fallback=%d tried_spinup=%d\n",
			       class, may_fallback, tried_spinup);
		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}
2041
/*
 * Apply per-device link speed horkage: clamp the physical link's speed
 * limit when the device is known to misbehave above a certain rate.
 * Returns -EAGAIN when the link must be renegotiated at the lower
 * speed, 0 when nothing needs to be done.
 */
static int ata_do_link_spd_horkage(struct ata_device *dev)
{
	struct ata_link *plink = ata_dev_phys_link(dev);
	u32 target, target_limit;

	if (!sata_scr_valid(plink))
		return 0;

	if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
		target = 1;	/* limit to 1.5Gbps */
	else
		return 0;

	target_limit = (1 << target) - 1;

	/* if already on stricter limit, no need to push further */
	if (plink->sata_spd_limit <= target_limit)
		return 0;

	plink->sata_spd_limit = target_limit;

	/* Request another EH round by returning -EAGAIN if link is
	 * going faster than the target speed.  Forward progress is
	 * guaranteed by setting sata_spd_limit to target_limit above.
	 */
	if (plink->sata_spd > target) {
		ata_dev_printk(dev, KERN_INFO,
			       "applying link speed limit horkage to %s\n",
			       sata_spd_string(target));
		return -EAGAIN;
	}
	return 0;
}
2075
2076static inline u8 ata_dev_knobble(struct ata_device *dev)
2077{
2078 struct ata_port *ap = dev->link->ap;
2079
2080 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2081 return 0;
2082
2083 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2084}
2085
2086static int ata_dev_config_ncq(struct ata_device *dev,
2087 char *desc, size_t desc_sz)
2088{
2089 struct ata_port *ap = dev->link->ap;
2090 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2091 unsigned int err_mask;
2092 char *aa_desc = "";
2093
2094 if (!ata_id_has_ncq(dev->id)) {
2095 desc[0] = '\0';
2096 return 0;
2097 }
2098 if (dev->horkage & ATA_HORKAGE_NONCQ) {
2099 snprintf(desc, desc_sz, "NCQ (not used)");
2100 return 0;
2101 }
2102 if (ap->flags & ATA_FLAG_NCQ) {
2103 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2104 dev->flags |= ATA_DFLAG_NCQ;
2105 }
2106
2107 if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2108 (ap->flags & ATA_FLAG_FPDMA_AA) &&
2109 ata_id_has_fpdma_aa(dev->id)) {
2110 err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2111 SATA_FPDMA_AA);
2112 if (err_mask) {
2113 ata_dev_printk(dev, KERN_ERR, "failed to enable AA"
2114 "(error_mask=0x%x)\n", err_mask);
2115 if (err_mask != AC_ERR_DEV) {
2116 dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2117 return -EIO;
2118 }
2119 } else
2120 aa_desc = ", AA";
2121 }
2122
2123 if (hdepth >= ddepth)
2124 snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
2125 else
2126 snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2127 ddepth, aa_desc);
2128 return 0;
2129}
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned long xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
			       __func__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);

	/* set horkage */
	dev->horkage |= ata_dev_blacklisted(dev);
	ata_force_horkage(dev);

	if (dev->horkage & ATA_HORKAGE_DISABLE) {
		ata_dev_printk(dev, KERN_INFO,
			       "unsupported device, disabling\n");
		ata_dev_disable(dev);
		return 0;
	}

	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
	    dev->class == ATA_DEV_ATAPI) {
		ata_dev_printk(dev, KERN_WARNING,
			"WARNING: ATAPI is %s, device ignored.\n",
			atapi_enabled ? "not supported with this driver"
				      : "disabled");
		ata_dev_disable(dev);
		return 0;
	}

	rc = ata_do_link_spd_horkage(dev);
	if (rc)
		return rc;

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __func__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;
	dev->multi_count = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			/* CPRM may make this media unusable */
			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessible.\n");
			snprintf(revbuf, 7, "CFA");
		} else {
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
			/* Warn the user if the device has TPM extensions */
			if (ata_id_has_tpm(id))
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessible.\n");
		}

		dev->n_sectors = ata_id_n_sectors(id);

		/* get current R/W Multiple count setting */
		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
			unsigned int max = dev->id[47] & 0xff;
			unsigned int cnt = dev->id[59] & 0xff;
			/* only recognize/allow powers of two here */
			if (is_power_of_2(max) && is_power_of_2(cnt))
				if (cnt <= max)
					dev->multi_count = cnt;
		}

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[24];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
			if (rc)
				return rc;

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, dev->cylinders,
					dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		const char *dma_dir_string = "";
		u32 sntf;

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support.  If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if (atapi_an &&
		    (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!sata_pmp_attached(ap) ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
			unsigned int err_mask;

			/* issue SET feature command to turn this on */
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_AN);
			if (err_mask)
				ata_dev_printk(dev, KERN_ERR,
					"failed to enable ATAPI AN "
					"(err_mask=0x%x)\n", err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
			dev->flags |= ATA_DFLAG_DMADIR;
			dma_dir_string = ", DMADIR";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "ATAPI: %s, %s, max %s%s%s%s\n",
				       modelbuf, fwrevbuf,
				       ata_mode_string(xfer_mask),
				       cdb_intr_string, atapi_an_string,
				       dma_dir_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	/* Limit PATA drive on SATA cable bridge transfers to udma5,
	   200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if ((dev->class == ATA_DEV_ATAPI) &&
	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
	}

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot. Do this after the dev_config call as some controllers
		   with buggy firmware may want to avoid reporting false device
		   bugs */

		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
		ata_dev_printk(dev, KERN_WARNING, "WARNING: device requires "
			       "firmware update to be fully functional.\n");
		ata_dev_printk(dev, KERN_WARNING, "         contact the vendor "
			       "or visit http://ata.wiki.kernel.org.\n");
	}

	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __func__);
	return rc;
}
2448
2449
2450
2451
2452
2453
2454
2455
2456
/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 */
int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}
2461
2462
2463
2464
2465
2466
2467
2468
2469
/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 */
int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}
2474
2475
2476
2477
2478
2479
2480
2481
/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 */
int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}
2486
2487
2488
2489
2490
2491
2492
2493
/**
 *	ata_cable_ignore	-	return ignored PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which don't use cable type to limit
 *	transfer mode.
 */
int ata_cable_ignore(struct ata_port *ap)
{
	return ATA_CBL_PATA_IGN;
}
2498
2499
2500
2501
2502
2503
2504
2505
/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables.
 */
int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
2510
2511
2512
2513
2514
2515
2516
2517
2518
2519
2520
2521
2522
2523
2524
2525
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */
int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	ata_for_each_dev(dev, &ap->link, ALL)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	ata_for_each_dev(dev, &ap->link, ALL) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	ata_for_each_dev(dev, &ap->link, ALL) {
		if (dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of
	 * the reported cable types and sensed types.  When SATA
	 * drives indicate we have a bridge, we don't know which end
	 * of the link the bridge is which is a problem.
	 */
	ata_for_each_dev(dev, &ap->link, ENABLED)
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	ata_for_each_dev(dev, &ap->link, ENABLED) {
		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	ata_for_each_dev(dev, &ap->link, ENABLED)
		return 0;

	return -ENODEV;

 fail:
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fallthrough */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link, 0);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
2646
2647
2648
2649
2650
2651
2652
2653
2654
2655
2656static void sata_print_link_status(struct ata_link *link)
2657{
2658 u32 sstatus, scontrol, tmp;
2659
2660 if (sata_scr_read(link, SCR_STATUS, &sstatus))
2661 return;
2662 sata_scr_read(link, SCR_CONTROL, &scontrol);
2663
2664 if (ata_phys_link_online(link)) {
2665 tmp = (sstatus >> 4) & 0xf;
2666 ata_link_printk(link, KERN_INFO,
2667 "SATA link up %s (SStatus %X SControl %X)\n",
2668 sata_spd_string(tmp), sstatus, scontrol);
2669 } else {
2670 ata_link_printk(link, KERN_INFO,
2671 "SATA link down (SStatus %X SControl %X)\n",
2672 sstatus, scontrol);
2673 }
2674}
2675
2676
2677
2678
2679
2680
2681
2682
2683
2684struct ata_device *ata_dev_pair(struct ata_device *adev)
2685{
2686 struct ata_link *link = adev->link;
2687 struct ata_device *pair = &link->device[1 - adev->devno];
2688 if (!ata_dev_enabled(pair))
2689 return NULL;
2690 return pair;
2691}
2692
2693
2694
2695
2696
2697
2698
2699
2700
2701
2702
2703
2704
2705
2706
2707
2708
2709
2710
2711
2712
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *	@spd_limit: Additional limit
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	If @spd_limit is non-zero, the speed is limited to equal to or
 *	lower than @spd_limit if such speed is supported.  If
 *	@spd_limit is slower than any supported speed, only the lowest
 *	supported speed is allowed.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
{
	u32 sstatus, spd, mask;
	int rc, bit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0 && ata_sstatus_online(sstatus))
		spd = (sstatus >> 4) & 0xf;
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;

	/* unconditionally mask off the highest bit */
	bit = fls(mask) - 1;
	mask &= ~(1 << bit);

	/* Mask off all speeds higher than or equal to the current
	 * one.  Force 1.5Gbps if current SPD is not available.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		mask &= 1;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	if (spd_limit) {
		if (mask & ((1 << spd_limit) - 1))
			mask &= (1 << spd_limit) - 1;
		else {
			bit = ffs(mask) - 1;
			mask = 1 << bit;
		}
	}

	link->sata_spd_limit = mask;

	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}
2766
/*
 * Compute the target speed from @link's speed limit, patch it into
 * @scontrol (bits 7:4) and report whether the programmed speed differs
 * from the target (i.e. whether SControl needs to be written back).
 */
static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
{
	struct ata_link *host_link = &link->ap->link;
	u32 limit, target, spd;

	limit = link->sata_spd_limit;

	/* Don't configure downstream link faster than upstream link.
	 * It doesn't speed up anything and some PMPs choke on such
	 * configuration.
	 */
	if (!ata_is_host_link(link) && host_link->sata_spd)
		limit &= (1 << host_link->sata_spd) - 1;

	if (limit == UINT_MAX)
		target = 0;
	else
		target = fls(limit);

	spd = (*scontrol >> 4) & 0xf;
	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);

	return spd != target;
}
2791
2792
2793
2794
2795
2796
2797
2798
2799
2800
2801
2802
2803
2804
2805
2806
2807static int sata_set_spd_needed(struct ata_link *link)
2808{
2809 u32 scontrol;
2810
2811 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2812 return 1;
2813
2814 return __sata_set_spd_needed(link, &scontrol);
2815}
2816
2817
2818
2819
2820
2821
2822
2823
2824
2825
2826
2827
2828
2829
2830int sata_set_spd(struct ata_link *link)
2831{
2832 u32 scontrol;
2833 int rc;
2834
2835 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2836 return rc;
2837
2838 if (!__sata_set_spd_needed(link, &scontrol))
2839 return 0;
2840
2841 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2842 return rc;
2843
2844 return 1;
2845}
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858
/*
 * Standard ATA transfer-mode timing table, in nanoseconds.  Entries
 * are sorted by ascending mode number and terminated by a 0xFF
 * sentinel (relied upon by ata_timing_find_mode()).  Values are
 * quantized to controller clocks by ata_timing_quantize().
 */
static const struct ata_timing ata_timing[] = {
	/* PIO modes */
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150,  0,  600,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100,  0,  383,   0 },
	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90,  0,  240,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70,  0,  180,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25,  0,  120,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25,  0,  100,   0 },
	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  0,   80,   0 },

	/* single-word DMA modes */
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 50,  960,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 30,  480,   0 },
	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 20,  240,   0 },

	/* multi-word DMA modes */
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 20,  480,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50,  5,  150,   0 },
	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25,  5,  120,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25,  5,  100,   0 },
	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  5,   80,   0 },

	/* Ultra DMA modes - only the udma cycle field is meaningful */
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,  0,    0, 120 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,  0,    0,  80 },
	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,  0,    0,  60 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,  0,    0,  45 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,  0,    0,  30 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,  0,    0,  20 },
	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,  0,    0,  15 },

	{ 0xFF }	/* table terminator */
};
2890
/* ENOUGH: ceil(v / unit) - number of whole units needed to cover v */
#define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
/* EZ: like ENOUGH but maps 0 to 0 ("no timing requirement") */
#define EZ(v, unit)		((v)?ENOUGH(v, unit):0)
2893
/*
 * Convert nanosecond timings in @t to clock counts in @q, rounding up.
 * @T is the clock period used for all fields except udma, which is
 * quantized with @UT.  Zero input fields stay zero (no requirement).
 */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup	= EZ(t->setup      * 1000,  T);
	q->act8b	= EZ(t->act8b      * 1000,  T);
	q->rec8b	= EZ(t->rec8b      * 1000,  T);
	q->cyc8b	= EZ(t->cyc8b      * 1000,  T);
	q->active	= EZ(t->active     * 1000,  T);
	q->recover	= EZ(t->recover    * 1000,  T);
	q->dmack_hold	= EZ(t->dmack_hold * 1000,  T);
	q->cycle	= EZ(t->cycle      * 1000,  T);
	q->udma		= EZ(t->udma       * 1000, UT);
}
2906
/*
 * Merge timings @a and @b into @m by taking, for each field selected
 * in the @what bitmask, the slower (larger) of the two values.  Fields
 * not selected in @what are left untouched in @m.
 */
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}
2920
2921const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
2922{
2923 const struct ata_timing *t = ata_timing;
2924
2925 while (xfer_mode > t->mode)
2926 t++;
2927
2928 if (xfer_mode == t->mode)
2929 return t;
2930 return NULL;
2931}
2932
/*
 * Compute quantized controller timings in @t for @adev running at
 * @speed, using clock period @T (and @UT for UDMA).  Drive-reported
 * EIDE cycle times are folded in, PIO timings are merged in for
 * non-PIO modes, and the shortcut rules below are applied.
 *
 * Returns 0 on success, -EINVAL if @speed has no timing table entry.
 */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const u16 *id = adev->id;
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));

		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2)
				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
			else if ((speed <= XFER_PIO_4) ||
				 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
			p.cycle = id[ATA_ID_EIDE_DMA_MIN];

		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/* In a few cases quantisation may produce enough errors to
	   leave t->cycle too low for the sum of active and recovery
	   if so we must correct this */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
3008
3009
3010
3011
3012
3013
3014
3015
3016
3017
3018
3019
3020
3021
3022
3023
3024
/**
 *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
 *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
 *	@cycle: cycle duration in ns
 *
 *	Return matching xfer mode for @cycle.  The returned mode is of
 *	the transfer type specified by @xfer_shift.  If @cycle is too
 *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
 *	than the fastest known mode, the fastest mode is returned.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mode, 0xff if no match found.
 */
u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
{
	u8 base_mode = 0xff, last_mode = 0xff;
	const struct ata_xfer_ent *ent;
	const struct ata_timing *t;

	/* find the base mode number for this transfer type */
	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (ent->shift == xfer_shift)
			base_mode = ent->base;

	/* walk successive modes of the same type, keeping the fastest
	 * one whose cycle is still >= the requested duration */
	for (t = ata_timing_find_mode(base_mode);
	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
		unsigned short this_cycle;

		switch (xfer_shift) {
		case ATA_SHIFT_PIO:
		case ATA_SHIFT_MWDMA:
			this_cycle = t->cycle;
			break;
		case ATA_SHIFT_UDMA:
			this_cycle = t->udma;
			break;
		default:
			return 0xff;
		}

		if (cycle > this_cycle)
			break;

		last_mode = t->mode;
	}

	return last_mode;
}
3059
3060
3061
3062
3063
3064
3065
3066
3067
3068
3069
3070
3071
3072
3073
3074
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned long orig_mask, xfer_mask;
	unsigned long pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* drop the fastest remaining PIO mode */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* drop the fastest remaining DMA mode, UDMA first */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		/* restrict UDMA to 40-wire cable speeds */
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fallthrough - also clear all DMA modes */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* at least one PIO mode must remain, and something must change */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_printk(dev, KERN_WARNING,
			       "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
3148
/*
 * Program @dev for its selected transfer mode: issue SET_XFERMODE
 * (unless the NOSETXFER horkage applies on SATA), revalidate the
 * device, and decide whether a device-reported error can be ignored.
 * Returns 0 on success, -EIO on unignorable failure, or the error
 * from revalidation.
 */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
	const char *dev_err_whine = "";
	int ign_dev_err = 0;
	unsigned int err_mask = 0;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
		dev_err_whine = " (SET_XFERMODE skipped)";
	else {
		if (nosetxfer)
			ata_dev_printk(dev, KERN_WARNING,
				       "NOSETXFER but PATA detected - can't "
				       "skip SETXFER, might malfunction\n");
		err_mask = ata_dev_set_xfermode(dev);
	}

	if (err_mask & ~AC_ERR_DEV)
		goto fail;

	/* revalidate */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	if (dev->xfer_shift == ATA_SHIFT_PIO) {
		/* Old CFA may refuse this command, which is just fine */
		if (ata_id_is_cfa(dev->id))
			ign_dev_err = 1;
		/* Catch several broken garbage emulations plus some pre
		   ATA devices */
		if (ata_id_major_version(dev->id) == 0 &&
					dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;
		/* Some very old devices and some bad newer ones fail
		   any kind of SET_XFERMODE request but support PIO0-2
		   timings and no IORDY */
		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;
	}
	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
	    dev->dma_mode == XFER_MW_DMA_0 &&
	    (dev->id[63] >> 8) & 1)
		ign_dev_err = 1;

	/* if the device is actually configured correctly, ignore dev err */
	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
		ign_dev_err = 1;

	if (err_mask & AC_ERR_DEV) {
		if (!ign_dev_err)
			goto fail;
		else
			dev_err_whine = " (device error ignored)";
	}

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
		       dev_err_whine);

	return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
		       "(err_mask=0x%x)\n", err_mask);
	return -EIO;
}
3230
3231
3232
3233
3234
3235
3236
3237
3238
3239
3240
3241
3242
3243
3244
3245
3246
3247
3248int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3249{
3250 struct ata_port *ap = link->ap;
3251 struct ata_device *dev;
3252 int rc = 0, used_dma = 0, found = 0;
3253
3254
3255 ata_for_each_dev(dev, link, ENABLED) {
3256 unsigned long pio_mask, dma_mask;
3257 unsigned int mode_mask;
3258
3259 mode_mask = ATA_DMA_MASK_ATA;
3260 if (dev->class == ATA_DEV_ATAPI)
3261 mode_mask = ATA_DMA_MASK_ATAPI;
3262 else if (ata_id_is_cfa(dev->id))
3263 mode_mask = ATA_DMA_MASK_CFA;
3264
3265 ata_dev_xfermask(dev);
3266 ata_force_xfermask(dev);
3267
3268 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3269 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3270
3271 if (libata_dma_mask & mode_mask)
3272 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3273 else
3274 dma_mask = 0;
3275
3276 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3277 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3278
3279 found = 1;
3280 if (ata_dma_enabled(dev))
3281 used_dma = 1;
3282 }
3283 if (!found)
3284 goto out;
3285
3286
3287 ata_for_each_dev(dev, link, ENABLED) {
3288 if (dev->pio_mode == 0xff) {
3289 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
3290 rc = -EINVAL;
3291 goto out;
3292 }
3293
3294 dev->xfer_mode = dev->pio_mode;
3295 dev->xfer_shift = ATA_SHIFT_PIO;
3296 if (ap->ops->set_piomode)
3297 ap->ops->set_piomode(ap, dev);
3298 }
3299
3300
3301 ata_for_each_dev(dev, link, ENABLED) {
3302 if (!ata_dma_enabled(dev))
3303 continue;
3304
3305 dev->xfer_mode = dev->dma_mode;
3306 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3307 if (ap->ops->set_dmamode)
3308 ap->ops->set_dmamode(ap, dev);
3309 }
3310
3311
3312 ata_for_each_dev(dev, link, ENABLED) {
3313 rc = ata_dev_set_mode(dev);
3314 if (rc)
3315 goto out;
3316 }
3317
3318
3319
3320
3321 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3322 ap->host->simplex_claimed = ap;
3323
3324 out:
3325 if (rc)
3326 *r_failed_dev = dev;
3327 return rc;
3328}
3329
3330
3331
3332
3333
3334
3335
3336
3337
3338
3339
3340
3341
3342
3343
3344
3345
3346
3347
3348
3349
/**
 *	ata_wait_ready - wait for link to become ready
 *	@link: link to be waited on
 *	@deadline: deadline jiffies for the operation
 *	@check_ready: callback to check link readiness
 *
 *	Wait for @link to become ready.  @check_ready should return
 *	positive number if @link is ready, 0 if it isn't, -ENODEV if
 *	link doesn't seem to be occupied, other errno for other error
 *	conditions.
 *
 *	Transient -ENODEV conditions are tolerated for a while (see
 *	the nodev_deadline handling below).
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_ready(struct ata_link *link, unsigned long deadline,
		   int (*check_ready)(struct ata_link *link))
{
	unsigned long start = jiffies;
	unsigned long nodev_deadline;
	int warned = 0;

	/* choose the transient -ENODEV timeout; longer when ports are
	 * scanned in parallel */
	if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
	else
		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);

	/* Slave readiness can't be tested separately from the master;
	 * this must not be called on a slave link.
	 */
	WARN_ON(link == link->ap->slave_link);

	/* the nodev grace period never extends past the main deadline */
	if (time_after(nodev_deadline, deadline))
		nodev_deadline = deadline;

	while (1) {
		unsigned long now = jiffies;
		int ready, tmp;

		/* tmp keeps the raw value for the warning printout below */
		ready = tmp = check_ready(link);
		if (ready > 0)
			return 0;

		/* -ENODEV could be transient.  Ignore it if the link
		 * is online, and on SATA keep waiting (up to
		 * nodev_deadline) as long as the link isn't reported
		 * offline.
		 */
		if (ready == -ENODEV) {
			if (ata_link_online(link))
				ready = 0;
			else if ((link->ap->flags & ATA_FLAG_SATA) &&
				 !ata_link_offline(link) &&
				 time_before(now, nodev_deadline))
				ready = 0;
		}

		if (ready)
			return ready;
		if (time_after(now, deadline))
			return -EBUSY;

		/* warn once if the link has been slow for >5s and there
		 * is still >3s of waiting left */
		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_link_printk(link, KERN_WARNING,
				"link is slow to respond, please be patient "
				"(ready=%d)\n", tmp);
			warned = 1;
		}

		ata_msleep(link->ap, 50);
	}
}
3416
3417
3418
3419
3420
3421
3422
3423
3424
3425
3426
3427
3428
3429
3430
3431int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3432 int (*check_ready)(struct ata_link *link))
3433{
3434 ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
3435
3436 return ata_wait_ready(link, deadline, check_ready);
3437}
3438
3439
3440
3441
3442
3443
3444
3445
3446
3447
3448
3449
3450
3451
3452
3453
3454
3455
3456
3457
3458
3459
3460
/**
 *	sata_link_debounce - debounce SATA phy status
 *	@link: ATA link to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Poll the DET field of @link's SStatus every @interval msec and
 *	wait until it holds the same value for @duration msec, within
 *	@timeout (further capped by @deadline).  A DET value stuck at
 *	1 is tolerated: polling continues until the deadline and then
 *	succeeds if DET is still stably 1.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval = params[0];	/* polling interval, msec */
	unsigned long duration = params[1];	/* required stable time, msec */
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	/* overall timeout is params[2], never past @deadline */
	t = ata_deadline(jiffies, params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* DET field only */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		ata_msleep(link->ap, interval);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			/* DET == 1 may be stuck; keep polling until deadline */
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies,
				       ata_deadline(last_jiffies, duration)))
				return 0;
			continue;
		}

		/* unstable - restart the stability window */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE so the caller can lower the link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}
3508
3509
3510
3511
3512
3513
3514
3515
3516
3517
3518
3519
3520
3521
3522
/**
 *	sata_link_resume - resume SATA link
 *	@link: ATA link to resume SATA
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Resume SATA phy @link and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_resume(struct ata_link *link, const unsigned long *params,
		     unsigned long deadline)
{
	int tries = ATA_LINK_RESUME_TRIES;
	u32 scontrol, serror;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/* Writes to SControl sometimes don't take on some
	 * controllers; rewrite and re-read until the readback
	 * confirms the value, up to ATA_LINK_RESUME_TRIES times.
	 */
	do {
		/* keep SPD field, clear DET, set IPM to 3 (no LPM) */
		scontrol = (scontrol & 0x0f0) | 0x300;
		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			return rc;

		/* Some PHYs react badly if SStatus is pounded
		 * immediately after resuming.  Delay 200ms before
		 * reading back.
		 */
		ata_msleep(link->ap, 200);

		/* is SControl restored correctly? */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			return rc;
	} while ((scontrol & 0xf0f) != 0x300 && --tries);

	/* give up whining but still return success - deliberate
	 * best-effort: the caller proceeds with reset anyway */
	if ((scontrol & 0xf0f) != 0x300) {
		ata_link_printk(link, KERN_ERR,
				"failed to resume link (SControl %X)\n",
				scontrol);
		return 0;
	}

	if (tries < ATA_LINK_RESUME_TRIES)
		ata_link_printk(link, KERN_WARNING,
				"link resume succeeded after %d retries\n",
				ATA_LINK_RESUME_TRIES - tries);

	if ((rc = sata_link_debounce(link, params, deadline)))
		return rc;

	/* clear SError, some PHYs require this even for SRST */
	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
		rc = sata_scr_write(link, SCR_ERROR, serror);

	/* -EINVAL means SCR access isn't supported - not an error here */
	return rc != -EINVAL ? rc : 0;
}
3575
3576
3577
3578
3579
3580
3581
3582
3583
3584
3585
3586
3587
3588
3589
3590
3591
3592
3593
/**
 *	sata_link_scr_lpm - manipulate SControl IPM and SPM fields
 *	@link: ATA link to manipulate SControl for
 *	@policy: LPM policy to configure
 *	@spm_wakeup: whether to initiate SPM wakeup
 *
 *	Manipulate the IPM field of the link's SControl according to
 *	@policy.  Iff @spm_wakeup, the SPM field is also set to wake
 *	the link up from a power-down state.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
		      bool spm_wakeup)
{
	struct ata_eh_context *ehc = &link->eh_context;
	bool woken_up = false;
	u32 scontrol;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	switch (policy) {
	case ATA_LPM_MAX_POWER:
		/* disable all LPM transitions */
		scontrol |= (0x3 << 8);
		/* initiate transition to active state */
		if (spm_wakeup) {
			scontrol |= (0x4 << 12);
			woken_up = true;
		}
		break;
	case ATA_LPM_MED_POWER:
		/* allow LPM to PARTIAL */
		scontrol &= ~(0x1 << 8);
		scontrol |= (0x2 << 8);
		break;
	case ATA_LPM_MIN_POWER:
		/* no restrictions on LPM transitions */
		scontrol &= ~(0x3 << 8);
		break;
	default:
		WARN_ON(1);
	}

	rc = sata_scr_write(link, SCR_CONTROL, scontrol);
	if (rc)
		return rc;

	/* give the link time to transit out of LPM state */
	if (woken_up)
		msleep(10);

	/* clear PHYRDY_CHG from SError */
	ehc->i.serror &= ~SERR_PHYRDY_CHG;
	return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
}
3641
3642
3643
3644
3645
3646
3647
3648
3649
3650
3651
3652
3653
3654
3655
3656
3657
3658
3659int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3660{
3661 struct ata_port *ap = link->ap;
3662 struct ata_eh_context *ehc = &link->eh_context;
3663 const unsigned long *timing = sata_ehc_deb_timing(ehc);
3664 int rc;
3665
3666
3667 if (ehc->i.action & ATA_EH_HARDRESET)
3668 return 0;
3669
3670
3671 if (ap->flags & ATA_FLAG_SATA) {
3672 rc = sata_link_resume(link, timing, deadline);
3673
3674 if (rc && rc != -EOPNOTSUPP)
3675 ata_link_printk(link, KERN_WARNING, "failed to resume "
3676 "link for reset (errno=%d)\n", rc);
3677 }
3678
3679
3680 if (ata_phys_link_offline(link))
3681 ehc->i.action &= ~ATA_EH_SOFTRESET;
3682
3683 return 0;
3684}
3685
3686
3687
3688
3689
3690
3691
3692
3693
3694
3695
3696
3697
3698
3699
3700
3701
3702
3703
3704
3705
3706
3707
3708
3709
/**
 *	sata_link_hardreset - reset link via SATA phy reset
 *	@link: link to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *	@online: optional out parameter indicating link onlineness
 *	@check_ready: optional callback to wait for link readiness
 *
 *	SATA phy-reset @link using the DET bits of the SControl
 *	register.  After hardreset, link readiness is waited upon
 *	using ata_wait_ready() if @check_ready is specified.  LLDs
 *	may omit @check_ready and wait themselves after this function
 *	returns.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
			unsigned long deadline,
			bool *online, int (*check_ready)(struct ata_link *))
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (online)
		*online = false;

	if (sata_set_spd_needed(link)) {
		/* A speed limit is pending: SATA spec says nothing
		 * about how to reconfigure spd; go offline (DET=4)
		 * before changing it to be on the safe side.
		 */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			goto out;

		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(link);
	}

	/* issue phy wake/reset: DET=1 initiates COMRESET */
	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		goto out;

	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
		goto out;

	/* DET must be held for a minimum interval before being
	 * cleared again - 1 ms is used here (cf. AHCI 1.1, 10.4.2).
	 */
	ata_msleep(link->ap, 1);

	/* bring link back (clears DET and debounces) */
	rc = sata_link_resume(link, timing, deadline);
	if (rc)
		goto out;

	/* if link is offline nothing more to do */
	if (ata_phys_link_offline(link))
		goto out;

	/* Link is online.  From this point, -ENODEV too is an error. */
	if (online)
		*online = true;

	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
		/* If a PMP is attached, this host link is a control
		 * link; wait briefly for it, then ask the caller to
		 * retry via PMP handling (-EAGAIN).
		 */
		if (check_ready) {
			unsigned long pmp_deadline;

			pmp_deadline = ata_deadline(jiffies,
						    ATA_TMOUT_PMP_SRST_WAIT);
			if (time_after(pmp_deadline, deadline))
				pmp_deadline = deadline;
			ata_wait_ready(link, pmp_deadline, check_ready);
		}
		rc = -EAGAIN;
		goto out;
	}

	rc = 0;
	if (check_ready)
		rc = ata_wait_ready(link, deadline, check_ready);
 out:
	if (rc && rc != -EAGAIN) {
		/* online is set iff link is online && reset succeeded */
		if (online)
			*online = false;
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
	}
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
3798
3799
3800
3801
3802
3803
3804
3805
3806
3807
3808
3809
3810
3811
3812
3813int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3814 unsigned long deadline)
3815{
3816 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3817 bool online;
3818 int rc;
3819
3820
3821 rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3822 return online ? -EAGAIN : rc;
3823}
3824
3825
3826
3827
3828
3829
3830
3831
3832
3833
3834
3835
3836
3837void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3838{
3839 u32 serror;
3840
3841 DPRINTK("ENTER\n");
3842
3843
3844 if (!sata_scr_read(link, SCR_ERROR, &serror))
3845 sata_scr_write(link, SCR_ERROR, serror);
3846
3847
3848 sata_print_link_status(link);
3849
3850 DPRINTK("EXIT\n");
3851}
3852
3853
3854
3855
3856
3857
3858
3859
3860
3861
3862
3863
3864
3865
3866
3867
3868
3869static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3870 const u16 *new_id)
3871{
3872 const u16 *old_id = dev->id;
3873 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3874 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3875
3876 if (dev->class != new_class) {
3877 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3878 dev->class, new_class);
3879 return 0;
3880 }
3881
3882 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3883 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3884 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3885 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3886
3887 if (strcmp(model[0], model[1])) {
3888 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3889 "'%s' != '%s'\n", model[0], model[1]);
3890 return 0;
3891 }
3892
3893 if (strcmp(serial[0], serial[1])) {
3894 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3895 "'%s' != '%s'\n", serial[0], serial[1]);
3896 return 0;
3897 }
3898
3899 return 1;
3900}
3901
3902
3903
3904
3905
3906
3907
3908
3909
3910
3911
3912
3913
3914
3915
3916int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3917{
3918 unsigned int class = dev->class;
3919 u16 *id = (void *)dev->link->ap->sector_buf;
3920 int rc;
3921
3922
3923 rc = ata_dev_read_id(dev, &class, readid_flags, id);
3924 if (rc)
3925 return rc;
3926
3927
3928 if (!ata_dev_same_device(dev, class, id))
3929 return -ENODEV;
3930
3931 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3932 return 0;
3933}
3934
3935
3936
3937
3938
3939
3940
3941
3942
3943
3944
3945
3946
3947
3948
3949
/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@new_class: new class code
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page, make sure @dev is still attached to the
 *	port and reconfigure it according to the new IDENTIFY page.
 *	Includes heuristics to distinguish late HPA lock/unlock from a
 *	genuine device swap when n_sectors changes across reads.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
		       unsigned int readid_flags)
{
	u64 n_sectors = dev->n_sectors;
	u64 n_native_sectors = dev->n_native_sectors;
	int rc;

	if (!ata_dev_enabled(dev))
		return -ENODEV;

	/* fail early if the new class isn't one we can handle */
	if (ata_class_enabled(new_class) &&
	    new_class != ATA_DEV_ATA &&
	    new_class != ATA_DEV_ATAPI &&
	    new_class != ATA_DEV_SEMB) {
		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
			       dev->class, new_class);
		rc = -ENODEV;
		goto fail;
	}

	/* re-read ID */
	rc = ata_dev_reread_id(dev, readid_flags);
	if (rc)
		goto fail;

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc)
		goto fail;

	/* verify n_sectors hasn't changed (only meaningful for ATA disks) */
	if (dev->class != ATA_DEV_ATA || !n_sectors ||
	    dev->n_sectors == n_sectors)
		return 0;

	/* n_sectors has changed */
	ata_dev_printk(dev, KERN_WARNING, "n_sectors mismatch %llu != %llu\n",
		       (unsigned long long)n_sectors,
		       (unsigned long long)dev->n_sectors);

	/* If the new size matches the native size and is larger than
	 * before, the HPA was probably unlocked after the previous
	 * read - accept the new, larger size.
	 */
	if (dev->n_native_sectors == n_native_sectors &&
	    dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
		ata_dev_printk(dev, KERN_WARNING,
			       "new n_sectors matches native, probably "
			       "late HPA unlock, n_sectors updated\n");
		/* use the larger n_sectors */
		return 0;
	}

	/* If the old size matched native and the device shrank, an
	 * HPA was probably enabled after the previous read - request
	 * an HPA unlock and retry (via -EIO), unless the device is
	 * known to have broken HPA handling.
	 */
	if (dev->n_native_sectors == n_native_sectors &&
	    dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
	    !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
		ata_dev_printk(dev, KERN_WARNING,
			       "old n_sectors matches native, probably "
			       "late HPA lock, will try to unlock HPA\n");
		/* try unlocking HPA */
		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
		rc = -EIO;
	} else
		rc = -ENODEV;

	/* restore original n_[native_]sectors and fail */
	dev->n_native_sectors = n_native_sectors;
	dev->n_sectors = n_sectors;
 fail:
	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
4030
/* one entry in the device quirk table below */
struct ata_blacklist_entry {
	const char *model_num;		/* glob pattern for model string */
	const char *model_rev;		/* glob pattern for firmware rev, NULL matches any */
	unsigned long horkage;		/* ATA_HORKAGE_* flags applied on match */
};
4036
static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
	{ "CRD-848[02]B",	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8[34]35",NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
	/* Odd clown on sil3726/4726 PMPs */
	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA },

	/* Devices where NCQ should be avoided */
	/* NCQ is slow */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
	/* http://thread.gmane.org/gmane.linux.ide/14907 */
	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
	/* NCQ is broken */
	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
	{ "OCZ CORE_SSD",	"02.10104",	ATA_HORKAGE_NONCQ },

	/* Seagate NCQ + FLUSH CACHE firmware bug */
	{ "ST31500341AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST31000333AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST3640[36]23AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST3320[68]13AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	/* Blacklist entries taken from Silicon Image 3124/3132
	   Windows driver .inf file - also several Linux problem reports */
	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },

	/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
	{ "C300-CTFDDAC128MAG",	"0001",		ATA_HORKAGE_NONCQ, },

	/* Devices with broken HPA (Host Protected Area) handling */
	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },

	/* this one allows HPA unlocking but fails IO on the area */
	{ "OCZ-VERTEX",		    "1.30",	ATA_HORKAGE_BROKEN_HPA },

	/* Devices which report 1 sector over size HPA */
	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },

	/* Devices which get the IVB wrong */
	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
	/* Maybe we should just blacklist TSSTcorp... */
	{ "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]",  ATA_HORKAGE_IVB, },

	/* Devices that do not need bridging limits applied */
	{ "MTRON MSP-SATA*",		NULL,	ATA_HORKAGE_BRIDGE_OK, },

	/* Devices which aren't very happy with higher link speeds */
	{ "WD My Book",			NULL,	ATA_HORKAGE_1_5_GBPS, },

	/*
	 * Devices which choke on SETXFER.  Applies only if both the
	 * device and controller are SATA.
	 */
	{ "PIONEER DVD-RW  DVRTD08",	"1.00",	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVR-212D",	"1.28", ATA_HORKAGE_NOSETXFER },

	/* End Marker */
	{ }
};
4146
4147
4148
4149
4150
4151
4152
4153
4154
4155
4156
4157
4158
4159
4160
4161
4162
4163
4164
4165
4166
4167
4168
4169
4170
4171
4172
4173
/**
 *	glob_match - match a text against a glob-style pattern
 *	@text: the string to be examined
 *	@pattern: the glob-style pattern to be matched against
 *
 *	Either/both of text and pattern can be empty strings.
 *
 *	Match text against a glob-style pattern, with wildcards and
 *	simple sets:
 *
 *		?	matches any single character.
 *		*	matches any run of characters.
 *		[xyz]	matches a single character from the set.
 *		[a-d]	matches a single character from the range.
 *		[a-d0-9] matches a single character from either range.
 *
 *	Note: hyphenated ranges in [chars], e.g. "0-9", are permitted;
 *	the range endpoints match via the direct comparison path.
 *	Hyphens without a preceding character cannot match specially.
 *
 *	Example patterns:  "SD1[5-9]", "*R0", "SD*1?[012]*xx"
 *
 *	RETURNS:
 *	0 on match, 1 otherwise (strcmp()-like inverted convention).
 */
static int glob_match (const char *text, const char *pattern)
{
	do {
		/* Match single character or a '?' wildcard */
		if (*text == *pattern || *pattern == '?') {
			if (!*pattern++)
				return 0;  /* End of both strings: match */
		} else {
			/* Match single char against a '[' bracketed ']' pattern set */
			if (!*text || *pattern != '[')
				break;  /* Not a pattern set */
			/* scan the set; stop at ']', end, or a direct hit */
			while (*++pattern && *pattern != ']' && *text != *pattern) {
				/* 'x-y' range: check text char against the bounds */
				if (*pattern == '-' && *(pattern - 1) != '[')
					if (*text > *(pattern - 1) && *text < *(pattern + 1)) {
						++pattern;
						break;
					}
			}
			if (!*pattern || *pattern == ']')
				return 1;  /* No match */
			/* matched inside the set - skip the rest of it */
			while (*pattern && *pattern++ != ']');
		}
	} while (*++text && *pattern);

	/* Match any run of chars against a '*' wildcard */
	if (*pattern == '*') {
		if (!*++pattern)
			return 0;  /* Match: avoid recursion at end of pattern */
		/* Loop: matching *pattern until matches rest of text */
		while (*text) {
			if (glob_match(text, pattern) == 0)
				return 0;  /* Remainder matched */
			++text;  /* Advance to next possible match */
		}
	}
	if (!*text && !*pattern)
		return 0;  /* End of both strings: match */
	return 1;  /* No match */
}
4213
4214static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4215{
4216 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4217 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4218 const struct ata_blacklist_entry *ad = ata_device_blacklist;
4219
4220 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4221 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4222
4223 while (ad->model_num) {
4224 if (!glob_match(model_num, ad->model_num)) {
4225 if (ad->model_rev == NULL)
4226 return ad->horkage;
4227 if (!glob_match(model_rev, ad->model_rev))
4228 return ad->horkage;
4229 }
4230 ad++;
4231 }
4232 return 0;
4233}
4234
4235static int ata_dma_blacklisted(const struct ata_device *dev)
4236{
4237
4238
4239
4240
4241 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4242 (dev->flags & ATA_DFLAG_CDB_INTR))
4243 return 1;
4244 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4245}
4246
4247
4248
4249
4250
4251
4252
4253
4254
4255static int ata_is_40wire(struct ata_device *dev)
4256{
4257 if (dev->horkage & ATA_HORKAGE_IVB)
4258 return ata_drive_40wire_relaxed(dev->id);
4259 return ata_drive_40wire(dev->id);
4260}
4261
4262
4263
4264
4265
4266
4267
4268
4269
4270
4271
4272
4273
4274
4275static int cable_is_40wire(struct ata_port *ap)
4276{
4277 struct ata_link *link;
4278 struct ata_device *dev;
4279
4280
4281 if (ap->cbl == ATA_CBL_PATA40)
4282 return 1;
4283
4284
4285 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4286 return 0;
4287
4288
4289
4290
4291
4292 if (ap->cbl == ATA_CBL_PATA40_SHORT)
4293 return 0;
4294
4295
4296
4297
4298
4299
4300
4301
4302
4303
4304 ata_for_each_link(link, ap, EDGE) {
4305 ata_for_each_dev(dev, link, ENABLED) {
4306 if (!ata_is_40wire(dev))
4307 return 0;
4308 }
4309 }
4310 return 1;
4311}
4312
4313
4314
4315
4316
4317
4318
4319
4320
4321
4322
4323
4324
/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->pio_mask, dev->mwdma_mask and dev->udma_mask.  This
 *	function is responsible for applying all known limits
 *	including host controller limits, device quirks, simplex
 *	restrictions and cable detection.
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 *	CFA Advanced TrueIDE timings are not allowed on a shared
	 *	cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING,
			       "device is on DMA blacklist, disabling DMA\n");
	}

	/* on simplex hosts only one port may use DMA at a time */
	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
			       "other device, disabling DMA\n");
	}

	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/* Apply the cable rule last so we know if the transfer rate
	 * was solely limited by the cable (the cable type can change
	 * across hotplug).  Modes above UDMA/33 (mask 0xF8 in the
	 * UDMA field) need an 80-wire cable.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if (cable_is_40wire(ap)) {
			ata_dev_printk(dev, KERN_WARNING,
				       "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
4390
4391
4392
4393
4394
4395
4396
4397
4398
4399
4400
4401
4402
4403
4404
/**
 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 *	@dev: Device to which command will be sent
 *
 *	Issue SET FEATURES - XFER MODE command to device @dev
 *	on port @ap.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	/* ATA_TFLAG_POLLING: some devices/controllers misbehave with
	 * interrupts after SET FEATURES - XFER; poll instead.
	 */
	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
	tf.protocol = ATA_PROT_NODATA;

	/* If we are using IORDY we must send the mode setting command */
	if (ata_pio_need_iordy(dev))
		tf.nsect = dev->xfer_mode;
	/* If the device has IORDY and the controller does not - turn it off */
	else if (ata_id_has_iordy(dev->id))
		tf.nsect = 0x01;
	else /* In the ancient relic department - skip all of this */
		return 0;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4435
4436
4437
4438
4439
4440
4441
4442
4443
4444
4445
4446
4447
4448
4449
4450
4451unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
4452{
4453 struct ata_taskfile tf;
4454 unsigned int err_mask;
4455
4456
4457 DPRINTK("set features - SATA features\n");
4458
4459 ata_tf_init(dev, &tf);
4460 tf.command = ATA_CMD_SET_FEATURES;
4461 tf.feature = enable;
4462 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4463 tf.protocol = ATA_PROT_NODATA;
4464 tf.nsect = feature;
4465
4466 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4467
4468 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4469 return err_mask;
4470}
4471
4472
4473
4474
4475
4476
4477
4478
4479
4480
4481
4482
4483
4484static unsigned int ata_dev_init_params(struct ata_device *dev,
4485 u16 heads, u16 sectors)
4486{
4487 struct ata_taskfile tf;
4488 unsigned int err_mask;
4489
4490
4491 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4492 return AC_ERR_INVALID;
4493
4494
4495 DPRINTK("init dev params \n");
4496
4497 ata_tf_init(dev, &tf);
4498 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4499 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4500 tf.protocol = ATA_PROT_NODATA;
4501 tf.nsect = sectors;
4502 tf.device |= (heads - 1) & 0x0f;
4503
4504 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4505
4506
4507
4508 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4509 err_mask = 0;
4510
4511 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4512 return err_mask;
4513}
4514
4515
4516
4517
4518
4519
4520
4521
4522
4523
4524void ata_sg_clean(struct ata_queued_cmd *qc)
4525{
4526 struct ata_port *ap = qc->ap;
4527 struct scatterlist *sg = qc->sg;
4528 int dir = qc->dma_dir;
4529
4530 WARN_ON_ONCE(sg == NULL);
4531
4532 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4533
4534 if (qc->n_elem)
4535 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
4536
4537 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4538 qc->sg = NULL;
4539}
4540
4541
4542
4543
4544
4545
4546
4547
4548
4549
4550
4551
4552
4553
4554
4555int atapi_check_dma(struct ata_queued_cmd *qc)
4556{
4557 struct ata_port *ap = qc->ap;
4558
4559
4560
4561
4562 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4563 unlikely(qc->nbytes & 15))
4564 return 1;
4565
4566 if (ap->ops->check_atapi_dma)
4567 return ap->ops->check_atapi_dma(qc);
4568
4569 return 0;
4570}
4571
4572
4573
4574
4575
4576
4577
4578
4579
4580
4581
4582
4583
4584
4585
4586
4587int ata_std_qc_defer(struct ata_queued_cmd *qc)
4588{
4589 struct ata_link *link = qc->dev->link;
4590
4591 if (qc->tf.protocol == ATA_PROT_NCQ) {
4592 if (!ata_tag_valid(link->active_tag))
4593 return 0;
4594 } else {
4595 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4596 return 0;
4597 }
4598
4599 return ATA_DEFER_LINK;
4600}
4601
/* ->qc_prep hook for controllers that need no per-command preparation */
void ata_noop_qc_prep(struct ata_queued_cmd *qc)
{
}
4603
4604
4605
4606
4607
4608
4609
4610
4611
4612
4613
4614
4615
4616
4617void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4618 unsigned int n_elem)
4619{
4620 qc->sg = sg;
4621 qc->n_elem = n_elem;
4622 qc->cursg = qc->sg;
4623}
4624
4625
4626
4627
4628
4629
4630
4631
4632
4633
4634
4635
4636
4637
4638static int ata_sg_setup(struct ata_queued_cmd *qc)
4639{
4640 struct ata_port *ap = qc->ap;
4641 unsigned int n_elem;
4642
4643 VPRINTK("ENTER, ata%u\n", ap->print_id);
4644
4645 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4646 if (n_elem < 1)
4647 return -1;
4648
4649 DPRINTK("%d sg elements mapped\n", n_elem);
4650 qc->orig_n_elem = qc->n_elem;
4651 qc->n_elem = n_elem;
4652 qc->flags |= ATA_QCFLAG_DMAMAP;
4653
4654 return 0;
4655}
4656
4657
4658
4659
4660
4661
4662
4663
4664
4665
4666
4667
4668
4669void swap_buf_le16(u16 *buf, unsigned int buf_words)
4670{
4671#ifdef __BIG_ENDIAN
4672 unsigned int i;
4673
4674 for (i = 0; i < buf_words; i++)
4675 buf[i] = le16_to_cpu(buf[i]);
4676#endif
4677}
4678
4679
4680
4681
4682
4683
4684
4685
4686
4687static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4688{
4689 struct ata_queued_cmd *qc = NULL;
4690 unsigned int i;
4691
4692
4693 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4694 return NULL;
4695
4696
4697 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4698 if (!test_and_set_bit(i, &ap->qc_allocated)) {
4699 qc = __ata_qc_from_tag(ap, i);
4700 break;
4701 }
4702
4703 if (qc)
4704 qc->tag = i;
4705
4706 return qc;
4707}
4708
4709
4710
4711
4712
4713
4714
4715
4716
4717struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4718{
4719 struct ata_port *ap = dev->link->ap;
4720 struct ata_queued_cmd *qc;
4721
4722 qc = ata_qc_new(ap);
4723 if (qc) {
4724 qc->scsicmd = NULL;
4725 qc->ap = ap;
4726 qc->dev = dev;
4727
4728 ata_qc_reinit(qc);
4729 }
4730
4731 return qc;
4732}
4733
4734
4735
4736
4737
4738
4739
4740
4741
4742
4743
4744void ata_qc_free(struct ata_queued_cmd *qc)
4745{
4746 struct ata_port *ap;
4747 unsigned int tag;
4748
4749 WARN_ON_ONCE(qc == NULL);
4750 ap = qc->ap;
4751
4752 qc->flags = 0;
4753 tag = qc->tag;
4754 if (likely(ata_tag_valid(tag))) {
4755 qc->tag = ATA_TAG_POISON;
4756 clear_bit(tag, &ap->qc_allocated);
4757 }
4758}
4759
/**
 *	__ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Unmaps DMA memory if mapped, updates per-link and per-port
 *	active-command accounting, clears the exclusive-link claim if
 *	this command held it, and invokes the command's completion
 *	callback.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap;
	struct ata_link *link;

	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
	ap = qc->ap;
	link = qc->dev->link;

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		link->sactive &= ~(1 << qc->tag);
		/* link goes idle only when the last NCQ command retires */
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error
	 * handler is called.
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
4798
4799static void fill_result_tf(struct ata_queued_cmd *qc)
4800{
4801 struct ata_port *ap = qc->ap;
4802
4803 qc->result_tf.flags = qc->tf.flags;
4804 ap->ops->qc_fill_rtf(qc);
4805}
4806
4807static void ata_verify_xfer(struct ata_queued_cmd *qc)
4808{
4809 struct ata_device *dev = qc->dev;
4810
4811 if (ata_is_nodata(qc->tf.protocol))
4812 return;
4813
4814 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4815 return;
4816
4817 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4818}
4819
4820
4821
4822
4823
4824
4825
4826
4827
4828
4829
4830
4831
4832
4833
4834
/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA command has
 *	completed, with either an ok or not-ok status.
 *
 *	Refrain from calling this function multiple times when
 *	successfully completing multiple NCQ commands.
 *	ata_qc_complete_multiple() should be used instead.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * The normal execution path is responsible for not accessing
	 * a failed qc.  libata core enforces the rule by returning
	 * NULL from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.
	 */
	if (ap->ops->error_handler) {
		struct ata_device *dev = qc->dev;
		struct ata_eh_info *ehi = &dev->link->eh_info;

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		/* Finish internal commands without any further
		 * processing and always fill result TF for them.
		 */
		if (unlikely(ata_tag_internal(qc->tag))) {
			fill_result_tf(qc);
			__ata_qc_complete(qc);
			return;
		}

		/* Non-internal qc has failed.  Fill the result TF and
		 * summon EH.
		 */
		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			fill_result_tf(qc);
			ata_qc_schedule_eh(qc);
			return;
		}

		/* successful completions must not occur on a frozen port */
		WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		/* Some commands invalidate cached device
		 * configuration; schedule EH revalidation for them.
		 */
		switch (qc->tf.command) {
		case ATA_CMD_SET_FEATURES:
			if (qc->tf.feature != SETFEATURES_WC_ON &&
			    qc->tf.feature != SETFEATURES_WC_OFF)
				break;
			/* fall through */
		case ATA_CMD_INIT_DEV_PARAMS:
		case ATA_CMD_SET_MULTI:
			/* revalidate device */
			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
			ata_port_schedule_eh(ap);
			break;

		case ATA_CMD_SLEEP:
			dev->flags |= ATA_DFLAG_SLEEPING;
			break;
		}

		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
			ata_verify_xfer(qc);

		__ata_qc_complete(qc);
	} else {
		/* old EH: EH-scheduled commands are nullified here */
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
4921
4922
4923
4924
4925
4926
4927
4928
4929
4930
4931
4932
4933
4934
4935
4936
4937
4938
4939
4940
4941
/**
 *	ata_qc_complete_multiple - Complete multiple qcs successfully
 *	@ap: port in question
 *	@qc_active: new qc_active mask
 *
 *	Complete in-flight commands.  Called from the interrupt path of
 *	NCQ-capable drivers: @qc_active is the controller's current view
 *	of active tags; any tag set in ap->qc_active but clear in
 *	@qc_active has completed.  A tag set in @qc_active but clear in
 *	ap->qc_active is an illegal transition and aborts the call.
 *
 *	RETURNS:
 *	Number of completed commands on success, -errno otherwise.
 */
int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
{
	int nr_done = 0;
	u32 done_mask;

	/* XOR leaves exactly the tags whose state changed. */
	done_mask = ap->qc_active ^ qc_active;

	if (unlikely(done_mask & qc_active)) {
		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
				"(%08x->%08x)\n", ap->qc_active, qc_active);
		return -EINVAL;
	}

	/* Complete each done tag, lowest first. */
	while (done_mask) {
		struct ata_queued_cmd *qc;
		unsigned int tag = __ffs(done_mask);

		qc = ata_qc_from_tag(ap, tag);
		if (qc) {
			ata_qc_complete(qc);
			nr_done++;
		}
		done_mask &= ~(1 << tag);
	}

	return nr_done;
}
4969
4970
4971
4972
4973
4974
4975
4976
4977
4978
4979
4980
4981
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command to submission to device: account the tag
 *	in the link/port active masks, set up DMA mappings if needed and
 *	hand the qc to the LLD via qc_prep/qc_issue.  On any failure the
 *	qc is completed with an error mask set.
 *
 *	LOCKING:
 *	Manipulates link->sactive and ap->qc_active, so the caller is
 *	expected to hold the host lock (not verifiable from this file).
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;
	u8 prot = qc->tf.protocol;

	/* With new EH, a non-NCQ command must not be issued while
	 * another command is active on the link. */
	WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	if (ata_is_ncq(prot)) {
		WARN_ON_ONCE(link->sactive & (1 << qc->tag));

		/* First NCQ tag on this link makes the link active. */
		if (!link->sactive)
			ap->nr_active_links++;
		link->sactive |= 1 << qc->tag;
	} else {
		WARN_ON_ONCE(link->sactive);

		ap->nr_active_links++;
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	/* A data command must carry a non-empty sg table. */
	if (WARN_ON_ONCE(ata_is_data(prot) &&
			 (!qc->sg || !qc->n_elem || !qc->nbytes)))
		goto sys_err;

	/* Map for DMA, or for PIO when the controller does PIO via DMA. */
	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
				 (ap->flags & ATA_FLAG_PIO_DMA)))
		if (ata_sg_setup(qc))
			goto sys_err;

	/* A sleeping device must be woken by a reset before it can
	 * accept commands; abort to EH instead of issuing. */
	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
		link->eh_info.action |= ATA_EH_RESET;
		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
		ata_link_abort(link);
		return;
	}

	ap->ops->qc_prep(qc);

	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sys_err:
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	ata_qc_complete(qc);
}
5043
5044
5045
5046
5047
5048
5049
5050
5051
5052
5053
5054
5055
5056int sata_scr_valid(struct ata_link *link)
5057{
5058 struct ata_port *ap = link->ap;
5059
5060 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5061}
5062
5063
5064
5065
5066
5067
5068
5069
5070
5071
5072
5073
5074
5075
5076
5077
5078
5079int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5080{
5081 if (ata_is_host_link(link)) {
5082 if (sata_scr_valid(link))
5083 return link->ap->ops->scr_read(link, reg, val);
5084 return -EOPNOTSUPP;
5085 }
5086
5087 return sata_pmp_scr_read(link, reg, val);
5088}
5089
5090
5091
5092
5093
5094
5095
5096
5097
5098
5099
5100
5101
5102
5103
5104
5105
5106int sata_scr_write(struct ata_link *link, int reg, u32 val)
5107{
5108 if (ata_is_host_link(link)) {
5109 if (sata_scr_valid(link))
5110 return link->ap->ops->scr_write(link, reg, val);
5111 return -EOPNOTSUPP;
5112 }
5113
5114 return sata_pmp_scr_write(link, reg, val);
5115}
5116
5117
5118
5119
5120
5121
5122
5123
5124
5125
5126
5127
5128
5129
5130
5131
5132int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5133{
5134 if (ata_is_host_link(link)) {
5135 int rc;
5136
5137 if (sata_scr_valid(link)) {
5138 rc = link->ap->ops->scr_write(link, reg, val);
5139 if (rc == 0)
5140 rc = link->ap->ops->scr_read(link, reg, &val);
5141 return rc;
5142 }
5143 return -EOPNOTSUPP;
5144 }
5145
5146 return sata_pmp_scr_write(link, reg, val);
5147}
5148
5149
5150
5151
5152
5153
5154
5155
5156
5157
5158
5159
5160
5161
5162
5163bool ata_phys_link_online(struct ata_link *link)
5164{
5165 u32 sstatus;
5166
5167 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5168 ata_sstatus_online(sstatus))
5169 return true;
5170 return false;
5171}
5172
5173
5174
5175
5176
5177
5178
5179
5180
5181
5182
5183
5184
5185
5186
5187bool ata_phys_link_offline(struct ata_link *link)
5188{
5189 u32 sstatus;
5190
5191 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5192 !ata_sstatus_online(sstatus))
5193 return true;
5194 return false;
5195}
5196
5197
5198
5199
5200
5201
5202
5203
5204
5205
5206
5207
5208
5209
5210
5211
5212
5213bool ata_link_online(struct ata_link *link)
5214{
5215 struct ata_link *slave = link->ap->slave_link;
5216
5217 WARN_ON(link == slave);
5218
5219 return ata_phys_link_online(link) ||
5220 (slave && ata_phys_link_online(slave));
5221}
5222
5223
5224
5225
5226
5227
5228
5229
5230
5231
5232
5233
5234
5235
5236
5237
5238
5239bool ata_link_offline(struct ata_link *link)
5240{
5241 struct ata_link *slave = link->ap->slave_link;
5242
5243 WARN_ON(link == slave);
5244
5245 return ata_phys_link_offline(link) &&
5246 (!slave || ata_phys_link_offline(slave));
5247}
5248
5249#ifdef CONFIG_PM
/*
 * ata_host_request_pm - schedule a PM operation on every port via EH
 * @host: host to operate on
 * @mesg: PM message to deliver
 * @action: EH action to request on each link
 * @ehi_flags: extra eh_info flags for each link
 * @wait: if true, wait for each port's EH to finish and propagate rc
 *
 * The actual PM work is done by the EH thread; this function only
 * records the request per-port and kicks EH.  Returns 0 on success or
 * the first port's error when @wait is set.
 */
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_link *link;

		/* A previous PM request must fully drain before a new one
		 * is queued on this port. */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			rc = 0;
			/* EH writes its result through this pointer. */
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		ata_for_each_link(link, ap, HOST_FIRST) {
			link->eh_info.action |= action;
			link->eh_info.flags |= ehi_flags;
		}

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}
5299
5300
5301
5302
5303
5304
5305
5306
5307
5308
5309
5310
5311
5312
5313
5314
/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Suspend @host by requesting PM operation on every port via EH.
 *	On success the device's power state is recorded in
 *	host->dev->power.power_state.
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	unsigned int ehi_flags = ATA_EHI_QUIET;
	int rc;

	/* For a plain suspend (as opposed to e.g. hibernation freeze),
	 * skip autopsy and recovery -- the ports are going down anyway
	 * and will be reset on resume. */
	if (mesg.event == PM_EVENT_SUSPEND)
		ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;

	rc = ata_host_request_pm(host, mesg, 0, ehi_flags, 1);
	if (rc == 0)
		host->dev->power.power_state = mesg;
	return rc;
}
5336
5337
5338
5339
5340
5341
5342
5343
5344
5345
5346
5347
/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host by scheduling a reset on every port via EH (without
 *	waiting) and mark the host as powered on.
 */
void ata_host_resume(struct ata_host *host)
{
	ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
	host->dev->power.power_state = PMSG_ON;
}
5354#endif
5355
5356
5357
5358
5359
5360
5361
5362
5363
5364
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.  Clears the probing-
 *	relevant part of the structure (between ATA_DEVICE_CLEAR_BEGIN
 *	and ATA_DEVICE_CLEAR_END) and resets the link's SPD limit.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = ata_dev_phys_link(dev);
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the attached device, reset together */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/* flags and horkage are accessed from other contexts too, so
	 * clear them under the port lock; the bulk memset below only
	 * touches fields in the CLEAR range. */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
	/* start with "no transfer-mode restriction" masks */
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
5390
5391
5392
5393
5394
5395
5396
5397
5398
5399
5400
5401
/**
 *	ata_link_init - Initialize an ata_link structure
 *	@ap: ATA port the link belongs to
 *	@link: Link structure to initialize
 *	@pmp: Port multiplier port number this link corresponds to
 *
 *	Initialize @link and its devices for @ap.  Only the range between
 *	ATA_LINK_CLEAR_BEGIN and ATA_LINK_CLEAR_END is zeroed so that
 *	fields outside it survive re-initialization.
 */
void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
{
	int i;

	/* clear everything except for devices */
	memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
	       ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);

	link->ap = ap;
	link->pmp = pmp;
	link->active_tag = ATA_TAG_POISON;
	link->hw_sata_spd_limit = UINT_MAX;

	/* can't use iterator, link not registered yet */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &link->device[i];

		dev->link = link;
		dev->devno = dev - link->device;
#ifdef CONFIG_ATA_ACPI
		dev->gtf_filter = ata_acpi_gtf_filter;
#endif
		ata_dev_init(dev);
	}
}
5427
5428
5429
5430
5431
5432
5433
5434
5435
5436
5437
5438
5439
5440
/**
 *	sata_link_init_spd - Initialize link->sata_spd_limit
 *	@link: Link to configure sata_spd_limit for
 *
 *	Read SControl, record it in link->saved_scontrol and derive the
 *	hardware SPD limit from its SPD field, then apply any forced
 *	link limits and seed the runtime limit from the hardware one.
 *
 *	RETURNS:
 *	0 on success, -errno on failure (SCR read failure).
 */
int sata_link_init_spd(struct ata_link *link)
{
	u8 spd;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
	if (rc)
		return rc;

	/* SPD limit field: bits 7:4 of SControl; 0 means no limit */
	spd = (link->saved_scontrol >> 4) & 0xf;
	if (spd)
		link->hw_sata_spd_limit &= (1 << spd) - 1;

	ata_force_link_limits(link);

	link->sata_spd_limit = link->hw_sata_spd_limit;

	return 0;
}
5460
5461
5462
5463
5464
5465
5466
5467
5468
5469
5470
5471
5472
/**
 *	ata_port_alloc - allocate and initialize basic ATA port resources
 *	@host: ATA host this allocated port belongs to
 *
 *	Allocate and initialize a basic ATA port structure.  The caller
 *	owns the returned port and must free it (ata_host_release() does
 *	this for ports registered in host->ports[]).
 *
 *	RETURNS:
 *	Allocated ATA port on success, NULL on failure.
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	ap->pflags |= ATA_PFLAG_INITIALIZING;
	ap->lock = &host->lock;		/* ports share the host lock */
	ap->print_id = -1;		/* assigned at registration time */
	ap->host = host;
	ap->dev = host->dev;

	/* message verbosity depends on build-time debug level */
#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	mutex_init(&ap->scsi_scan_mutex);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);
	init_completion(&ap->park_req_pending);
	init_timer_deferrable(&ap->fastdrain_timer);
	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
	ap->fastdrain_timer.data = (unsigned long)ap;

	ap->cbl = ATA_CBL_NONE;

	/* initialize the port's own (host) link, pmp 0 */
	ata_link_init(ap, &ap->link, 0);

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	ata_sff_port_init(ap);

	return ap;
}
5520
5521static void ata_host_release(struct device *gendev, void *res)
5522{
5523 struct ata_host *host = dev_get_drvdata(gendev);
5524 int i;
5525
5526 for (i = 0; i < host->n_ports; i++) {
5527 struct ata_port *ap = host->ports[i];
5528
5529 if (!ap)
5530 continue;
5531
5532 if (ap->scsi_host)
5533 scsi_host_put(ap->scsi_host);
5534
5535 kfree(ap->pmp_link);
5536 kfree(ap->slave_link);
5537 kfree(ap);
5538 host->ports[i] = NULL;
5539 }
5540
5541 dev_set_drvdata(gendev, NULL);
5542}
5543
5544
5545
5546
5547
5548
5549
5550
5551
5552
5553
5554
5555
5556
5557
5558
5559
5560
5561
5562
5563
/**
 *	ata_host_alloc - allocate and init basic ATA host resources
 *	@dev: generic device this host is associated with
 *	@max_ports: maximum number of ATA ports associated with this host
 *
 *	Allocate and initialize a basic ATA host resource as a devres of
 *	@dev, with @max_ports ports allocated up front.  All allocations
 *	are grouped in a devres group so that a partial failure unwinds
 *	cleanly.
 *
 *	RETURNS:
 *	Allocated ATA host on success, NULL on failure.
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;

	DPRINTK("ENTER\n");

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return NULL;

	/* alloc a container for our list of ATA ports (buses);
	 * +1 leaves a NULL sentinel after the last port pointer. */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);

	/* alloc a container for our list of ATA ports (buses) */
	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
	if (!host)
		goto err_out;

	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	mutex_init(&host->eh_mutex);
	host->dev = dev;
	host->n_ports = max_ports;

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

 err_out:
	devres_release_group(dev, NULL);
	return NULL;
}
5609
5610
5611
5612
5613
5614
5615
5616
5617
5618
5619
5620
5621
5622
5623
5624
5625
/**
 *	ata_host_alloc_pinfo - alloc host and init with port_info array
 *	@dev: generic device this host is associated with
 *	@ppi: array of ATA port_info to initialize host with
 *	@n_ports: number of ATA ports attached to this host
 *
 *	Allocate an ATA host and initialize each port from @ppi.  When
 *	@ppi contains fewer entries than @n_ports (terminated by a NULL
 *	entry), the last non-NULL entry is reused for the remaining
 *	ports.  NOTE(review): @ppi[0] must be non-NULL -- otherwise @pi
 *	stays NULL and is dereferenced below; callers appear to
 *	guarantee this.
 *
 *	RETURNS:
 *	Allocated ATA host on success, NULL on failure.
 */
struct ata_host *ata_host_alloc_pinfo(struct device *dev,
				      const struct ata_port_info * const * ppi,
				      int n_ports)
{
	const struct ata_port_info *pi;
	struct ata_host *host;
	int i, j;

	host = ata_host_alloc(dev, n_ports);
	if (!host)
		return NULL;

	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ppi[j])
			pi = ppi[j++];

		ap->pio_mask = pi->pio_mask;
		ap->mwdma_mask = pi->mwdma_mask;
		ap->udma_mask = pi->udma_mask;
		ap->flags |= pi->flags;
		ap->link.flags |= pi->link_flags;
		ap->ops = pi->port_ops;

		/* first non-dummy port_ops becomes the host's ops */
		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
			host->ops = pi->port_ops;
	}

	return host;
}
5657
5658
5659
5660
5661
5662
5663
5664
5665
5666
5667
5668
5669
5670
5671
5672
5673
5674
5675
5676
5677
5678
5679
5680
5681
5682
5683
5684
5685
5686
5687
5688
5689
5690
5691
5692
5693
5694
5695
5696
5697
5698
5699
5700
5701
5702
5703
5704int ata_slave_link_init(struct ata_port *ap)
5705{
5706 struct ata_link *link;
5707
5708 WARN_ON(ap->slave_link);
5709 WARN_ON(ap->flags & ATA_FLAG_PMP);
5710
5711 link = kzalloc(sizeof(*link), GFP_KERNEL);
5712 if (!link)
5713 return -ENOMEM;
5714
5715 ata_link_init(ap, link, 1);
5716 ap->slave_link = link;
5717 return 0;
5718}
5719
5720static void ata_host_stop(struct device *gendev, void *res)
5721{
5722 struct ata_host *host = dev_get_drvdata(gendev);
5723 int i;
5724
5725 WARN_ON(!(host->flags & ATA_HOST_STARTED));
5726
5727 for (i = 0; i < host->n_ports; i++) {
5728 struct ata_port *ap = host->ports[i];
5729
5730 if (ap->ops->port_stop)
5731 ap->ops->port_stop(ap);
5732 }
5733
5734 if (host->ops->host_stop)
5735 host->ops->host_stop(host);
5736}
5737
5738
5739
5740
5741
5742
5743
5744
5745
5746
5747
5748
5749
5750
5751
5752
5753
5754
5755
5756
5757
/*
 * ata_finalize_port_ops - flatten an ops inheritance chain in place
 * @ops: port_operations to finalize
 *
 * Walk the ->inherits chain and copy every ancestor's non-NULL hook
 * into any slot of @ops that is still NULL, treating the struct as a
 * flat array of pointers up to (but excluding) the ->inherits member.
 * Slots holding an ERR_PTR act as "explicitly disabled" markers and
 * are reset to NULL afterwards.  ->inherits is cleared so the work is
 * done at most once per ops table; the static lock serializes
 * concurrent finalization.
 */
static void ata_finalize_port_ops(struct ata_port_operations *ops)
{
	static DEFINE_SPINLOCK(lock);
	const struct ata_port_operations *cur;
	void **begin = (void **)ops;
	void **end = (void **)&ops->inherits;
	void **pp;

	if (!ops || !ops->inherits)
		return;

	spin_lock(&lock);

	/* fill NULL slots from each ancestor, nearest first */
	for (cur = ops->inherits; cur; cur = cur->inherits) {
		void **inherit = (void **)cur;

		for (pp = begin; pp < end; pp++, inherit++)
			if (!*pp)
				*pp = *inherit;
	}

	/* turn "disabled" ERR_PTR markers into real NULLs */
	for (pp = begin; pp < end; pp++)
		if (IS_ERR(*pp))
			*pp = NULL;

	ops->inherits = NULL;

	spin_unlock(&lock);
}
5787
5788
5789
5790
5791
5792
5793
5794
5795
5796
5797
5798
5799
5800
5801
5802
5803
/**
 *	ata_host_start - start and freeze ports of an ATA host
 *	@host: ATA host to start ports for
 *
 *	Finalize port_ops, call each port's ->port_start and freeze the
 *	ports.  When any port or the host has a stop hook, an
 *	ata_host_stop devres is registered so the hooks run on driver
 *	detach.  On failure, already-started ports are stopped again.
 *
 *	RETURNS:
 *	0 if all ports are started successfully, -errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int have_stop = 0;
	void *start_dr = NULL;
	int i, rc;

	/* idempotent -- a second call is a no-op */
	if (host->flags & ATA_HOST_STARTED)
		return 0;

	ata_finalize_port_ops(host->ops);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_finalize_port_ops(ap->ops);

		/* adopt the first non-dummy port's ops as host ops */
		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_stop)
			have_stop = 1;
	}

	if (host->ops->host_stop)
		have_stop = 1;

	/* allocate the stop devres before starting anything so that a
	 * later failure can't leave us without a way to stop */
	if (have_stop) {
		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
		if (!start_dr)
			return -ENOMEM;
	}

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				if (rc != -ENODEV)
					dev_printk(KERN_ERR, host->dev,
						   "failed to start port %d "
						   "(errno=%d)\n", i, rc);
				goto err_out;
			}
		}
		/* ports stay frozen until EH thaws them during probing */
		ata_eh_freeze_port(ap);
	}

	if (start_dr)
		devres_add(host->dev, start_dr);
	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	/* stop the ports started so far, in reverse order */
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	devres_free(start_dr);
	return rc;
}
5867
5868
5869
5870
5871
5872
5873
5874
5875
5876
5877
5878
5879
/**
 *	ata_host_init - Initialize a host struct for sas (ipr, libsas)
 *	@host: host to initialize
 *	@dev: device host is attached to
 *	@flags: host flags
 *	@ops: port_ops
 *
 *	Minimal initialization for hosts whose memory the caller has
 *	already allocated (no ports are allocated here).
 */
void ata_host_init(struct ata_host *host, struct device *dev,
		   unsigned long flags, struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	mutex_init(&host->eh_mutex);
	host->dev = dev;
	host->flags = flags;
	host->ops = ops;
}
5889
5890
/*
 * async_port_probe - probe one port, run asynchronously per port
 * @data: the ata_port to probe
 * @cookie: async cookie used to order probes
 *
 * With new EH the probe is delegated to the EH thread; with legacy EH
 * ata_bus_probe() is called directly.  SCSI scanning is always
 * synchronized so hosts are scanned in registration order.
 */
static void async_port_probe(void *data, async_cookie_t cookie)
{
	int rc;
	struct ata_port *ap = data;

	/* Unless parallel scan is allowed, probe ports strictly in
	 * order: wait for every earlier async probe before starting. */
	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
		async_synchronize_cookie(cookie);

	/* probe */
	if (ap->ops->error_handler) {
		struct ata_eh_info *ehi = &ap->link.eh_info;
		unsigned long flags;

		/* kick EH for boot probing */
		spin_lock_irqsave(ap->lock, flags);

		ehi->probe_mask |= ATA_ALL_DEVICES;
		ehi->action |= ATA_EH_RESET;
		ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

		ap->pflags &= ~ATA_PFLAG_INITIALIZING;
		ap->pflags |= ATA_PFLAG_LOADING;
		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait for EH to finish the probe */
		ata_port_wait_eh(ap);
	} else {
		DPRINTK("ata%u: bus probe begin\n", ap->print_id);
		rc = ata_bus_probe(ap);
		DPRINTK("ata%u: bus probe end\n", ap->print_id);

		if (rc) {
			/* probe failure is deliberately ignored here:
			 * the port stays registered so it can be
			 * retried / recovered later. */
		}
	}

	/* in order to keep device order, we need to synchronize at this
	 * point before scanning the SCSI host */
	async_synchronize_cookie(cookie);

	ata_scsi_scan_host(ap, 1);

}
5947
5948
5949
5950
5951
5952
5953
5954
5955
5956
5957
5958
5959
5960
5961
5962
/**
 *	ata_host_register - register an already-started ATA host
 *	@host: ATA host to register
 *	@sht: template for SCSI host
 *
 *	Register a started host: free surplus port slots, assign print
 *	IDs, add transport ports and SCSI hosts, set up SATA spd limits
 *	and finally kick off the async per-port probes.
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_printk(KERN_ERR, host->dev,
			   "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Blow away unused ports.  Happens when the LLD reduced
	 * n_ports after allocation; ports[] is NULL-terminated past
	 * n_ports (ata_host_alloc allocates one extra slot). */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names */
	for (i = 0; i < host->n_ports; i++)
		host->ports[i]->print_id = ata_print_id++;

	/* add transport ports before SCSI hosts */
	for (i = 0; i < host->n_ports; i++) {
		rc = ata_tport_add(host->dev,host->ports[i]);
		if (rc) {
			goto err_tadd;
		}
	}

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		goto err_tadd;

	/* associate with ACPI nodes */
	ata_acpi_associate(host);

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned long xfer_mask;

		/* SATA ports default to a SATA cable if none was set */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);
		if (ap->slave_link)
			sata_link_init_spd(ap->slave_link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap)) {
			ata_port_printk(ap, KERN_INFO,
					"%cATA max %s %s\n",
					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
					ata_mode_string(xfer_mask),
					ap->link.eh_info.desc);
			ata_ehi_clear_desc(&ap->link.eh_info);
		} else
			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
	}

	/* perform each probe asynchronously */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		async_schedule(async_port_probe, ap);
	}

	return 0;

 err_tadd:
	/* unwind transport ports added so far */
	while (--i >= 0) {
		ata_tport_delete(host->ports[i]);
	}
	return rc;

}
6046
6047
6048
6049
6050
6051
6052
6053
6054
6055
6056
6057
6058
6059
6060
6061
6062
6063
6064
6065
6066
6067
6068
6069
/**
 *	ata_host_activate - start host, request IRQ and register it
 *	@host: target ATA host
 *	@irq: IRQ to request
 *	@irq_handler: irq_handler used when requesting IRQ
 *	@irq_flags: irq_flags used when requesting IRQ
 *	@sht: scsi_host_template to use when registering the host
 *
 *	Convenience wrapper: ata_host_start() + devm_request_irq() +
 *	ata_host_register().  An @irq of 0 means the LLD handles
 *	interrupts itself; registration proceeds without requesting one.
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_activate(struct ata_host *host, int irq,
		      irq_handler_t irq_handler, unsigned long irq_flags,
		      struct scsi_host_template *sht)
{
	int i, rc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	/* Special case for polling mode */
	if (!irq) {
		WARN_ON(irq_handler);
		return ata_host_register(host, sht);
	}

	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
			      dev_driver_string(host->dev), host);
	if (rc)
		return rc;

	for (i = 0; i < host->n_ports; i++)
		ata_port_desc(host->ports[i], "irq %d", irq);

	rc = ata_host_register(host, sht);
	/* if failed, just free the IRQ and leave ports alone */
	if (rc)
		devm_free_irq(host->dev, irq, host);

	return rc;
}
6101
6102
6103
6104
6105
6106
6107
6108
6109
6110
6111
6112
/*
 * ata_port_detach - detach an ATA port
 * @ap: port to detach
 *
 * With new EH, flag the port as unloading and let the EH thread do the
 * actual detach, then wait for it; with legacy EH skip straight to
 * transport/SCSI teardown.
 */
static void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	ata_port_schedule_eh(ap);
	spin_unlock_irqrestore(ap->lock, flags);

	/* wait till EH commits suicide */
	ata_port_wait_eh(ap);

	/* it better be dead now */
	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));

	cancel_delayed_work_sync(&ap->hotplug_task);

 skip_eh:
	/* remove transport links before the SCSI host goes away */
	if (ap->pmp_link) {
		int i;
		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
			ata_tlink_delete(&ap->pmp_link[i]);
	}
	ata_tport_delete(ap);

	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
6145
6146
6147
6148
6149
6150
6151
6152
6153
6154
/**
 *	ata_host_detach - Detach all ports of an ATA host
 *	@host: Host to detach
 *
 *	Detach every port of @host, then break the host's ACPI
 *	association.
 */
void ata_host_detach(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++)
		ata_port_detach(host->ports[i]);

	/* the host is dead now, dissociate ACPI */
	ata_acpi_dissociate(host);
}
6165
6166#ifdef CONFIG_PCI
6167
6168
6169
6170
6171
6172
6173
6174
6175
6176
6177
6178
6179void ata_pci_remove_one(struct pci_dev *pdev)
6180{
6181 struct device *dev = &pdev->dev;
6182 struct ata_host *host = dev_get_drvdata(dev);
6183
6184 ata_host_detach(host);
6185}
6186
6187
6188int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6189{
6190 unsigned long tmp = 0;
6191
6192 switch (bits->width) {
6193 case 1: {
6194 u8 tmp8 = 0;
6195 pci_read_config_byte(pdev, bits->reg, &tmp8);
6196 tmp = tmp8;
6197 break;
6198 }
6199 case 2: {
6200 u16 tmp16 = 0;
6201 pci_read_config_word(pdev, bits->reg, &tmp16);
6202 tmp = tmp16;
6203 break;
6204 }
6205 case 4: {
6206 u32 tmp32 = 0;
6207 pci_read_config_dword(pdev, bits->reg, &tmp32);
6208 tmp = tmp32;
6209 break;
6210 }
6211
6212 default:
6213 return -EINVAL;
6214 }
6215
6216 tmp &= bits->mask;
6217
6218 return (tmp == bits->val) ? 1 : 0;
6219}
6220
6221#ifdef CONFIG_PM
/* Put the PCI device to sleep: save config space, disable it and, for
 * sleep-type PM events, drop to D3hot. */
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event & PM_EVENT_SLEEP)
		pci_set_power_state(pdev, PCI_D3hot);
}
6230
/* Bring the PCI device back to D0, restore config space, re-enable it
 * (managed) and restore bus mastering.  Returns 0 or -errno. */
int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}
6248
6249int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6250{
6251 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6252 int rc = 0;
6253
6254 rc = ata_host_suspend(host, mesg);
6255 if (rc)
6256 return rc;
6257
6258 ata_pci_device_do_suspend(pdev, mesg);
6259
6260 return 0;
6261}
6262
6263int ata_pci_device_resume(struct pci_dev *pdev)
6264{
6265 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6266 int rc;
6267
6268 rc = ata_pci_device_do_resume(pdev);
6269 if (rc == 0)
6270 ata_host_resume(host);
6271 return rc;
6272}
6273#endif
6274
6275#endif
6276
6277static int __init ata_parse_force_one(char **cur,
6278 struct ata_force_ent *force_ent,
6279 const char **reason)
6280{
6281
6282
6283
6284
6285
6286 static struct ata_force_param force_tbl[] __initdata = {
6287 { "40c", .cbl = ATA_CBL_PATA40 },
6288 { "80c", .cbl = ATA_CBL_PATA80 },
6289 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
6290 { "unk", .cbl = ATA_CBL_PATA_UNK },
6291 { "ign", .cbl = ATA_CBL_PATA_IGN },
6292 { "sata", .cbl = ATA_CBL_SATA },
6293 { "1.5Gbps", .spd_limit = 1 },
6294 { "3.0Gbps", .spd_limit = 2 },
6295 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
6296 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
6297 { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID },
6298 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
6299 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
6300 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
6301 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
6302 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
6303 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
6304 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
6305 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
6306 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
6307 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
6308 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
6309 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
6310 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6311 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6312 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6313 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6314 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6315 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6316 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6317 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6318 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6319 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6320 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6321 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6322 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6323 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6324 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6325 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6326 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6327 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6328 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6329 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6330 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6331 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
6332 { "nohrst", .lflags = ATA_LFLAG_NO_HRST },
6333 { "nosrst", .lflags = ATA_LFLAG_NO_SRST },
6334 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6335 };
6336 char *start = *cur, *p = *cur;
6337 char *id, *val, *endp;
6338 const struct ata_force_param *match_fp = NULL;
6339 int nr_matches = 0, i;
6340
6341
6342 while (*p != '\0' && *p != ',')
6343 p++;
6344
6345 if (*p == '\0')
6346 *cur = p;
6347 else
6348 *cur = p + 1;
6349
6350 *p = '\0';
6351
6352
6353 p = strchr(start, ':');
6354 if (!p) {
6355 val = strstrip(start);
6356 goto parse_val;
6357 }
6358 *p = '\0';
6359
6360 id = strstrip(start);
6361 val = strstrip(p + 1);
6362
6363
6364 p = strchr(id, '.');
6365 if (p) {
6366 *p++ = '\0';
6367 force_ent->device = simple_strtoul(p, &endp, 10);
6368 if (p == endp || *endp != '\0') {
6369 *reason = "invalid device";
6370 return -EINVAL;
6371 }
6372 }
6373
6374 force_ent->port = simple_strtoul(id, &endp, 10);
6375 if (p == endp || *endp != '\0') {
6376 *reason = "invalid port/link";
6377 return -EINVAL;
6378 }
6379
6380 parse_val:
6381
6382 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6383 const struct ata_force_param *fp = &force_tbl[i];
6384
6385 if (strncasecmp(val, fp->name, strlen(val)))
6386 continue;
6387
6388 nr_matches++;
6389 match_fp = fp;
6390
6391 if (strcasecmp(val, fp->name) == 0) {
6392 nr_matches = 1;
6393 break;
6394 }
6395 }
6396
6397 if (!nr_matches) {
6398 *reason = "unknown value";
6399 return -EINVAL;
6400 }
6401 if (nr_matches > 1) {
6402 *reason = "ambigious value";
6403 return -EINVAL;
6404 }
6405
6406 force_ent->param = *match_fp;
6407
6408 return 0;
6409}
6410
/*
 * ata_parse_force_param - parse the whole libata.force module parameter
 *
 * Split ata_force_param_buf on commas, parse each token with
 * ata_parse_force_one() and accumulate the results into the global
 * ata_force_tbl / ata_force_tbl_size.  Unparseable tokens are logged
 * and skipped.  A token without an explicit port inherits the previous
 * token's port/device.
 */
static void __init ata_parse_force_param(void)
{
	int idx = 0, size = 1;
	int last_port = -1, last_device = -1;
	char *p, *cur, *next;

	/* upper bound on entry count: number of commas + 1 */
	for (p = ata_force_param_buf; *p; p++)
		if (*p == ',')
			size++;

	ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
	if (!ata_force_tbl) {
		printk(KERN_WARNING "ata: failed to extend force table, "
		       "libata.force ignored\n");
		return;
	}

	/* parse and populate the table */
	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
		const char *reason = "";
		struct ata_force_ent te = { .port = -1, .device = -1 };

		next = cur;
		if (ata_parse_force_one(&next, &te, &reason)) {
			printk(KERN_WARNING "ata: failed to parse force "
			       "parameter \"%s\" (%s)\n",
			       cur, reason);
			continue;
		}

		/* no explicit port: stick with the previous entry's */
		if (te.port == -1) {
			te.port = last_port;
			te.device = last_device;
		}

		ata_force_tbl[idx++] = te;

		last_port = te.port;
		last_device = te.device;
	}

	ata_force_tbl_size = idx;
}
6455
/* Module init: parse libata.force, bring up the SFF helper library and
 * the SCSI/ATA transports.  Returns 0 or -errno. */
static int __init ata_init(void)
{
	int rc;

	ata_parse_force_param();

	rc = ata_sff_init();
	if (rc) {
		/* force table is useless without the rest of libata */
		kfree(ata_force_tbl);
		return rc;
	}

	libata_transport_init();
	ata_scsi_transport_template = ata_attach_transport();
	if (!ata_scsi_transport_template) {
		ata_sff_exit();
		rc = -ENOMEM;
		goto err_out;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;

err_out:
	return rc;
}
6482
/* Module exit: tear down in reverse order of ata_init(). */
static void __exit ata_exit(void)
{
	ata_release_transport(ata_scsi_transport_template);
	libata_transport_exit();
	ata_sff_exit();
	kfree(ata_force_tbl);
}
6490
6491subsys_initcall(ata_init);
6492module_exit(ata_exit);
6493
6494static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
6495
/* Global ratelimit gate for libata messages (state defined above).
 * Returns nonzero when the caller may print. */
int ata_ratelimit(void)
{
	return __ratelimit(&ratelimit);
}
6500
6501
6502
6503
6504
6505
6506
6507
6508
6509
6510
6511
6512
6513
6514
6515void ata_msleep(struct ata_port *ap, unsigned int msecs)
6516{
6517 bool owns_eh = ap && ap->host->eh_owner == current;
6518
6519 if (owns_eh)
6520 ata_eh_release(ap);
6521
6522 msleep(msecs);
6523
6524 if (owns_eh)
6525 ata_eh_acquire(ap);
6526}
6527
6528
6529
6530
6531
6532
6533
6534
6535
6536
6537
6538
6539
6540
6541
6542
6543
6544
6545
6546
6547
6548
6549
6550
6551
/**
 *	ata_wait_register - wait until register value changes
 *	@ap: ATA port to wait register for, can be NULL
 *	@reg: IO-mapped register
 *	@mask: Mask to apply to read register value
 *	@val: Wait condition
 *	@interval: polling interval in milliseconds
 *	@timeout: timeout in milliseconds
 *
 *	Poll @reg until (*@reg & @mask) != @val or @timeout expires,
 *	sleeping @interval ms between reads via ata_msleep().
 *
 *	RETURNS:
 *	The final (unmasked) register value.
 */
u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval, unsigned long timeout)
{
	unsigned long deadline;
	u32 tmp;

	tmp = ioread32(reg);

	/* Calculate timeout _after_ the first read to make sure
	 * preceding writes reach the controller before starting to
	 * eat away the timeout. */
	deadline = ata_deadline(jiffies, timeout);

	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
		ata_msleep(ap, interval);
		tmp = ioread32(reg);
	}

	return tmp;
}
6573
6574
6575
6576
/* Dummy port: issuing any command fails immediately. */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}
6581
/* Dummy port: there is nothing to recover, so EH is a no-op. */
static void ata_dummy_error_handler(struct ata_port *ap)
{
	/* truly dummy */
}
6586
/* Operations for a dummy port: every command fails with AC_ERR_SYSTEM
 * and error handling does nothing.  Used for port slots that exist but
 * have no usable hardware behind them. */
struct ata_port_operations ata_dummy_port_ops = {
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.error_handler		= ata_dummy_error_handler,
};
6592
/* port_info template for dummy ports; only port_ops is meaningful. */
const struct ata_port_info ata_dummy_port_info = {
	.port_ops		= &ata_dummy_port_ops,
};
6596
6597
6598
6599
6600
6601
6602
/* Core tables, host/port lifecycle, command and taskfile helpers */
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_base_port_ops);
EXPORT_SYMBOL_GPL(sata_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
EXPORT_SYMBOL_GPL(ata_link_next);
EXPORT_SYMBOL_GPL(ata_dev_next);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_host_alloc);
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
EXPORT_SYMBOL_GPL(ata_slave_link_init);
EXPORT_SYMBOL_GPL(ata_host_start);
EXPORT_SYMBOL_GPL(ata_host_register);
EXPORT_SYMBOL_GPL(ata_host_activate);
EXPORT_SYMBOL_GPL(ata_host_detach);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(atapi_cmd_type);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_pack_xfermask);
EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
EXPORT_SYMBOL_GPL(ata_mode_string);
EXPORT_SYMBOL_GPL(ata_id_xfermask);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_std_qc_defer);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_dev_disable);
/* Link/device reset, SCR access, and SCSI glue */
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(ata_wait_after_reset);
EXPORT_SYMBOL_GPL(sata_link_debounce);
EXPORT_SYMBOL_GPL(sata_link_resume);
EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(sata_link_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_msleep);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_link_online);
EXPORT_SYMBOL_GPL(ata_link_offline);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

/* PATA timing computation helpers */
EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_find_mode);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);
EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);

/* PCI helpers (libata-core PCI glue) */
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif
#endif

/* Error-handling (EH) entry points and description helpers */
EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
EXPORT_SYMBOL_GPL(ata_port_desc);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
#endif
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_link_abort);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(sata_async_notification);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
EXPORT_SYMBOL_GPL(ata_do_eh);
EXPORT_SYMBOL_GPL(ata_std_error_handler);

/* Cable-type detection callbacks for host drivers */
EXPORT_SYMBOL_GPL(ata_cable_40wire);
EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_ignore);
EXPORT_SYMBOL_GPL(ata_cable_sata);