1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43#include <linux/kernel.h>
44#include <linux/module.h>
45#include <linux/pci.h>
46#include <linux/init.h>
47#include <linux/list.h>
48#include <linux/mm.h>
49#include <linux/spinlock.h>
50#include <linux/blkdev.h>
51#include <linux/delay.h>
52#include <linux/timer.h>
53#include <linux/interrupt.h>
54#include <linux/completion.h>
55#include <linux/suspend.h>
56#include <linux/workqueue.h>
57#include <linux/scatterlist.h>
58#include <linux/io.h>
59#include <linux/async.h>
60#include <linux/log2.h>
61#include <scsi/scsi.h>
62#include <scsi/scsi_cmnd.h>
63#include <scsi/scsi_host.h>
64#include <linux/libata.h>
65#include <asm/byteorder.h>
66#include <linux/cdrom.h>
67
68#include "libata.h"
69
70
71
/* Debounce timing parameters in msecs: { interval, duration, timeout },
 * consumed by the SATA link debounce/resume helpers. */
const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };

/* Base port operations every libata driver inherits: the standard
 * prereset/postreset hooks and the standard error handler. */
const struct ata_port_operations ata_base_port_ops = {
	.prereset = ata_std_prereset,
	.postreset = ata_std_postreset,
	.error_handler = ata_std_error_handler,
};

/* SATA port operations: base ops plus standard qc deferral and
 * SATA hardreset. */
const struct ata_port_operations sata_port_ops = {
	.inherits = &ata_base_port_ops,

	.qc_defer = ata_std_qc_defer,
	.hardreset = sata_std_hardreset,
};
88
/* Forward declarations for helpers defined later in this file. */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static unsigned int ata_dev_set_feature(struct ata_device *dev,
					u8 enable, u8 feature);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

/* Monotonically increasing id used when printing port names. */
unsigned int ata_print_id = 1;
/* Workqueue for PIO task handling. */
static struct workqueue_struct *ata_wq;

/* Auxiliary workqueue (e.g. for SCSI device scanning). */
struct workqueue_struct *ata_aux_wq;

/* One set of forced settings parsed from the "force" module parameter. */
struct ata_force_param {
	const char *name;		/* human-readable name for messages */
	unsigned int cbl;		/* forced cable type, ATA_CBL_NONE if unset */
	int spd_limit;			/* forced PHY speed limit, 0 if unset */
	unsigned long xfer_mask;	/* forced transfer mode mask, 0 if unset */
	unsigned int horkage_on;	/* horkage bits to turn on */
	unsigned int horkage_off;	/* horkage bits to turn off */
	unsigned int lflags;		/* link flags to force on */
};

/* A force-table entry: which port/device the parameters apply to.
 * -1 in port or device acts as a wildcard. */
struct ata_force_ent {
	int port;
	int device;
	struct ata_force_param param;
};

static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

/* Raw "force" parameter string; parsed during init, hence __initdata. */
static char ata_force_param_buf[PAGE_SIZE] __initdata;

module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");

static int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");

static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
166
167
168static bool ata_sstatus_online(u32 sstatus)
169{
170 return (sstatus & 0xf) == 0x3;
171}
172
173
174
175
176
177
178
179
180
181
182
183
184
/**
 *	ata_link_next - link iteration helper
 *	@link: the previous link, NULL to start
 *	@ap: ATA port containing links to iterate
 *	@mode: iteration mode, one of ATA_LITER_*
 *
 *	RETURNS:
 *	Pointer to the next link (the first one when @link is NULL),
 *	or NULL when iteration is complete.
 */
struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
			       enum ata_link_iter_mode mode)
{
	BUG_ON(mode != ATA_LITER_EDGE &&
	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);

	/* NULL link indicates start of iteration */
	if (!link)
		switch (mode) {
		case ATA_LITER_EDGE:
		case ATA_LITER_PMP_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_HOST_FIRST:
			return &ap->link;
		}

	/* we just iterated over the host link, what's next? */
	if (link == &ap->link)
		switch (mode) {
		case ATA_LITER_HOST_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_PMP_FIRST:
			if (unlikely(ap->slave_link))
				return ap->slave_link;
			/* fall through */
		case ATA_LITER_EDGE:
			return NULL;
		}

	/* slave_link has no further links; iteration ends there */
	if (unlikely(link == ap->slave_link))
		return NULL;

	/* we were over a PMP link; advance to the next one if any */
	if (++link < ap->pmp_link + ap->nr_pmp_links)
		return link;

	/* PMP links exhausted; PMP_FIRST visits the host link last */
	if (mode == ATA_LITER_PMP_FIRST)
		return &ap->link;

	return NULL;
}
231
232
233
234
235
236
237
238
239
240
241
242
243
/**
 *	ata_dev_next - device iteration helper
 *	@dev: the previous device, NULL to start
 *	@link: ATA link containing devices to iterate
 *	@mode: iteration mode, one of ATA_DITER_*
 *
 *	RETURNS:
 *	Pointer to the next device (the first one when @dev is NULL),
 *	or NULL when iteration is complete.  ENABLED modes skip
 *	devices that are not enabled.
 */
struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
				enum ata_dev_iter_mode mode)
{
	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);

	/* NULL dev indicates start of iteration */
	if (!dev)
		switch (mode) {
		case ATA_DITER_ENABLED:
		case ATA_DITER_ALL:
			dev = link->device;
			goto check;
		case ATA_DITER_ENABLED_REVERSE:
		case ATA_DITER_ALL_REVERSE:
			dev = link->device + ata_link_max_devices(link) - 1;
			goto check;
		}

 next:
	/* move to the next one, in the direction given by @mode */
	switch (mode) {
	case ATA_DITER_ENABLED:
	case ATA_DITER_ALL:
		if (++dev < link->device + ata_link_max_devices(link))
			goto check;
		return NULL;
	case ATA_DITER_ENABLED_REVERSE:
	case ATA_DITER_ALL_REVERSE:
		if (--dev >= link->device)
			goto check;
		return NULL;
	}

 check:
	/* ENABLED modes skip disabled devices */
	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
	    !ata_dev_enabled(dev))
		goto next;
	return dev;
}
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299struct ata_link *ata_dev_phys_link(struct ata_device *dev)
300{
301 struct ata_port *ap = dev->link->ap;
302
303 if (!ap->slave_link)
304 return dev->link;
305 if (!dev->devno)
306 return &ap->link;
307 return ap->slave_link;
308}
309
310
311
312
313
314
315
316
317
318
319
320
321
322
/**
 *	ata_force_cbl - force cable type according to libata.force
 *	@ap: ATA port of interest
 *
 *	Scans the force table from last to first entry so that the
 *	last matching "force" parameter wins, and applies the first
 *	matching cable type found.
 */
void ata_force_cbl(struct ata_port *ap)
{
	int i;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		/* -1 port is a wildcard */
		if (fe->port != -1 && fe->port != ap->print_id)
			continue;

		/* entry forces no cable type */
		if (fe->param.cbl == ATA_CBL_NONE)
			continue;

		ap->cbl = fe->param.cbl;
		ata_port_printk(ap, KERN_NOTICE,
				"FORCE: cable set to %s\n", fe->param.name);
		return;
	}
}
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
/**
 *	ata_force_link_limits - force link limits according to libata.force
 *	@link: ATA link of interest
 *
 *	Applies forced PHY speed limit and link flags.  The table is
 *	scanned from last to first so that the last matching entry's
 *	speed limit wins; link flags from all matching entries are
 *	OR'd together.
 */
static void ata_force_link_limits(struct ata_link *link)
{
	bool did_spd = false;
	int linkno = link->pmp;
	int i;

	/* host links are addressed as device 15 and up */
	if (ata_is_host_link(link))
		linkno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != linkno)
			continue;

		/* only honor the first (i.e. last specified) spd limit */
		if (!did_spd && fe->param.spd_limit) {
			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
			ata_link_printk(link, KERN_NOTICE,
					"FORCE: PHY spd limit set to %s\n",
					fe->param.name);
			did_spd = true;
		}

		/* link flags accumulate across matching entries */
		if (fe->param.lflags) {
			link->flags |= fe->param.lflags;
			ata_link_printk(link, KERN_NOTICE,
					"FORCE: link flag 0x%x forced -> 0x%x\n",
					fe->param.lflags, link->flags);
		}
	}
}
395
396
397
398
399
400
401
402
403
404
405
406
/**
 *	ata_force_xfermask - force xfermask according to libata.force
 *	@dev: ATA device of interest
 *
 *	Scans the force table from last to first (last entry wins) and
 *	applies the first matching transfer-mode mask: forcing a mode
 *	class clears all faster classes.
 */
static void ata_force_xfermask(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* devices on a host link are also addressable as 15 + devno */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];
		unsigned long pio_mask, mwdma_mask, udma_mask;

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!fe->param.xfer_mask)
			continue;

		ata_unpack_xfermask(fe->param.xfer_mask,
				    &pio_mask, &mwdma_mask, &udma_mask);
		/* keep only the forced class and everything slower */
		if (udma_mask)
			dev->udma_mask = udma_mask;
		else if (mwdma_mask) {
			dev->udma_mask = 0;
			dev->mwdma_mask = mwdma_mask;
		} else {
			dev->udma_mask = 0;
			dev->mwdma_mask = 0;
			dev->pio_mask = pio_mask;
		}

		ata_dev_printk(dev, KERN_NOTICE,
			"FORCE: xfer_mask set to %s\n", fe->param.name);
		return;
	}
}
449
450
451
452
453
454
455
456
457
458
459
460
/**
 *	ata_force_horkage - force horkage according to libata.force
 *	@dev: ATA device of interest
 *
 *	Applies forced horkage bits.  Unlike the other force helpers
 *	this scans first to last so that later entries can override
 *	the effect of earlier ones.
 */
static void ata_force_horkage(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* devices on a host link are also addressable as 15 + devno */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = 0; i < ata_force_tbl_size; i++) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		/* skip when the entry would change nothing */
		if (!(~dev->horkage & fe->param.horkage_on) &&
		    !(dev->horkage & fe->param.horkage_off))
			continue;

		dev->horkage |= fe->param.horkage_on;
		dev->horkage &= ~fe->param.horkage_off;

		ata_dev_printk(dev, KERN_NOTICE,
			"FORCE: horkage modified (%s)\n", fe->param.name);
	}
}
492
493
494
495
496
497
498
499
500
501
502
503
504
/**
 *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
 *	@opcode: SCSI opcode
 *
 *	RETURNS:
 *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
 */
int atapi_cmd_type(u8 opcode)
{
	switch (opcode) {
	case GPCMD_READ_10:
	case GPCMD_READ_12:
		return ATAPI_READ;

	case GPCMD_WRITE_10:
	case GPCMD_WRITE_12:
	case GPCMD_WRITE_AND_VERIFY_10:
		return ATAPI_WRITE;

	case GPCMD_READ_CD:
	case GPCMD_READ_CD_MSF:
		return ATAPI_READ_CD;

	case ATA_16:
	case ATA_12:
		if (atapi_passthru16)
			return ATAPI_PASS_THRU;
		/* fall through: passthru disabled, treat as misc */
	default:
		return ATAPI_MISC;
	}
}
530
531
532
533
534
535
536
537
538
539
540
541
542
543
/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@pmp: Port multiplier port
 *	@is_cmd: This FIS is for command
 *	@fis: Buffer into which data will output (20 bytes)
 *
 *	Converts a standard ATA taskfile to a Register - Host to
 *	Device FIS structure (see SATA spec).
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = pmp & 0xf;		/* port multiplier number */
	if (is_cmd)
		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */

	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}
574
575
576
577
578
579
580
581
582
583
584
585
/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 */
void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command = fis[2];	/* status */
	tf->feature = fis[3];	/* error */

	tf->lbal = fis[4];
	tf->lbam = fis[5];
	tf->lbah = fis[6];
	tf->device = fis[7];

	tf->hob_lbal = fis[8];
	tf->hob_lbam = fis[9];
	tf->hob_lbah = fis[10];

	tf->nsect = fis[12];
	tf->hob_nsect = fis[13];
}
603
/* R/W command opcodes indexed by base (0 = multi, 8 = pio, 16 = dma)
 * plus fua(4) + lba48(2) + write(1); 0 marks invalid combinations. */
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
633
634
635
636
637
638
639
640
641
642
643
644
/**
 *	ata_rwcmd_protocol - set taskfile r/w command and protocol
 *	@tf: command to examine and configure
 *	@dev: device the command targets
 *
 *	Picks the read/write opcode from ata_rw_cmds[] based on the
 *	taskfile flags (FUA/LBA48/WRITE) and the device's transfer
 *	capability, and sets tf->protocol accordingly.
 *
 *	RETURNS:
 *	0 on success, -1 if the flag combination has no valid opcode.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	/* flag bits map onto the ata_rw_cmds[] index */
	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;	/* multi vs single PIO */
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* controller cannot do LBA48 DMA; fall back to PIO */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	Decodes the starting block address from @tf, handling LBA48,
 *	LBA28 and CHS addressing.
 *
 *	RETURNS:
 *	Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= (u64)tf->hob_lbal << 24;
		} else
			/* LBA28: top nibble lives in the device register */
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		/* CHS sector numbers are 1-based; 0 is invalid */
		if (!sect) {
			ata_dev_printk(dev, KERN_WARNING, "device reported "
				       "invalid CHS sector 0\n");
			sect = 1;	/* oh well */
		}

		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
	}

	return block;
}
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag (for NCQ)
 *
 *	Builds an NCQ, LBA48/LBA28 or CHS taskfile depending on the
 *	device's capabilities and the request's range.
 *
 *	RETURNS:
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		/* NCQ: tag goes in nsect bits 7:3, count in feature regs */
		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255*/
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862unsigned long ata_pack_xfermask(unsigned long pio_mask,
863 unsigned long mwdma_mask,
864 unsigned long udma_mask)
865{
866 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
867 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
868 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
869}
870
871
872
873
874
875
876
877
878
879
880
881void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
882 unsigned long *mwdma_mask, unsigned long *udma_mask)
883{
884 if (pio_mask)
885 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
886 if (mwdma_mask)
887 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
888 if (udma_mask)
889 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
890}
891
/* Maps each transfer-mode class to its bit range in an xfer_mask and
 * its base XFER_* mode value; terminated by shift == -1. */
static const struct ata_xfer_ent {
	int shift, bits;	/* bit position and number of modes in class */
	u8 base;		/* XFER_* value of the class's mode 0 */
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};
901
902
903
904
905
906
907
908
909
910
911
912
913
914
/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Returns the XFER_* value for the highest bit set in @xfer_mask.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0xff if no match found.
 */
u8 ata_xfer_mask2mode(unsigned long xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0xff;
}
925
926
927
928
929
930
931
932
933
934
935
936
937
/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Returns the mask of @xfer_mode and all slower modes in the
 *	same class.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
unsigned long ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			/* all bits up to the mode, clipped to the class range */
			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
				& ~((1 << ent->shift) - 1);
	return 0;
}
948
949
950
951
952
953
954
955
956
957
958
959
960
/**
 *	ata_xfer_mode2shift - Find matching xfer_mask shift for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	RETURNS:
 *	Matching xfer_mask shift, -1 if no match found.
 */
int ata_xfer_mode2shift(unsigned long xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	/* indexed by xfer_mask bit position */
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}
1016
/* Return a printable name for a SATA link speed number (1-based),
 * or "<unknown>" when out of range. */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
		"6.0 Gbps",
	};
	unsigned int idx;

	if (spd < 1 || spd > ARRAY_SIZE(spd_str))
		return "<unknown>";
	idx = spd - 1;
	return spd_str[idx];
}
1029
/* Configure Device-Initiated Power Management (DIPM) and the IPM
 * transition restrictions in SControl according to @policy.
 * Returns 0 on success, -EINVAL when IPM is unavailable, or the
 * SCR access error code. */
static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u32 scontrol;
	unsigned int err_mask;
	int rc;

	/*
	 * disallow DIPM for drivers which haven't set
	 * ATA_FLAG_IPM.  This is because when DIPM is enabled,
	 * phy ready will be set in the interrupt status on
	 * state changes, which will cause some drivers to
	 * think there are errors - additionally drivers will
	 * need to disable hot plug.
	 */
	if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
		ap->pm_policy = NOT_AVAILABLE;
		return -EINVAL;
	}

	/*
	 * For DIPM, we will only enable it for the
	 * min_power setting.
	 *
	 * Why?  Because Disks are too stupid to know that
	 * If the host rejects a request to go to SLUMBER
	 * they should retry at PARTIAL, and instead it
	 * just would give up.  So, for medium_power to
	 * work at all, we need to only allow HIPM.
	 */
	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	switch (policy) {
	case MIN_POWER:
		/* no restrictions on IPM transitions */
		scontrol &= ~(0x3 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/* enable DIPM */
		if (dev->flags & ATA_DFLAG_DIPM)
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_DIPM);
		break;
	case MEDIUM_POWER:
		/* allow IPM to PARTIAL */
		scontrol &= ~(0x1 << 8);
		scontrol |= (0x2 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/*
		 * we don't have to disable DIPM since IPM flags
		 * disallow transitions to SLUMBER, which effectively
		 * disable DIPM if it does not support PARTIAL
		 */
		break;
	case NOT_AVAILABLE:
	case MAX_PERFORMANCE:
		/* disable all IPM transitions */
		scontrol |= (0x3 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/*
		 * we don't have to disable DIPM since IPM flags
		 * disallow all transitions which effectively
		 * disable DIPM anyway.
		 */
		break;
	}

	/* FIXME: handle SET FEATURES failure */
	(void) err_mask;

	return 0;
}
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
/**
 *	ata_dev_enable_pm - enable SATA interface power management
 *	@dev:  device to enable power management
 *	@policy: the link power management policy
 *
 *	Enable SATA Interface power management.  This will enable
 *	Device Interface Power Management (DIPM) and, via the driver's
 *	enable_pm hook, Host Initiated Power Management (HIPM) if the
 *	driver supports it.  On any failure the port falls back to
 *	MAX_PERFORMANCE.
 */
void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
{
	int rc = 0;
	struct ata_port *ap = dev->link->ap;

	/* set HIPM first, then DIPM */
	if (ap->ops->enable_pm)
		rc = ap->ops->enable_pm(ap, policy);
	if (rc)
		goto enable_pm_out;
	rc = ata_dev_set_dipm(dev, policy);

enable_pm_out:
	if (rc)
		ap->pm_policy = MAX_PERFORMANCE;
	else
		ap->pm_policy = policy;
	return ;
}
1146
1147#ifdef CONFIG_PM
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
/**
 *	ata_dev_disable_pm - disable SATA interface power management
 *	@dev: device to disable power management
 *
 *	Disable SATA Interface power management:  disables DIPM by
 *	forcing MAX_PERFORMANCE, then invokes the driver's disable_pm
 *	hook to disable HIPM.
 */
static void ata_dev_disable_pm(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	ata_dev_set_dipm(dev, MAX_PERFORMANCE);
	if (ap->ops->disable_pm)
		ap->ops->disable_pm(ap);
}
1168#endif
1169
/* Record the desired link power management policy on @ap and kick the
 * error handler (without autopsy) to apply it asynchronously. */
void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
{
	ap->pm_policy = policy;
	ap->link.eh_info.action |= ATA_EH_LPM;
	ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
	ata_port_schedule_eh(ap);
}
1177
1178#ifdef CONFIG_PM
/* Suspend path: synchronously disable link PM on every device of
 * every link of every port in @host.  (Despite its name, this helper
 * prepares for suspend by turning DIPM/HIPM off.) */
static void ata_lpm_enable(struct ata_host *host)
{
	struct ata_link *link;
	struct ata_port *ap;
	struct ata_device *dev;
	int i;

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		ata_for_each_link(link, ap, EDGE) {
			ata_for_each_dev(dev, link, ALL)
				ata_dev_disable_pm(dev);
		}
	}
}

/* Resume path: re-schedule each port's stored pm_policy to be
 * re-applied via the error handler. */
static void ata_lpm_disable(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		ata_lpm_schedule(ap, ap->pm_policy);
	}
}
1204#endif
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section of
 *	ATA/PI spec (volume 1, sect 5.14), plus the SATA extensions
 *	for port multipliers and SEMB devices.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
 *	%ATA_DEV_SEMB or %ATA_DEV_UNKNOWN.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* The signature is examined via the lbam/lbah (cylinder)
	 * registers only:
	 *
	 *	ATA:	lbam 0x00, lbah 0x00
	 *	ATAPI:	lbam 0x14, lbah 0xeb
	 *	PMP:	lbam 0x69, lbah 0x96
	 *	SEMB:	lbam 0x3c, lbah 0xc3
	 *
	 * The nsect/lbal values are deliberately not checked; some
	 * devices report unexpected values there while the cylinder
	 * registers remain reliable.
	 */
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");
		return ATA_DEV_PMP;
	}

	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		DPRINTK("found SEMB device by sig (could be ATA device)\n");
		return ATA_DEV_SEMB;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
/**
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page (in words)
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 */
void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	BUG_ON(len & 1);

	while (len > 0) {
		c = id[ofs] >> 8;	/* high byte first */
		*s = c;
		s++;

		c = id[ofs] & 0xff;	/* then low byte */
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page (in words)
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	ata_id_string(id, s, ofs, len - 1);

	/* trim trailing spaces and NUL-terminate */
	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}
1330
/* Extract the device capacity in sectors from IDENTIFY data,
 * preferring LBA48 over LBA28 over current CHS over default CHS. */
static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
		else
			return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
	} else {
		if (ata_id_current_chs_valid(id))
			return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
			       id[ATA_ID_CUR_SECTORS];
		else
			return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
			       id[ATA_ID_SECTORS];
	}
}
1347
/* Assemble a 48-bit LBA from the taskfile's LBA and HOB registers. */
u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}

/* Assemble a 28-bit LBA from the taskfile; the top nibble lives in
 * the device register. */
u64 ata_tf_to_lba(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= (tf->device & 0x0f) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
/**
 *	ata_read_native_max_address - Read native max address
 *	@dev: target device
 *	@max_sectors: out parameter for the result
 *
 *	Perform an LBA48 or LBA28 native size query upon the device in
 *	question.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted by the drive.
 *	-EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
			       "max address (err_mask=0x%x)\n", err_mask);
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	/* result registers hold the address of the last sector */
	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf) + 1;
	else
		*max_sectors = ata_tf_to_lba(&tf) + 1;
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	new_sectors--;	/* the command takes the address of the last sector */

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		/* LBA28: top nibble goes into the device register */
		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to set "
			       "max address (err_mask=0x%x)\n", err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
/**
 *	ata_hpa_resize	-	Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and
 *	resize it if required to the full available boundary (when the
 *	ignore_hpa module parameter is set).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA handling.
		 */
		if (rc == -EACCES || !ata_ignore_hpa) {
			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
				       "broken, skipping HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if the device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}
	dev->n_native_sectors = native_sectors;

	/* nothing to do? */
	if (native_sectors <= sectors || !ata_ignore_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_printk(dev, KERN_INFO,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_printk(dev, KERN_WARNING,
				"native sectors (%llu) is smaller than "
				"sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
			       "(%llu -> %llu), skipping HPA handling\n",
			       (unsigned long long)sectors,
			       (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
			       "data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_printk(dev, KERN_INFO,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page (debug builds only).
 */
static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device.  This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	RETURNS:
 *	Computed xfermask
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case.  Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However its the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 *	Process compact flash extended modes
		 */
		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
/**
 *	ata_pio_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@data: data for the callback function
 *	@delay: delay time in msecs for workqueue function
 *
 *	Schedule @ap->port_task to be invoked after @delay msecs on
 *	the PIO workqueue, passing @data via ap->port_task_data.
 */
void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay)
{
	ap->port_task_data = data;

	/* may fail if ata_port_flush_task() in progress */
	queue_delayed_work(ata_wq, &ap->port_task, msecs_to_jiffies(delay));
}
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guaranteed not to
 *	be running or queued.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	cancel_rearming_delayed_work(&ap->port_task);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
}
1731
/* Completion callback for internal commands: wake the waiter that
 * ata_exec_internal_sg() parked on qc->private_data.
 */
static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sgl: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	the command on entry and the result on return.  Timeout and error
 *	conditions are reported via the return value.  No recovery action
 *	is taken after a command times out.  It is the caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	int auto_timeout = 0;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc.
	 *
	 * The internal tag is reserved when EH is in use; otherwise
	 * (old EH) just grab tag 0, as only one command can be in
	 * flight at a time anyway.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* Save the current command state and temporarily make the
	 * port look idle so the internal command can preempt. */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout) {
		if (ata_probe_timeout)
			timeout = ata_probe_timeout * 1000;
		else {
			timeout = ata_internal_cmd_timeout(dev, command);
			auto_timeout = 1;
		}
	}

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		/* AC_ERR_OTHER is a catch-all; drop it when a more
		 * specific error bit is set. */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	/* NOTE(review): some LLDDs apparently disable the port on
	 * internal command failure; re-enable it here and report the
	 * failure as a system error so EH can take over.  Presumably
	 * this works around drivers that set ATA_FLAG_DISABLED from
	 * their completion paths — confirm against LLDD behavior.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
		ata_internal_cmd_timed_out(dev, command);

	return err_mask;
}
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950unsigned ata_exec_internal(struct ata_device *dev,
1951 struct ata_taskfile *tf, const u8 *cdb,
1952 int dma_dir, void *buf, unsigned int buflen,
1953 unsigned long timeout)
1954{
1955 struct scatterlist *psg = NULL, sg;
1956 unsigned int n_elem = 0;
1957
1958 if (dma_dir != DMA_NONE) {
1959 WARN_ON(!buf);
1960 sg_init_one(&sg, buf, buflen);
1961 psg = &sg;
1962 n_elem++;
1963 }
1964
1965 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1966 timeout);
1967}
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1984{
1985 struct ata_taskfile tf;
1986
1987 ata_tf_init(dev, &tf);
1988
1989 tf.command = cmd;
1990 tf.flags |= ATA_TFLAG_DEVICE;
1991 tf.protocol = ATA_PROT_NODATA;
1992
1993 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1994}
1995
1996
1997
1998
1999
2000
2001
2002
2003unsigned int ata_pio_need_iordy(const struct ata_device *adev)
2004{
2005
2006
2007
2008
2009 if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
2010 return 0;
2011
2012
2013
2014 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
2015 return 0;
2016
2017 if (ata_id_is_cfa(adev->id)
2018 && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
2019 return 0;
2020
2021 if (adev->pio_mode > XFER_PIO_2)
2022 return 1;
2023
2024 if (ata_id_has_iordy(adev->id))
2025 return 1;
2026 return 0;
2027}
2028
2029
2030
2031
2032
2033
2034
2035
2036static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
2037{
2038
2039 if (adev->id[ATA_ID_FIELD_VALID] & 2) {
2040 u16 pio = adev->id[ATA_ID_EIDE_PIO];
2041
2042 if (pio) {
2043
2044 if (pio > 240)
2045 return 3 << ATA_SHIFT_PIO;
2046 return 7 << ATA_SHIFT_PIO;
2047 }
2048 }
2049 return 3 << ATA_SHIFT_PIO;
2050}
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
/**
 *	ata_do_dev_read_id		-	default ID read method
 *	@dev: device
 *	@tf: proposed taskfile
 *	@id: data buffer
 *
 *	Issue the identify taskfile and hand back the buffer containing
 *	identify data. For some RAID controllers and for pre ATA devices
 *	this function is wrapped or replaced by the driver.
 */
unsigned int ata_do_dev_read_id(struct ata_device *dev,
					struct ata_taskfile *tf, u16 *id)
{
	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
				 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
}
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	bool is_semb = class == ATA_DEV_SEMB;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);

retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_SEMB:
		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
		/* fall through */
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	if (ap->ops->read_id)
		err_mask = ap->ops->read_id(dev, &tf, id);
	else
		err_mask = ata_do_dev_read_id(dev, &tf, id);

	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			ata_dev_printk(dev, KERN_DEBUG,
				       "NODEV after polling detection\n");
			return -ENOENT;
		}

		if (is_semb) {
			ata_dev_printk(dev, KERN_INFO, "IDENTIFY failed on "
				       "device w/ SEMB sig, disabled\n");
			/* SEMB is not supported yet */
			*p_class = ATA_DEV_SEMB_UNSUP;
			return 0;
		}

		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			/* Device or controller might have reported
			 * the wrong device class.  Give a shot at the
			 * other IDENTIFY if the current one is
			 * aborted by the device.
			 */
			if (may_fallback) {
				may_fallback = 0;

				if (class == ATA_DEV_ATA)
					class = ATA_DEV_ATAPI;
				else
					class = ATA_DEV_ATA;
				goto retry;
			}

			/* Control reaches here iff the device aborted
			 * both flavors of IDENTIFYs which happens
			 * sometimes with phantom devices.
			 */
			ata_dev_printk(dev, KERN_DEBUG,
				       "both IDENTIFYs aborted, assuming NODEV\n");
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed.  reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}
2263
2264static int ata_do_link_spd_horkage(struct ata_device *dev)
2265{
2266 struct ata_link *plink = ata_dev_phys_link(dev);
2267 u32 target, target_limit;
2268
2269 if (!sata_scr_valid(plink))
2270 return 0;
2271
2272 if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2273 target = 1;
2274 else
2275 return 0;
2276
2277 target_limit = (1 << target) - 1;
2278
2279
2280 if (plink->sata_spd_limit <= target_limit)
2281 return 0;
2282
2283 plink->sata_spd_limit = target_limit;
2284
2285
2286
2287
2288
2289 if (plink->sata_spd > target) {
2290 ata_dev_printk(dev, KERN_INFO,
2291 "applying link speed limit horkage to %s\n",
2292 sata_spd_string(target));
2293 return -EAGAIN;
2294 }
2295 return 0;
2296}
2297
2298static inline u8 ata_dev_knobble(struct ata_device *dev)
2299{
2300 struct ata_port *ap = dev->link->ap;
2301
2302 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2303 return 0;
2304
2305 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2306}
2307
2308static int ata_dev_config_ncq(struct ata_device *dev,
2309 char *desc, size_t desc_sz)
2310{
2311 struct ata_port *ap = dev->link->ap;
2312 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2313 unsigned int err_mask;
2314 char *aa_desc = "";
2315
2316 if (!ata_id_has_ncq(dev->id)) {
2317 desc[0] = '\0';
2318 return 0;
2319 }
2320 if (dev->horkage & ATA_HORKAGE_NONCQ) {
2321 snprintf(desc, desc_sz, "NCQ (not used)");
2322 return 0;
2323 }
2324 if (ap->flags & ATA_FLAG_NCQ) {
2325 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2326 dev->flags |= ATA_DFLAG_NCQ;
2327 }
2328
2329 if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2330 (ap->flags & ATA_FLAG_FPDMA_AA) &&
2331 ata_id_has_fpdma_aa(dev->id)) {
2332 err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2333 SATA_FPDMA_AA);
2334 if (err_mask) {
2335 ata_dev_printk(dev, KERN_ERR, "failed to enable AA"
2336 "(error_mask=0x%x)\n", err_mask);
2337 if (err_mask != AC_ERR_DEV) {
2338 dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2339 return -EIO;
2340 }
2341 } else
2342 aa_desc = ", AA";
2343 }
2344
2345 if (hdepth >= ddepth)
2346 snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
2347 else
2348 snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2349 ddepth, aa_desc);
2350 return 0;
2351}
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned long xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
			       __func__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);

	/* set horkage */
	dev->horkage |= ata_dev_blacklisted(dev);
	ata_force_horkage(dev);

	if (dev->horkage & ATA_HORKAGE_DISABLE) {
		ata_dev_printk(dev, KERN_INFO,
			       "unsupported device, disabling\n");
		ata_dev_disable(dev);
		return 0;
	}

	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
	    dev->class == ATA_DEV_ATAPI) {
		ata_dev_printk(dev, KERN_WARNING,
			"WARNING: ATAPI is %s, device ignored.\n",
			atapi_enabled ? "not supported with this driver"
			: "disabled");
		ata_dev_disable(dev);
		return 0;
	}

	rc = ata_do_link_spd_horkage(dev);
	if (rc)
		return rc;

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __func__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;
	dev->multi_count = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			/* CPRM may make this media unusable */
			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessable.\n");
			snprintf(revbuf, 7, "CFA");
		} else {
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
			/* Warn the user if the device has TPM extensions */
			if (ata_id_has_tpm(id))
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessable.\n");
		}

		dev->n_sectors = ata_id_n_sectors(id);

		/* get current R/W Multiple count setting */
		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
			unsigned int max = dev->id[47] & 0xff;
			unsigned int cnt = dev->id[59] & 0xff;
			/* only recognize/allow powers of two here */
			if (is_power_of_2(max) && is_power_of_2(cnt))
				if (cnt <= max)
					dev->multi_count = cnt;
		}

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[24];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
			if (rc)
				return rc;

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders = id[1];
			dev->heads = id[3];
			dev->sectors = id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads = id[55];
				dev->sectors = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, dev->cylinders,
					dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		const char *dma_dir_string = "";
		u32 sntf;

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support.  If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!sata_pmp_attached(ap) ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
			unsigned int err_mask;

			/* issue SET feature command to turn this on */
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_AN);
			if (err_mask)
				ata_dev_printk(dev, KERN_ERR,
					"failed to enable ATAPI AN "
					"(err_mask=0x%x)\n", err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
			dev->flags |= ATA_DFLAG_DMADIR;
			dma_dir_string = ", DMADIR";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "ATAPI: %s, %s, max %s%s%s%s\n",
				       modelbuf, fwrevbuf,
				       ata_mode_string(xfer_mask),
				       cdb_intr_string, atapi_an_string,
				       dma_dir_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (!(dev->horkage & ATA_HORKAGE_IPM)) {
		if (ata_id_has_hipm(dev->id))
			dev->flags |= ATA_DFLAG_HIPM;
		if (ata_id_has_dipm(dev->id))
			dev->flags |= ATA_DFLAG_DIPM;
	}

	/* Limit PATA drive on SATA cable bridge transfers to udma5,
	   200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if ((dev->class == ATA_DEV_ATAPI) &&
	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
	}

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
		dev->horkage |= ATA_HORKAGE_IPM;

		/* reset link pm_policy for this port to no pm */
		ap->pm_policy = MAX_PERFORMANCE;
	}

	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot. Do this after the dev_config call as some controllers
		   with buggy firmware may want to avoid reporting false device
		   bugs */

		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
		ata_dev_printk(dev, KERN_WARNING, "WARNING: device requires "
			       "firmware update to be fully functional.\n");
		ata_dev_printk(dev, KERN_WARNING, "         contact the vendor "
			       "or visit http://ata.wiki.kernel.org.\n");
	}

	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __func__);
	return rc;
}
2683
2684
2685
2686
2687
2688
2689
2690
2691
/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 */
int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}
2696
2697
2698
2699
2700
2701
2702
2703
2704
/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 */
int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}
2709
2710
2711
2712
2713
2714
2715
2716
/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 */
int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}
2721
2722
2723
2724
2725
2726
2727
2728
/**
 *	ata_cable_ignore	-	return ignored PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which don't use cable type to limit
 *	transfer mode.
 */
int ata_cable_ignore(struct ata_port *ap)
{
	return ATA_CBL_PATA_IGN;
}
2733
2734
2735
2736
2737
2738
2739
2740
/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables.
 */
int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
2745
2746
2747
2748
2749
2750
2751
2752
2753
2754
2755
2756
2757
2758
2759
2760
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */
int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	ata_port_probe(ap);

	ata_for_each_dev(dev, &ap->link, ALL)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	ata_for_each_dev(dev, &ap->link, ALL) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	ata_for_each_dev(dev, &ap->link, ALL) {
		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	ata_port_probe(ap);

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of
	 * the reported cable types and sensed types.  When SATA
	 * drives indicate we have a bridge, we don't know which end
	 * of the link the bridge is which is a problem.
	 */
	ata_for_each_dev(dev, &ap->link, ENABLED)
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	ata_for_each_dev(dev, &ap->link, ENABLED) {
		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	ata_for_each_dev(dev, &ap->link, ENABLED)
		return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	return -ENODEV;

 fail:
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fall through */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link, 0);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
2888
2889
2890
2891
2892
2893
2894
2895
2896
2897
2898
2899
/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */
void ata_port_probe(struct ata_port *ap)
{
	ap->flags &= ~ATA_FLAG_DISABLED;
}
2904
2905
2906
2907
2908
2909
2910
2911
2912
2913
2914static void sata_print_link_status(struct ata_link *link)
2915{
2916 u32 sstatus, scontrol, tmp;
2917
2918 if (sata_scr_read(link, SCR_STATUS, &sstatus))
2919 return;
2920 sata_scr_read(link, SCR_CONTROL, &scontrol);
2921
2922 if (ata_phys_link_online(link)) {
2923 tmp = (sstatus >> 4) & 0xf;
2924 ata_link_printk(link, KERN_INFO,
2925 "SATA link up %s (SStatus %X SControl %X)\n",
2926 sata_spd_string(tmp), sstatus, scontrol);
2927 } else {
2928 ata_link_printk(link, KERN_INFO,
2929 "SATA link down (SStatus %X SControl %X)\n",
2930 sstatus, scontrol);
2931 }
2932}
2933
2934
2935
2936
2937
2938
2939
2940
2941
2942struct ata_device *ata_dev_pair(struct ata_device *adev)
2943{
2944 struct ata_link *link = adev->link;
2945 struct ata_device *pair = &link->device[1 - adev->devno];
2946 if (!ata_dev_enabled(pair))
2947 return NULL;
2948 return pair;
2949}
2950
2951
2952
2953
2954
2955
2956
2957
2958
2959
2960
2961
2962
2963
/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */
void ata_port_disable(struct ata_port *ap)
{
	ap->link.device[0].class = ATA_DEV_NONE;
	ap->link.device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_DISABLED;
}
2970
2971
2972
2973
2974
2975
2976
2977
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988
2989
2990
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *	@spd_limit: Additional limit
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	If @spd_limit is non-zero, the speed is limited to equal to or
 *	lower than @spd_limit if such speed is supported.  If
 *	@spd_limit is slower than any supported speed, only the lowest
 *	supported speed is allowed.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
{
	u32 sstatus, spd, mask;
	int rc, bit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0 && ata_sstatus_online(sstatus))
		spd = (sstatus >> 4) & 0xf;
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;

	/* unconditionally mask off the highest bit */
	bit = fls(mask) - 1;
	mask &= ~(1 << bit);

	/* Mask off all speeds higher than or equal to the current
	 * one.  Force 1.5Gbps if current SPD is not available.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		mask &= 1;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	if (spd_limit) {
		if (mask & ((1 << spd_limit) - 1))
			mask &= (1 << spd_limit) - 1;
		else {
			/* @spd_limit is below every supported speed;
			 * fall back to the lowest supported one. */
			bit = ffs(mask) - 1;
			mask = 1 << bit;
		}
	}

	link->sata_spd_limit = mask;

	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}
3044
3045static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
3046{
3047 struct ata_link *host_link = &link->ap->link;
3048 u32 limit, target, spd;
3049
3050 limit = link->sata_spd_limit;
3051
3052
3053
3054
3055
3056 if (!ata_is_host_link(link) && host_link->sata_spd)
3057 limit &= (1 << host_link->sata_spd) - 1;
3058
3059 if (limit == UINT_MAX)
3060 target = 0;
3061 else
3062 target = fls(limit);
3063
3064 spd = (*scontrol >> 4) & 0xf;
3065 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
3066
3067 return spd != target;
3068}
3069
3070
3071
3072
3073
3074
3075
3076
3077
3078
3079
3080
3081
3082
3083
3084
3085static int sata_set_spd_needed(struct ata_link *link)
3086{
3087 u32 scontrol;
3088
3089 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
3090 return 1;
3091
3092 return __sata_set_spd_needed(link, &scontrol);
3093}
3094
3095
3096
3097
3098
3099
3100
3101
3102
3103
3104
3105
3106
3107
3108int sata_set_spd(struct ata_link *link)
3109{
3110 u32 scontrol;
3111 int rc;
3112
3113 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3114 return rc;
3115
3116 if (!__sata_set_spd_needed(link, &scontrol))
3117 return 0;
3118
3119 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3120 return rc;
3121
3122 return 1;
3123}
3124
3125
3126
3127
3128
3129
3130
3131
3132
3133
3134
3135
3136
/*
 * This mode/timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 *
 * Columns: mode, setup, act8b, rec8b, cyc8b, active, recover,
 * dmack_hold, cycle, udma.  Table is sorted by mode and terminated
 * by a 0xFF sentinel entry (see ata_timing_find_mode()).
 */
static const struct ata_timing ata_timing[] = {
/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0,  960,   0 }, */
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 0,  600,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 0,  383,   0 },
	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 0,  240,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 0,  180,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 0,  120,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 0,  100,   0 },
	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20, 0,   80,   0 },

	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 50, 960,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 30, 480,   0 },
	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 20, 240,   0 },

	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 20, 480,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 5,  150,   0 },
	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 5,  120,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 5,  100,   0 },
	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20, 5,   80,   0 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0, 0,    0, 150 }, */
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0, 0,    0, 120 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0, 0,    0,  80 },
	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0, 0,    0,  60 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0, 0,    0,  45 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0, 0,    0,  30 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0, 0,    0,  20 },
	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0, 0,    0,  15 },

	{ 0xFF }
};
3168
/* ENOUGH: divide v by unit, rounding up; EZ: same but 0 stays 0
 * (a zero timing entry means "not applicable", not "zero clocks"). */
#define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
#define EZ(v, unit)		((v)?ENOUGH(v, unit):0)

/* Convert nanosecond timings in *t to bus-clock counts in *q, given
 * cycle times T (PIO/MWDMA) and UT (UDMA) in picoseconds-per-1000ns
 * terms (hence the * 1000 scaling). */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup	= EZ(t->setup      * 1000,  T);
	q->act8b	= EZ(t->act8b      * 1000,  T);
	q->rec8b	= EZ(t->rec8b      * 1000,  T);
	q->cyc8b	= EZ(t->cyc8b      * 1000,  T);
	q->active	= EZ(t->active     * 1000,  T);
	q->recover	= EZ(t->recover    * 1000,  T);
	q->dmack_hold	= EZ(t->dmack_hold * 1000,  T);
	q->cycle	= EZ(t->cycle      * 1000,  T);
	q->udma		= EZ(t->udma       * 1000, UT);
}
3184
/* Merge timings *a and *b into *m, taking the slower (larger) value
 * for every field selected by the @what bitmask. */
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}
3198
3199const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
3200{
3201 const struct ata_timing *t = ata_timing;
3202
3203 while (xfer_mode > t->mode)
3204 t++;
3205
3206 if (xfer_mode == t->mode)
3207 return t;
3208 return NULL;
3209}
3210
/* Compute the bus-clock timing for @adev at transfer mode @speed into
 * *t, using clock periods T (PIO/MWDMA) and UT (UDMA).  EIDE drive
 * reported cycle times are merged in when available.  Returns 0 on
 * success, -EINVAL when @speed has no timing table entry.
 */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/* In a few cases quantisation may produce enough errors to
	   leave t->cycle too low for the sum of active and recovery
	   if so we must correct this */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
3281
3282
3283
3284
3285
3286
3287
3288
3289
3290
3291
3292
3293
3294
3295
3296
3297
/**
 *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
 *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
 *	@cycle: cycle duration in ns
 *
 *	Return matching xfer mode for @cycle.  The returned mode is of
 *	the transfer type specified by @xfer_shift.  If @cycle is too
 *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
 *	than the fastest known mode, the fasted mode is returned.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mode, 0xff if no match found.
 */
u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
{
	u8 base_mode = 0xff, last_mode = 0xff;
	const struct ata_xfer_ent *ent;
	const struct ata_timing *t;

	/* find the base mode for the requested transfer type */
	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (ent->shift == xfer_shift)
			base_mode = ent->base;

	/* walk the timing table from the base mode while the entries
	 * still belong to the requested transfer type */
	for (t = ata_timing_find_mode(base_mode);
	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
		unsigned short this_cycle;

		switch (xfer_shift) {
		case ATA_SHIFT_PIO:
		case ATA_SHIFT_MWDMA:
			this_cycle = t->cycle;
			break;
		case ATA_SHIFT_UDMA:
			this_cycle = t->udma;
			break;
		default:
			return 0xff;
		}

		/* mode requires a shorter cycle than requested - stop */
		if (cycle > this_cycle)
			break;

		last_mode = t->mode;
	}

	return last_mode;
}
3332
3333
3334
3335
3336
3337
3338
3339
3340
3341
3342
3343
3344
3345
3346
3347
/**
 *	ata_down_xfermask_limit - lower a device's transfer mode limits
 *	@dev: device to adjust
 *	@sel: ATA_DNXFER_* selector, optionally ORed with ATA_DNXFER_QUIET
 *
 *	Reduce @dev's pio/mwdma/udma masks according to @sel: drop the
 *	highest PIO mode, drop the highest DMA mode, clamp UDMA to
 *	40-wire-safe modes, or force PIO (optionally PIO0 only).
 *
 *	RETURNS: 0 on success, -ENOENT if no further reduction is
 *	possible (would lose PIO entirely or nothing changed).
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned long orig_mask, xfer_mask;
	unsigned long pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* knock off the highest remaining PIO mode */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* knock off the highest DMA mode; UDMA takes precedence */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		/* restrict UDMA to modes safe on a 40-wire cable */
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fall through - FORCE_PIO0 also disables all DMA */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* refuse to drop PIO completely or to make a no-op change */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_printk(dev, KERN_WARNING,
			       "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
3421
/**
 *	ata_dev_set_mode - program the device's transfer mode
 *	@dev: device whose dev->xfer_mode / dev->xfer_shift to apply
 *
 *	Issue SET FEATURES - XFER MODE (unless the NOSETXFER horkage
 *	applies on SATA), revalidate the device afterwards, and decide
 *	whether a device-reported error can be safely ignored.
 *
 *	RETURNS: 0 on success, negative errno otherwise.
 */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
	const char *dev_err_whine = "";
	int ign_dev_err = 0;
	unsigned int err_mask = 0;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	/* NOSETXFER is only honoured for true SATA devices */
	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
		dev_err_whine = " (SET_XFERMODE skipped)";
	else {
		if (nosetxfer)
			ata_dev_printk(dev, KERN_WARNING,
				       "NOSETXFER but PATA detected - can't "
				       "skip SETXFER, might malfunction\n");
		err_mask = ata_dev_set_xfermode(dev);
	}

	/* non-device errors are always fatal */
	if (err_mask & ~AC_ERR_DEV)
		goto fail;

	/* revalidate with POST_SETMODE set so EH knows the context */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	if (dev->xfer_shift == ATA_SHIFT_PIO) {
		/* CFA devices may legitimately reject SETXFER for PIO */
		if (ata_id_is_cfa(dev->id))
			ign_dev_err = 1;

		/* very old (pre-ATA) devices may not grok SETXFER */
		if (ata_id_major_version(dev->id) == 0 &&
		    dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;

		/* devices without IORDY need not support SETXFER for
		   the slow PIO modes */
		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;
	}

	/* MWDMA0 support reported via id word 63 bit 8 */
	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
	    dev->dma_mode == XFER_MW_DMA_0 &&
	    (dev->id[63] >> 8) & 1)
		ign_dev_err = 1;

	/* if the device already runs the requested mode, ignore the error */
	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
		ign_dev_err = 1;

	if (err_mask & AC_ERR_DEV) {
		if (!ign_dev_err)
			goto fail;
		else
			dev_err_whine = " (device error ignored)";
	}

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
		       dev_err_whine);

	return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
		       "(err_mask=0x%x)\n", err_mask);
	return -EIO;
}
3503
3504
3505
3506
3507
3508
3509
3510
3511
3512
3513
3514
3515
3516
3517
3518
3519
3520
3521int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3522{
3523 struct ata_port *ap = link->ap;
3524 struct ata_device *dev;
3525 int rc = 0, used_dma = 0, found = 0;
3526
3527
3528 ata_for_each_dev(dev, link, ENABLED) {
3529 unsigned long pio_mask, dma_mask;
3530 unsigned int mode_mask;
3531
3532 mode_mask = ATA_DMA_MASK_ATA;
3533 if (dev->class == ATA_DEV_ATAPI)
3534 mode_mask = ATA_DMA_MASK_ATAPI;
3535 else if (ata_id_is_cfa(dev->id))
3536 mode_mask = ATA_DMA_MASK_CFA;
3537
3538 ata_dev_xfermask(dev);
3539 ata_force_xfermask(dev);
3540
3541 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3542 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3543
3544 if (libata_dma_mask & mode_mask)
3545 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3546 else
3547 dma_mask = 0;
3548
3549 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3550 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3551
3552 found = 1;
3553 if (ata_dma_enabled(dev))
3554 used_dma = 1;
3555 }
3556 if (!found)
3557 goto out;
3558
3559
3560 ata_for_each_dev(dev, link, ENABLED) {
3561 if (dev->pio_mode == 0xff) {
3562 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
3563 rc = -EINVAL;
3564 goto out;
3565 }
3566
3567 dev->xfer_mode = dev->pio_mode;
3568 dev->xfer_shift = ATA_SHIFT_PIO;
3569 if (ap->ops->set_piomode)
3570 ap->ops->set_piomode(ap, dev);
3571 }
3572
3573
3574 ata_for_each_dev(dev, link, ENABLED) {
3575 if (!ata_dma_enabled(dev))
3576 continue;
3577
3578 dev->xfer_mode = dev->dma_mode;
3579 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3580 if (ap->ops->set_dmamode)
3581 ap->ops->set_dmamode(ap, dev);
3582 }
3583
3584
3585 ata_for_each_dev(dev, link, ENABLED) {
3586 rc = ata_dev_set_mode(dev);
3587 if (rc)
3588 goto out;
3589 }
3590
3591
3592
3593
3594 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3595 ap->host->simplex_claimed = ap;
3596
3597 out:
3598 if (rc)
3599 *r_failed_dev = dev;
3600 return rc;
3601}
3602
3603
3604
3605
3606
3607
3608
3609
3610
3611
3612
3613
3614
3615
3616
3617
3618
3619
3620
3621
3622
/**
 *	ata_wait_ready - poll a link until it is ready
 *	@link: link to wait on (must not be a slave link)
 *	@deadline: jiffies deadline to give up at
 *	@check_ready: callback returning >0 ready, 0 not ready,
 *		      -ENODEV "no device", other negative errno on error
 *
 *	Poll @check_ready every 50ms until the link reports ready, the
 *	deadline passes, or a hard error occurs.  -ENODEV from the
 *	callback is tolerated for a while (ATA_TMOUT_FF_WAIT) on links
 *	that are online or possibly coming up.
 *
 *	RETURNS: 0 when ready, -EBUSY on deadline, other negative errno
 *	propagated from @check_ready.
 */
int ata_wait_ready(struct ata_link *link, unsigned long deadline,
		   int (*check_ready)(struct ata_link *link))
{
	unsigned long start = jiffies;
	unsigned long nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
	int warned = 0;

	/* slave links share the master's status; callers must pass
	   the master link here */
	WARN_ON(link == link->ap->slave_link);

	/* the -ENODEV grace period never extends past the caller's deadline */
	if (time_after(nodev_deadline, deadline))
		nodev_deadline = deadline;

	while (1) {
		unsigned long now = jiffies;
		int ready, tmp;

		ready = tmp = check_ready(link);
		if (ready > 0)
			return 0;

		/*
		 * -ENODEV is treated as "not ready yet" when the link
		 * is online, or on a SATA link that is not yet proven
		 * offline and still inside the nodev grace period.
		 */
		if (ready == -ENODEV) {
			if (ata_link_online(link))
				ready = 0;
			else if ((link->ap->flags & ATA_FLAG_SATA) &&
				 !ata_link_offline(link) &&
				 time_before(now, nodev_deadline))
				ready = 0;
		}

		if (ready)
			return ready;
		if (time_after(now, deadline))
			return -EBUSY;

		/* warn once if we've been waiting a while with time to spare */
		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_link_printk(link, KERN_WARNING,
				"link is slow to respond, please be patient "
				"(ready=%d)\n", tmp);
			warned = 1;
		}

		msleep(50);
	}
}
3683
3684
3685
3686
3687
3688
3689
3690
3691
3692
3693
3694
3695
3696
3697
/**
 *	ata_wait_after_reset - wait for a link to become ready after reset
 *	@link: link to wait on
 *	@deadline: jiffies deadline
 *	@check_ready: readiness callback (see ata_wait_ready())
 *
 *	Sleep for the mandatory post-reset settle time, then poll the
 *	link via ata_wait_ready().
 *
 *	RETURNS: same as ata_wait_ready().
 */
int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
			 int (*check_ready)(struct ata_link *link))
{
	msleep(ATA_WAIT_AFTER_RESET);

	return ata_wait_ready(link, deadline, check_ready);
}
3705
3706
3707
3708
3709
3710
3711
3712
3713
3714
3715
3716
3717
3718
3719
3720
3721
3722
3723
3724
3725
3726
3727
/**
 *	sata_link_debounce - wait for a SATA PHY to stabilize
 *	@link: link to debounce
 *	@params: {interval, duration, timeout} in ms (e.g. sata_deb_timing_*)
 *	@deadline: jiffies deadline, further clamped by params[2]
 *
 *	Poll the SStatus DET field every params[0] ms until it has held
 *	the same value for params[1] ms.  DET==1 (device presence without
 *	established comms) is not accepted as stable until the deadline.
 *
 *	RETURNS: 0 when stable, -EPIPE on timeout, other negative errno
 *	from SCR access failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval = params[0];
	unsigned long duration = params[1];
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	/* never debounce longer than params[2] ms from now */
	t = ata_deadline(jiffies, params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* only the DET field matters */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET unchanged: stable once it has held for 'duration' */
		if (cur == last) {
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies,
				       ata_deadline(last_jiffies, duration)))
				return 0;
			continue;
		}

		/* DET changed; restart the stability window */
		last = cur;
		last_jiffies = jiffies;

		/* give up only after a change past the deadline, so a
		   last-second flip still gets one more chance above */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}
3775
3776
3777
3778
3779
3780
3781
3782
3783
3784
3785
3786
3787
3788
3789
/**
 *	sata_link_resume - bring a SATA link out of power-save and debounce it
 *	@link: link to resume
 *	@params: debounce timing parameters (see sata_link_debounce())
 *	@deadline: jiffies deadline
 *
 *	Write SControl to disable power management (IPM bits set, DET
 *	cleared while preserving SPD -- per SControl register layout),
 *	wait for the PHY, debounce the link and clear SError.
 *
 *	RETURNS: 0 on success, negative errno otherwise.  -EINVAL from
 *	the SError access (no SCR support) is treated as success.
 */
int sata_link_resume(struct ata_link *link, const unsigned long *params,
		     unsigned long deadline)
{
	u32 scontrol, serror;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/* keep SPD (0x0f0), set IPM to 0x3 (disable partial/slumber) */
	scontrol = (scontrol & 0x0f0) | 0x300;

	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
		return rc;

	/* give the PHY time to wake up before sampling its status */
	msleep(200);

	if ((rc = sata_link_debounce(link, params, deadline)))
		return rc;

	/* clear any SError bits accumulated during resume */
	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
		rc = sata_scr_write(link, SCR_ERROR, serror);

	return rc != -EINVAL ? rc : 0;
}
3818
3819
3820
3821
3822
3823
3824
3825
3826
3827
3828
3829
3830
3831
3832
3833
3834
3835
/**
 *	ata_std_prereset - standard prereset method
 *	@link: link to prepare for reset
 *	@deadline: jiffies deadline
 *
 *	If hardreset is already requested, do nothing.  Otherwise
 *	resume the SATA link (when applicable) and, if the link is
 *	physically offline, drop the softreset request.
 *
 *	RETURNS: always 0 (resume failures are only logged).
 */
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* hardreset is about to happen anyway; nothing to prepare */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* wake the link up; failure is non-fatal here */
	if (ap->flags & ATA_FLAG_SATA) {
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_printk(link, KERN_WARNING, "failed to resume "
					"link for reset (errno=%d)\n", rc);
	}

	/* an offline link cannot respond to softreset */
	if (ata_phys_link_offline(link))
		ehc->i.action &= ~ATA_EH_SOFTRESET;

	return 0;
}
3862
3863
3864
3865
3866
3867
3868
3869
3870
3871
3872
3873
3874
3875
3876
3877
3878
3879
3880
3881
3882
3883
3884
3885
3886
/**
 *	sata_link_hardreset - COMRESET a SATA link
 *	@link: link to reset
 *	@timing: debounce timing parameters
 *	@deadline: jiffies deadline
 *	@online: optional out parameter - set true if the link came up
 *	@check_ready: optional readiness callback (see ata_wait_ready())
 *
 *	Optionally limit link speed first, then issue COMRESET via
 *	SControl DET=1, resume the link and wait for readiness.  PMP
 *	host links always return -EAGAIN so the caller retries with
 *	PMP-aware reset handling.
 *
 *	RETURNS: 0 on success, -EAGAIN for the PMP follow-up case,
 *	negative errno otherwise.
 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
			unsigned long deadline,
			bool *online, int (*check_ready)(struct ata_link *))
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (online)
		*online = false;

	if (sata_set_spd_needed(link)) {
		/*
		 * A speed limit is pending: put the PHY offline
		 * (DET=4, keep SPD) before applying the new limit.
		 */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			goto out;

		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(link);
	}

	/* issue COMRESET: DET=1, preserving SPD */
	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		goto out;

	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
		goto out;

	/* hold COMRESET briefly before releasing it via resume */
	msleep(1);

	/* bring the link back up and debounce it */
	rc = sata_link_resume(link, timing, deadline);
	if (rc)
		goto out;

	/* nothing to wait for if the link stayed offline */
	if (ata_phys_link_offline(link))
		goto out;

	/* the link is up at this point */
	if (online)
		*online = true;

	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
		/*
		 * PMP-capable host link: wait for the PMP (bounded by
		 * its own deadline) and hand back -EAGAIN so the
		 * caller performs the PMP-specific reset sequence.
		 */
		if (check_ready) {
			unsigned long pmp_deadline;

			pmp_deadline = ata_deadline(jiffies,
						    ATA_TMOUT_PMP_SRST_WAIT);
			if (time_after(pmp_deadline, deadline))
				pmp_deadline = deadline;
			ata_wait_ready(link, pmp_deadline, check_ready);
		}
		rc = -EAGAIN;
		goto out;
	}

	rc = 0;
	if (check_ready)
		rc = ata_wait_ready(link, deadline, check_ready);
 out:
	if (rc && rc != -EAGAIN) {
		/* online is set iff link is online && reset succeeded */
		if (online)
			*online = false;
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
	}
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
3975
3976
3977
3978
3979
3980
3981
3982
3983
3984
3985
3986
3987
3988
3989
/**
 *	sata_std_hardreset - standard SATA hardreset method
 *	@link: link to reset
 *	@class: unused here; resulting class is determined by follow-up
 *	@deadline: jiffies deadline
 *
 *	COMRESET without waiting for device readiness.  If the link
 *	came online, return -EAGAIN so EH follows up with softreset to
 *	classify the device.
 *
 *	RETURNS: -EAGAIN if the link is online, otherwise the
 *	sata_link_hardreset() result.
 */
int sata_std_hardreset(struct ata_link *link, unsigned int *class,
		       unsigned long deadline)
{
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	bool online;
	int rc;

	/* do hardreset; no readiness check (check_ready == NULL) */
	rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
	return online ? -EAGAIN : rc;
}
4001
4002
4003
4004
4005
4006
4007
4008
4009
4010
4011
4012
4013
/**
 *	ata_std_postreset - standard postreset method
 *	@link: link that was reset
 *	@classes: unused here; device classes from the reset
 *
 *	Clear SError accumulated during reset and print the link status.
 */
void ata_std_postreset(struct ata_link *link, unsigned int *classes)
{
	u32 serror;

	DPRINTK("ENTER\n");

	/* reset complete - clear SError (write-1-to-clear) */
	if (!sata_scr_read(link, SCR_ERROR, &serror))
		sata_scr_write(link, SCR_ERROR, serror);

	/* print link status */
	sata_print_link_status(link);

	DPRINTK("EXIT\n");
}
4029
4030
4031
4032
4033
4034
4035
4036
4037
4038
4039
4040
4041
4042
4043
4044
4045
/**
 *	ata_dev_same_device - check whether new IDENTIFY data matches a device
 *	@dev: known device
 *	@new_class: class reported by the fresh probe
 *	@new_id: fresh IDENTIFY page
 *
 *	Compare class, model number and serial number against the
 *	device's stored identity.
 *
 *	RETURNS: 1 if it is the same device, 0 otherwise.
 */
static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
			       const u16 *new_id)
{
	const u16 *old_id = dev->id;
	unsigned char model[2][ATA_ID_PROD_LEN + 1];
	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];

	if (dev->class != new_class) {
		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
			       dev->class, new_class);
		return 0;
	}

	/* extract the old/new model and serial strings for comparison */
	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));

	if (strcmp(model[0], model[1])) {
		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
			       "'%s' != '%s'\n", model[0], model[1]);
		return 0;
	}

	if (strcmp(serial[0], serial[1])) {
		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
			       "'%s' != '%s'\n", serial[0], serial[1]);
		return 0;
	}

	return 1;
}
4078
4079
4080
4081
4082
4083
4084
4085
4086
4087
4088
4089
4090
4091
4092
/**
 *	ata_dev_reread_id - re-read IDENTIFY data and verify the device
 *	@dev: device to re-identify
 *	@readid_flags: ATA_READID_* flags for ata_dev_read_id()
 *
 *	Re-read the IDENTIFY page into the port's scratch buffer,
 *	verify it still describes the same device, then commit it to
 *	dev->id.
 *
 *	RETURNS: 0 on success, -ENODEV on identity mismatch, other
 *	negative errno from the read.
 */
int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
{
	unsigned int class = dev->class;
	u16 *id = (void *)dev->link->ap->sector_buf;
	int rc;

	/* read ID data into scratch space first */
	rc = ata_dev_read_id(dev, &class, readid_flags, id);
	if (rc)
		return rc;

	/* is the device still there? */
	if (!ata_dev_same_device(dev, class, id))
		return -ENODEV;

	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
	return 0;
}
4111
4112
4113
4114
4115
4116
4117
4118
4119
4120
4121
4122
4123
4124
4125
4126
/**
 *	ata_dev_revalidate - re-identify and re-configure a device
 *	@dev: device to revalidate
 *	@new_class: class reported by the most recent probe
 *	@readid_flags: ATA_READID_* flags
 *
 *	Sanity-check @new_class, re-read IDENTIFY data, re-run device
 *	configuration, and verify the capacity did not change (with a
 *	special allowance for a late HPA unlock growing the device to
 *	its native size).
 *
 *	RETURNS: 0 on success, negative errno otherwise.
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
		       unsigned int readid_flags)
{
	u64 n_sectors = dev->n_sectors;
	u64 n_native_sectors = dev->n_native_sectors;
	int rc;

	if (!ata_dev_enabled(dev))
		return -ENODEV;

	/* only ATA/ATAPI/SEMB classes are acceptable on revalidation */
	if (ata_class_enabled(new_class) &&
	    new_class != ATA_DEV_ATA &&
	    new_class != ATA_DEV_ATAPI &&
	    new_class != ATA_DEV_SEMB) {
		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
			       dev->class, new_class);
		rc = -ENODEV;
		goto fail;
	}

	/* re-read ID */
	rc = ata_dev_reread_id(dev, readid_flags);
	if (rc)
		goto fail;

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc)
		goto fail;

	/* verify n_sectors hasn't changed */
	if (dev->class == ATA_DEV_ATA && n_sectors &&
	    dev->n_sectors != n_sectors) {
		ata_dev_printk(dev, KERN_WARNING, "n_sectors mismatch "
			       "%llu != %llu\n",
			       (unsigned long long)n_sectors,
			       (unsigned long long)dev->n_sectors);

		/*
		 * If the device grew to its native capacity, the HPA
		 * was probably unlocked late (e.g. by BIOS/firmware);
		 * keep the old size and continue.
		 */
		if (dev->n_native_sectors == n_native_sectors &&
		    dev->n_sectors > n_sectors &&
		    dev->n_sectors == n_native_sectors) {
			ata_dev_printk(dev, KERN_WARNING,
				       "new n_sectors matches native, probably "
				       "late HPA unlock, continuing\n");
			/* use the stored (smaller) size */
			dev->n_sectors = n_sectors;
		} else {
			/* restore original values and fail revalidation */
			dev->n_native_sectors = n_native_sectors;
			dev->n_sectors = n_sectors;
			rc = -ENODEV;
			goto fail;
		}
	}

	return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
4193
/* one quirk-table row: model/revision patterns plus horkage flags */
struct ata_blacklist_entry {
	const char *model_num;		/* model pattern; '*' at end = prefix match */
	const char *model_rev;		/* firmware revision pattern; NULL = any */
	unsigned long horkage;		/* ATA_HORKAGE_* flags to apply */
};
4199
static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA-related problems */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
	/* RAID enclosure config disks - hide them from the OS */
	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA },

	/* Devices where NCQ is known to misbehave */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
	/* NCQ broken on this Fujitsu drive */
	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
	/* NCQ broken on these models/firmwares */
	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
	{ "OCZ CORE_SSD",	"02.10104",	ATA_HORKAGE_NONCQ },

	/* Seagate drives with broken firmware (FIRMWARE_WARN makes
	   libata nag the user to update) plus NCQ disabled */
	{ "ST31500341AS",	"SD15",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST31500341AS",	"SD16",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST31500341AS",	"SD17",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST31500341AS",	"SD18",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST31500341AS",	"SD19",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST31000333AS",	"SD15",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST31000333AS",	"SD16",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST31000333AS",	"SD17",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST31000333AS",	"SD18",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST31000333AS",	"SD19",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST3640623AS",	"SD15",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST3640623AS",	"SD16",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST3640623AS",	"SD17",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST3640623AS",	"SD18",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST3640623AS",	"SD19",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST3640323AS",	"SD15",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST3640323AS",	"SD16",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST3640323AS",	"SD17",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST3640323AS",	"SD18",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST3640323AS",	"SD19",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST3320813AS",	"SD15",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST3320813AS",	"SD16",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST3320813AS",	"SD17",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST3320813AS",	"SD18",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST3320813AS",	"SD19",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST3320613AS",	"SD15",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST3320613AS",	"SD16",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST3320613AS",	"SD17",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST3320613AS",	"SD18",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST3320613AS",	"SD19",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	/* NCQ disabled on these Hitachi drive/firmware combinations */
	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },

	/* Devices with broken HPA (host protected area) handling */
	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },

	/* OCZ Vertex with broken HPA on this firmware */
	{ "OCZ-VERTEX",		"1.30",		ATA_HORKAGE_BROKEN_HPA },

	/* Devices needing the HPA size quirk (ATA_HORKAGE_HPA_SIZE) */
	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },

	/* Devices needing relaxed 40-wire detection (ATA_HORKAGE_IVB) */
	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
	/* TSSTcorp drives with IVB quirk on these firmwares */
	{ "TSSTcorp CDDVDW SH-S202H", "SB00",	  ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202H", "SB01",	  ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202J", "SB00",	  ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202J", "SB01",	  ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202N", "SB00",	  ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202N", "SB01",	  ATA_HORKAGE_IVB, },

	/* MTRON SSDs known OK behind SATA bridges */
	{ "MTRON MSP-SATA*",		NULL,	ATA_HORKAGE_BRIDGE_OK, },

	/* WD enclosures limited to 1.5 Gbps */
	{ "WD My Book",			NULL,	ATA_HORKAGE_1_5_GBPS, },

	/* Devices that do not tolerate SET FEATURES - XFER MODE
	   (see ATA_HORKAGE_NOSETXFER handling in ata_dev_set_mode) */
	{ "PIONEER DVD-RW  DVRTD08",	"1.00",	ATA_HORKAGE_NOSETXFER },

	/* End Marker */
	{ }
};
4366
/*
 * strn_pattern_cmp - compare a name against a simple pattern.
 *
 * If @patt ends with @wildchar, the comparison covers only the text
 * before the wildcard (prefix match).  Otherwise the comparison covers
 * strlen(@name) characters; an empty @name matches only an empty @patt.
 *
 * Returns 0 on match, non-zero otherwise (strncmp semantics).
 */
static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
{
	const char *wild;
	int len;

	wild = strchr(patt, wildchar);
	if (wild && wild[1] == '\0') {
		/* trailing wildcard: match up to (not including) it */
		len = wild - patt;
	} else {
		len = strlen(name);
		if (len == 0)
			return *patt ? -1 : 0;
	}

	return strncmp(patt, name, len);
}
4389
/**
 *	ata_dev_blacklisted - look up quirk flags for a device
 *	@dev: device whose IDENTIFY strings to match
 *
 *	Match the device's model number and firmware revision against
 *	ata_device_blacklist.
 *
 *	RETURNS: the matching entry's horkage flags, or 0 if the device
 *	is not listed.
 */
static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
{
	unsigned char model_num[ATA_ID_PROD_LEN + 1];
	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
	const struct ata_blacklist_entry *ad = ata_device_blacklist;

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));

	while (ad->model_num) {
		if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
			/* NULL model_rev means "any revision" */
			if (ad->model_rev == NULL)
				return ad->horkage;
			if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
				return ad->horkage;
		}
		ad++;
	}
	return 0;
}
4410
/* return non-zero if DMA must not be used with this device */
static int ata_dma_blacklisted(const struct ata_device *dev)
{
	/*
	 * CDB-interrupt ATAPI devices cannot use DMA on ports driven
	 * with polling PIO (ATA_FLAG_PIO_POLLING).
	 */
	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
	    (dev->flags & ATA_DFLAG_CDB_INTR))
		return 1;
	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
}
4422
4423
4424
4425
4426
4427
4428
4429
4430
/*
 * ata_is_40wire - does the device see a 40-wire cable?
 *
 * Devices with the IVB horkage report cable type unreliably, so use
 * the relaxed detection variant for them.
 */
static int ata_is_40wire(struct ata_device *dev)
{
	if (dev->horkage & ATA_HORKAGE_IVB)
		return ata_drive_40wire_relaxed(dev->id);
	return ata_drive_40wire(dev->id);
}
4437
4438
4439
4440
4441
4442
4443
4444
4445
4446
4447
4448
4449
4450
/**
 *	cable_is_40wire - decide whether the port's cable is 40-wire
 *	@ap: port to check
 *
 *	Use the host-side cable type when it is decisive; otherwise
 *	fall back to asking every enabled device on the port.
 *
 *	RETURNS: 1 if the cable must be treated as 40-wire, 0 otherwise.
 */
static int cable_is_40wire(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;

	/* host knows it is 40-wire */
	if (ap->cbl == ATA_CBL_PATA40)
		return 1;

	/* 80-wire or SATA is never 40-wire limited */
	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
		return 0;

	/*
	 * Short 40-wire cables (e.g. internal CF adapters) are
	 * electrically acceptable at higher speeds.
	 */
	if (ap->cbl == ATA_CBL_PATA40_SHORT)
		return 0;

	/*
	 * Cable type unknown to the host: treat the cable as 80-wire
	 * only if every enabled device agrees it is not 40-wire.
	 */
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ENABLED) {
			if (!ata_is_40wire(dev))
				return 0;
		}
	}
	return 1;
}
4488
4489
4490
4491
4492
4493
4494
4495
4496
4497
4498
4499
4500
/**
 *	ata_dev_xfermask - compute the usable transfer masks for a device
 *	@dev: device to update
 *
 *	Intersect the port's, the device's and the drive-reported
 *	transfer masks, then apply quirk, pairing, simplex, IORDY,
 *	controller-filter and cable restrictions.  The result is written
 *	back into dev->{pio,mwdma,udma}_mask.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 * A master/slave pair shares the bus; cap the modes to ones
	 * safe in a mixed configuration.
	 */
	if (ata_dev_pair(dev)) {
		/* no PIO5/6 with a paired device */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* no MWDMA3/4 with a paired device */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING,
			       "device is on DMA blacklist, disabling DMA\n");
	}

	/* on a simplex host only one port may use DMA at a time */
	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
			       "other device, disabling DMA\n");
	}

	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	/* let the controller apply its own restrictions */
	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/*
	 * 40-wire cables cannot carry UDMA modes above UDMA/33
	 * (mask 0xF8 covers UDMA3 and up); clamp if such modes are
	 * still enabled and the cable turns out to be 40-wire.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if (cable_is_40wire(ap)) {
			ata_dev_printk(dev, KERN_WARNING,
				       "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
4566
4567
4568
4569
4570
4571
4572
4573
4574
4575
4576
4577
4578
4579
4580
/**
 *	ata_dev_set_xfermode - issue SET FEATURES - XFER MODE
 *	@dev: device to program
 *
 *	Build and issue the SET FEATURES - XFER MODE taskfile for
 *	dev->xfer_mode.  Devices without IORDY are told to disable
 *	IORDY (subcommand 0x01) instead; if they don't even report
 *	IORDY capability, nothing is sent.
 *
 *	RETURNS: 0 on success or skip, AC_ERR_* mask on failure.
 */
static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	/* issued with polling since some controllers mishandle the
	   interrupt-driven completion of this command - NOTE(review):
	   inferred from ATA_TFLAG_POLLING; confirm against upstream */
	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
	tf.protocol = ATA_PROT_NODATA;

	if (ata_pio_need_iordy(dev))
		tf.nsect = dev->xfer_mode;
	/* if the device has IORDY but we don't want it, turn it off */
	else if (ata_id_has_iordy(dev->id))
		tf.nsect = 0x01;
	else /* in the ancient relic department - skip all of this */
		return 0;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4611
4612
4613
4614
4615
4616
4617
4618
4619
4620
4621
4622
4623
4624
4625
/**
 *	ata_dev_set_feature - issue SET FEATURES with a subcommand
 *	@dev: device to program
 *	@enable: SETFEATURES_* enable/disable subcommand (feature register)
 *	@feature: feature value (sector count register)
 *
 *	Issue a generic SET FEATURES command, used e.g. for SATA
 *	feature enable/disable.
 *
 *	RETURNS: 0 on success, AC_ERR_* mask on failure.
 */
static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
					u8 feature)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - SATA features\n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = enable;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = feature;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4647
4648
4649
4650
4651
4652
4653
4654
4655
4656
4657
4658
4659
/**
 *	ata_dev_init_params - issue INITIALIZE DEVICE PARAMETERS
 *	@dev: device to program
 *	@heads: CHS heads (1-16)
 *	@sectors: CHS sectors per track (1-255)
 *
 *	Program the device's CHS translation.  A device abort is
 *	treated as success since many devices reject the (obsolete)
 *	command while still operating correctly.
 *
 *	RETURNS: 0 on success, AC_ERR_INVALID for out-of-range
 *	geometry, AC_ERR_* mask on other failures.
 */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	/* A clean abort indicates an original or just out of spec drive;
	   it failed and the drive is still OK - ignore the error */
	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
		err_mask = 0;

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4690
4691
4692
4693
4694
4695
4696
4697
4698
4699
/**
 *	ata_sg_clean - unmap a command's scatter/gather DMA mapping
 *	@qc: command whose S/G list to unmap
 *
 *	Undo the dma_map_sg() done by ata_sg_setup() and clear the
 *	DMAMAP flag.  Must be called with a non-NULL qc->sg.
 */
void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->sg;
	int dir = qc->dma_dir;

	WARN_ON_ONCE(sg == NULL);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* unmap with the pre-mapping element count, not the mapped one */
	if (qc->n_elem)
		dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->sg = NULL;
}
4716
4717
4718
4719
4720
4721
4722
4723
4724
4725
4726
4727
4728
4729
4730
/**
 *	atapi_check_dma - check whether an ATAPI command may use DMA
 *	@qc: command to check
 *
 *	Reject DMA for transfers that are not a multiple of 16 bytes
 *	(unless the device carries the MOD16_DMA horkage allowing it),
 *	then defer to the controller's check_atapi_dma hook if present.
 *
 *	RETURNS: 0 if DMA is OK, non-zero to force PIO.
 */
int atapi_check_dma(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Don't allow DMA if it isn't multiple of 16 bytes.  Quite a
	 * few ATAPI devices choke on such DMA requests.
	 */
	if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
	    unlikely(qc->nbytes & 15))
		return 1;

	if (ap->ops->check_atapi_dma)
		return ap->ops->check_atapi_dma(qc);

	return 0;
}
4747
4748
4749
4750
4751
4752
4753
4754
4755
4756
4757
4758
4759
4760
4761
4762
/**
 *	ata_std_qc_defer - standard qc_defer method
 *	@qc: command to consider
 *
 *	Allow an NCQ command only when no non-NCQ command is active,
 *	and a non-NCQ command only when the link is completely idle
 *	(no active tag and no NCQ commands outstanding).
 *
 *	RETURNS: 0 to proceed, ATA_DEFER_LINK to defer.
 */
int ata_std_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;

	if (qc->tf.protocol == ATA_PROT_NCQ) {
		/* NCQ may run alongside other NCQ commands */
		if (!ata_tag_valid(link->active_tag))
			return 0;
	} else {
		/* non-NCQ needs the link fully idle */
		if (!ata_tag_valid(link->active_tag) && !link->sactive)
			return 0;
	}

	return ATA_DEFER_LINK;
}
4777
/* no-op ->qc_prep for controllers that need no per-command preparation */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4779
4780
4781
4782
4783
4784
4785
4786
4787
4788
4789
4790
4791
4792
/**
 *	ata_sg_init - attach a scatter/gather table to a command
 *	@qc: command to initialize
 *	@sg: scatter/gather table
 *	@n_elem: number of elements in @sg
 *
 *	Record the S/G table on @qc and reset the transfer cursor to
 *	its first element.  No DMA mapping happens here.
 */
void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
		 unsigned int n_elem)
{
	qc->sg = sg;
	qc->n_elem = n_elem;
	qc->cursg = qc->sg;
}
4800
4801
4802
4803
4804
4805
4806
4807
4808
4809
4810
4811
4812
4813
/**
 *	ata_sg_setup - DMA-map a command's scatter/gather table
 *	@qc: command whose S/G table to map
 *
 *	Map qc->sg for DMA, remember the original element count for
 *	later unmapping and mark the command DMAMAP.
 *
 *	RETURNS: 0 on success, -1 if mapping failed.
 */
static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int n_elem;

	VPRINTK("ENTER, ata%u\n", ap->print_id);

	/* dma_map_sg() returns 0 on failure; with unsigned n_elem the
	   "< 1" test is effectively an == 0 check */
	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
	if (n_elem < 1)
		return -1;

	DPRINTK("%d sg elements mapped\n", n_elem);
	/* keep the pre-mapping count; dma_unmap_sg() needs it */
	qc->orig_n_elem = qc->n_elem;
	qc->n_elem = n_elem;
	qc->flags |= ATA_QCFLAG_DMAMAP;

	return 0;
}
4832
4833
4834
4835
4836
4837
4838
4839
4840
4841
4842
4843
4844
/**
 *	swap_buf_le16 - convert a buffer of little-endian u16s to CPU order
 *	@buf: buffer to convert in place
 *	@buf_words: number of 16-bit words in @buf
 *
 *	No-op on little-endian systems; byte-swaps each word on
 *	big-endian ones.
 */
void swap_buf_le16(u16 *buf, unsigned int buf_words)
{
#ifdef __BIG_ENDIAN
	unsigned int i;

	for (i = 0; i < buf_words; i++)
		buf[i] = le16_to_cpu(buf[i]);
#endif /* __BIG_ENDIAN */
}
4854
4855
4856
4857
4858
4859
4860
4861
4862
4863static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4864{
4865 struct ata_queued_cmd *qc = NULL;
4866 unsigned int i;
4867
4868
4869 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4870 return NULL;
4871
4872
4873 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4874 if (!test_and_set_bit(i, &ap->qc_allocated)) {
4875 qc = __ata_qc_from_tag(ap, i);
4876 break;
4877 }
4878
4879 if (qc)
4880 qc->tag = i;
4881
4882 return qc;
4883}
4884
4885
4886
4887
4888
4889
4890
4891
4892
4893struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4894{
4895 struct ata_port *ap = dev->link->ap;
4896 struct ata_queued_cmd *qc;
4897
4898 qc = ata_qc_new(ap);
4899 if (qc) {
4900 qc->scsicmd = NULL;
4901 qc->ap = ap;
4902 qc->dev = dev;
4903
4904 ata_qc_reinit(qc);
4905 }
4906
4907 return qc;
4908}
4909
4910
4911
4912
4913
4914
4915
4916
4917
4918
4919
4920void ata_qc_free(struct ata_queued_cmd *qc)
4921{
4922 struct ata_port *ap;
4923 unsigned int tag;
4924
4925 WARN_ON_ONCE(qc == NULL);
4926 ap = qc->ap;
4927
4928 qc->flags = 0;
4929 tag = qc->tag;
4930 if (likely(ata_tag_valid(tag))) {
4931 qc->tag = ATA_TAG_POISON;
4932 clear_bit(tag, &ap->qc_allocated);
4933 }
4934}
4935
/**
 *	__ata_qc_complete - complete an active ATA command
 *	@qc: command to complete
 *
 *	Unmaps DMA buffers if any, clears per-link active-command
 *	tracking (sactive bitmap for NCQ, active_tag otherwise),
 *	releases any exclusive link claim, marks the qc inactive and
 *	finally invokes the qc's completion callback.
 *
 *	LOCKING:
 *	NOTE(review): presumably called with the host lock held, as it
 *	manipulates ap->qc_active and link state unlocked — confirm
 *	against callers.
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap;
	struct ata_link *link;

	WARN_ON_ONCE(qc == NULL); /* completion of a NULL qc is a driver bug */
	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
	ap = qc->ap;
	link = qc->dev->link;

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* drop the command from the link's active bookkeeping */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		link->sactive &= ~(1 << qc->tag);
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status if this qc held it */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* mark the qc inactive before calling the completion callback so
	 * the command cannot be seen as active once completion starts
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* hand the finished command back to its issuer */
	qc->complete_fn(qc);
}
4974
4975static void fill_result_tf(struct ata_queued_cmd *qc)
4976{
4977 struct ata_port *ap = qc->ap;
4978
4979 qc->result_tf.flags = qc->tf.flags;
4980 ap->ops->qc_fill_rtf(qc);
4981}
4982
4983static void ata_verify_xfer(struct ata_queued_cmd *qc)
4984{
4985 struct ata_device *dev = qc->dev;
4986
4987 if (ata_tag_internal(qc->tag))
4988 return;
4989
4990 if (ata_is_nodata(qc->tf.protocol))
4991 return;
4992
4993 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4994 return;
4995
4996 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4997}
4998
4999
5000
5001
5002
5003
5004
5005
5006
5007
5008
/**
 *	ata_qc_complete - complete the currently active ATA command
 *	@qc: command to complete
 *
 *	With EH-capable drivers, failed qcs are handed to the error
 *	handler instead of being completed directly; successful qcs may
 *	still schedule EH revalidation for configuration-changing
 *	commands.  Without an error handler, the qc is completed
 *	directly unless EH has already been scheduled for it.
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	if (ap->ops->error_handler) {
		struct ata_device *dev = qc->dev;
		struct ata_eh_info *ehi = &dev->link->eh_info;

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			/* always fill result TF for a failed qc */
			fill_result_tf(qc);

			/* internal commands are reaped by their issuer;
			 * everything else goes through EH
			 */
			if (!ata_tag_internal(qc->tag))
				ata_qc_schedule_eh(qc);
			else
				__ata_qc_complete(qc);
			return;
		}

		WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		/* Some commands invalidate cached device configuration;
		 * force revalidation through EH so it is re-read.
		 */
		switch (qc->tf.command) {
		case ATA_CMD_SET_FEATURES:
			if (qc->tf.feature != SETFEATURES_WC_ON &&
			    qc->tf.feature != SETFEATURES_WC_OFF)
				break;
			/* fall through - write cache change needs revalidation */
		case ATA_CMD_INIT_DEV_PARAMS:
		case ATA_CMD_SET_MULTI:
			/* revalidate device */
			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
			ata_port_schedule_eh(ap);
			break;

		case ATA_CMD_SLEEP:
			dev->flags |= ATA_DFLAG_SLEEPING;
			break;
		}

		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
			ata_verify_xfer(qc);

		__ata_qc_complete(qc);
	} else {
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if the command failed or requested it */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
5086
5087
5088
5089
5090
5091
5092
5093
5094
5095
5096
5097
5098
5099
5100
5101
5102
5103int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
5104{
5105 int nr_done = 0;
5106 u32 done_mask;
5107
5108 done_mask = ap->qc_active ^ qc_active;
5109
5110 if (unlikely(done_mask & qc_active)) {
5111 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5112 "(%08x->%08x)\n", ap->qc_active, qc_active);
5113 return -EINVAL;
5114 }
5115
5116 while (done_mask) {
5117 struct ata_queued_cmd *qc;
5118 unsigned int tag = __ffs(done_mask);
5119
5120 qc = ata_qc_from_tag(ap, tag);
5121 if (qc) {
5122 ata_qc_complete(qc);
5123 nr_done++;
5124 }
5125 done_mask &= ~(1 << tag);
5126 }
5127
5128 return nr_done;
5129}
5130
5131
5132
5133
5134
5135
5136
5137
5138
5139
5140
5141
5142
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Records the command as active on its link (sactive bitmap for
 *	NCQ, active_tag otherwise), DMA-maps the scatterlist when the
 *	protocol needs it, runs ->qc_prep and finally hands the qc to
 *	->qc_issue.  Any failure completes the qc with an error mask.
 *
 *	LOCKING:
 *	NOTE(review): presumably called with the host lock held — it
 *	manipulates link/port active state unlocked; confirm against
 *	callers.
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;
	u8 prot = qc->tf.protocol;

	/* with new EH, only one non-NCQ command may be outstanding */
	WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	if (ata_is_ncq(prot)) {
		WARN_ON_ONCE(link->sactive & (1 << qc->tag));

		if (!link->sactive)
			ap->nr_active_links++;
		link->sactive |= 1 << qc->tag;
	} else {
		WARN_ON_ONCE(link->sactive);

		ap->nr_active_links++;
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	/* data transfer commands must carry a scatterlist and byte count */
	BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));

	/* DMA protocols — and PIO on controllers that do PIO via DMA —
	 * need the scatterlist mapped
	 */
	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
				 (ap->flags & ATA_FLAG_PIO_DMA)))
		if (ata_sg_setup(qc))
			goto sg_err;

	/* a sleeping device must be reset before it can take commands */
	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
		link->eh_info.action |= ATA_EH_RESET;
		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
		ata_link_abort(link);
		return;
	}

	ap->ops->qc_prep(qc);

	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sg_err:
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	ata_qc_complete(qc);
}
5201
5202
5203
5204
5205
5206
5207
5208
5209
5210
5211
5212
5213
5214int sata_scr_valid(struct ata_link *link)
5215{
5216 struct ata_port *ap = link->ap;
5217
5218 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5219}
5220
5221
5222
5223
5224
5225
5226
5227
5228
5229
5230
5231
5232
5233
5234
5235
5236
5237int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5238{
5239 if (ata_is_host_link(link)) {
5240 if (sata_scr_valid(link))
5241 return link->ap->ops->scr_read(link, reg, val);
5242 return -EOPNOTSUPP;
5243 }
5244
5245 return sata_pmp_scr_read(link, reg, val);
5246}
5247
5248
5249
5250
5251
5252
5253
5254
5255
5256
5257
5258
5259
5260
5261
5262
5263
5264int sata_scr_write(struct ata_link *link, int reg, u32 val)
5265{
5266 if (ata_is_host_link(link)) {
5267 if (sata_scr_valid(link))
5268 return link->ap->ops->scr_write(link, reg, val);
5269 return -EOPNOTSUPP;
5270 }
5271
5272 return sata_pmp_scr_write(link, reg, val);
5273}
5274
5275
5276
5277
5278
5279
5280
5281
5282
5283
5284
5285
5286
5287
5288
5289
5290int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5291{
5292 if (ata_is_host_link(link)) {
5293 int rc;
5294
5295 if (sata_scr_valid(link)) {
5296 rc = link->ap->ops->scr_write(link, reg, val);
5297 if (rc == 0)
5298 rc = link->ap->ops->scr_read(link, reg, &val);
5299 return rc;
5300 }
5301 return -EOPNOTSUPP;
5302 }
5303
5304 return sata_pmp_scr_write(link, reg, val);
5305}
5306
5307
5308
5309
5310
5311
5312
5313
5314
5315
5316
5317
5318
5319
5320
5321bool ata_phys_link_online(struct ata_link *link)
5322{
5323 u32 sstatus;
5324
5325 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5326 ata_sstatus_online(sstatus))
5327 return true;
5328 return false;
5329}
5330
5331
5332
5333
5334
5335
5336
5337
5338
5339
5340
5341
5342
5343
5344
5345bool ata_phys_link_offline(struct ata_link *link)
5346{
5347 u32 sstatus;
5348
5349 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5350 !ata_sstatus_online(sstatus))
5351 return true;
5352 return false;
5353}
5354
5355
5356
5357
5358
5359
5360
5361
5362
5363
5364
5365
5366
5367
5368
5369
5370
5371bool ata_link_online(struct ata_link *link)
5372{
5373 struct ata_link *slave = link->ap->slave_link;
5374
5375 WARN_ON(link == slave);
5376
5377 return ata_phys_link_online(link) ||
5378 (slave && ata_phys_link_online(slave));
5379}
5380
5381
5382
5383
5384
5385
5386
5387
5388
5389
5390
5391
5392
5393
5394
5395
5396
5397bool ata_link_offline(struct ata_link *link)
5398{
5399 struct ata_link *slave = link->ap->slave_link;
5400
5401 WARN_ON(link == slave);
5402
5403 return ata_phys_link_offline(link) &&
5404 (!slave || ata_phys_link_offline(slave));
5405}
5406
5407#ifdef CONFIG_PM
/**
 *	ata_host_request_pm - ask EH to perform a PM transition on all ports
 *	@host: host to transition
 *	@mesg: PM message to record on each port
 *	@action: EH actions to request on each link
 *	@ehi_flags: EH flags to set on each link
 *	@wait: non-zero to wait for each port's EH and collect its result
 *
 *	RETURNS:
 *	0 on success; when @wait is set, the first failing port's error.
 */
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_link *link;

		/* a previous PM operation might still be in progress;
		 * wait for PM_PENDING to clear before starting another
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request the PM operation from EH under the port lock */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			rc = 0;
			/* EH reports its result through this pointer */
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		ata_for_each_link(link, ap, HOST_FIRST) {
			link->eh_info.action |= action;
			link->eh_info.flags |= ehi_flags;
		}

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* synchronous mode: wait for EH and bail on first error */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}
5457
5458
5459
5460
5461
5462
5463
5464
5465
5466
5467
5468
5469
5470
5471
5472
5473int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5474{
5475 int rc;
5476
5477
5478
5479
5480
5481 ata_lpm_enable(host);
5482
5483 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
5484 if (rc == 0)
5485 host->dev->power.power_state = mesg;
5486 return rc;
5487}
5488
5489
5490
5491
5492
5493
5494
5495
5496
5497
5498
5499
/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Requests an asynchronous (no-wait) EH resume with reset on every
 *	port, marks the host's device powered on, then restores the link
 *	power-management state saved by ata_host_suspend().
 */
void ata_host_resume(struct ata_host *host)
{
	ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
	host->dev->power.power_state = PMSG_ON;

	/* reenable link power management */
	ata_lpm_disable(host);
}
5509#endif
5510
5511
5512
5513
5514
5515
5516
5517
5518
5519
5520
5521
5522
5523int ata_port_start(struct ata_port *ap)
5524{
5525 struct device *dev = ap->dev;
5526
5527 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5528 GFP_KERNEL);
5529 if (!ap->prd)
5530 return -ENOMEM;
5531
5532 return 0;
5533}
5534
5535
5536
5537
5538
5539
5540
5541
5542
5543
/**
 *	ata_dev_init - initialize an ata_device structure
 *	@dev: device structure to initialize
 *
 *	Prepares @dev for (re)probing: resets the link's speed limit,
 *	clears probing-relevant device state and opens up all transfer
 *	mode masks.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = ata_dev_phys_link(dev);
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* the configured SPD limit is tied to the attached device */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/* flags and horkage can be modified asynchronously (e.g. warm
	 * plug requests), so clear them under the port lock
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	/* wipe only the CLEAR_BEGIN..CLEAR_END window of the struct,
	 * preserving the fields outside it, then allow every transfer
	 * mode until probing narrows them down
	 */
	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
5569
5570
5571
5572
5573
5574
5575
5576
5577
5578
5579
5580
5581void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5582{
5583 int i;
5584
5585
5586 memset(link, 0, offsetof(struct ata_link, device[0]));
5587
5588 link->ap = ap;
5589 link->pmp = pmp;
5590 link->active_tag = ATA_TAG_POISON;
5591 link->hw_sata_spd_limit = UINT_MAX;
5592
5593
5594 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5595 struct ata_device *dev = &link->device[i];
5596
5597 dev->link = link;
5598 dev->devno = dev - link->device;
5599#ifdef CONFIG_ATA_ACPI
5600 dev->gtf_filter = ata_acpi_gtf_filter;
5601#endif
5602 ata_dev_init(dev);
5603 }
5604}
5605
5606
5607
5608
5609
5610
5611
5612
5613
5614
5615
5616
5617
5618
5619int sata_link_init_spd(struct ata_link *link)
5620{
5621 u8 spd;
5622 int rc;
5623
5624 rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5625 if (rc)
5626 return rc;
5627
5628 spd = (link->saved_scontrol >> 4) & 0xf;
5629 if (spd)
5630 link->hw_sata_spd_limit &= (1 << spd) - 1;
5631
5632 ata_force_link_limits(link);
5633
5634 link->sata_spd_limit = link->hw_sata_spd_limit;
5635
5636 return 0;
5637}
5638
5639
5640
5641
5642
5643
5644
5645
5646
5647
5648
5649
5650
/**
 *	ata_port_alloc - allocate and initialize a basic ATA port
 *	@host: host the port belongs to
 *
 *	Allocates a zeroed ata_port, wires it to @host, sets defaults
 *	and initializes its work items, wait queues and the fast-drain
 *	timer.  The SCSI host and per-port resources are attached later.
 *
 *	RETURNS:
 *	New port on success, NULL on allocation failure.
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	ap->pflags |= ATA_PFLAG_INITIALIZING;
	ap->lock = &host->lock;	/* ports share the host lock */
	ap->flags = ATA_FLAG_DISABLED;
	ap->print_id = -1;	/* real id assigned at registration */
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host = host;
	ap->dev = host->dev;
	ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

#ifdef CONFIG_ATA_SFF
	INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
#else
	/* no PIO task without SFF support */
	INIT_DELAYED_WORK(&ap->port_task, NULL);
#endif
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);
	init_completion(&ap->park_req_pending);
	init_timer_deferrable(&ap->fastdrain_timer);
	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
	ap->fastdrain_timer.data = (unsigned long)ap;

	ap->cbl = ATA_CBL_NONE;

	ata_link_init(ap, &ap->link, 0);

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	return ap;
}
5703
5704static void ata_host_release(struct device *gendev, void *res)
5705{
5706 struct ata_host *host = dev_get_drvdata(gendev);
5707 int i;
5708
5709 for (i = 0; i < host->n_ports; i++) {
5710 struct ata_port *ap = host->ports[i];
5711
5712 if (!ap)
5713 continue;
5714
5715 if (ap->scsi_host)
5716 scsi_host_put(ap->scsi_host);
5717
5718 kfree(ap->pmp_link);
5719 kfree(ap->slave_link);
5720 kfree(ap);
5721 host->ports[i] = NULL;
5722 }
5723
5724 dev_set_drvdata(gendev, NULL);
5725}
5726
5727
5728
5729
5730
5731
5732
5733
5734
5735
5736
5737
5738
5739
5740
5741
5742
5743
5744
5745
5746
/**
 *	ata_host_alloc - allocate and init basic ATA host resources
 *	@dev: generic device the host is associated with
 *	@max_ports: maximum number of ATA ports for this host
 *
 *	Allocates a devres-managed ata_host with @max_ports ports.  On
 *	failure the whole devres group is released, undoing all partial
 *	allocations.
 *
 *	RETURNS:
 *	Allocated host on success, NULL on failure.
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;

	DPRINTK("ENTER\n");

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return NULL;

	/* the extra pointer slot keeps host->ports[] NULL-terminated */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);

	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
	if (!host)
		goto err_out;

	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	host->dev = dev;
	host->n_ports = max_ports;

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

 err_out:
	devres_release_group(dev, NULL);
	return NULL;
}
5791
5792
5793
5794
5795
5796
5797
5798
5799
5800
5801
5802
5803
5804
5805
5806
5807
/**
 *	ata_host_alloc_pinfo - alloc host and init with port_info array
 *	@dev: generic device the host is associated with
 *	@ppi: NULL-"sticky" array of port_info pointers
 *	@n_ports: number of ports to allocate
 *
 *	Allocates a host and initializes each port from @ppi.  When a
 *	@ppi entry is NULL, the previous non-NULL entry keeps being
 *	used.  NOTE(review): assumes ppi[0] is non-NULL — a NULL first
 *	entry would dereference a NULL pi below; confirm callers
 *	guarantee this.
 *
 *	RETURNS:
 *	Allocated host on success, NULL on failure.
 */
struct ata_host *ata_host_alloc_pinfo(struct device *dev,
				      const struct ata_port_info * const * ppi,
				      int n_ports)
{
	const struct ata_port_info *pi;
	struct ata_host *host;
	int i, j;

	host = ata_host_alloc(dev, n_ports);
	if (!host)
		return NULL;

	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* a NULL entry reuses the previous port_info */
		if (ppi[j])
			pi = ppi[j++];

		ap->pio_mask = pi->pio_mask;
		ap->mwdma_mask = pi->mwdma_mask;
		ap->udma_mask = pi->udma_mask;
		ap->flags |= pi->flags;
		ap->link.flags |= pi->link_flags;
		ap->ops = pi->port_ops;

		/* first non-dummy ops becomes the host's default ops */
		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
			host->ops = pi->port_ops;
	}

	return host;
}
5839
5840
5841
5842
5843
5844
5845
5846
5847
5848
5849
5850
5851
5852
5853
5854
5855
5856
5857
5858
5859
5860
5861
5862
5863
5864
5865
5866
5867
5868
5869
5870
5871
5872
5873
5874
5875
5876
5877
5878
5879
5880
5881
5882
5883
5884
5885
5886int ata_slave_link_init(struct ata_port *ap)
5887{
5888 struct ata_link *link;
5889
5890 WARN_ON(ap->slave_link);
5891 WARN_ON(ap->flags & ATA_FLAG_PMP);
5892
5893 link = kzalloc(sizeof(*link), GFP_KERNEL);
5894 if (!link)
5895 return -ENOMEM;
5896
5897 ata_link_init(ap, link, 1);
5898 ap->slave_link = link;
5899 return 0;
5900}
5901
5902static void ata_host_stop(struct device *gendev, void *res)
5903{
5904 struct ata_host *host = dev_get_drvdata(gendev);
5905 int i;
5906
5907 WARN_ON(!(host->flags & ATA_HOST_STARTED));
5908
5909 for (i = 0; i < host->n_ports; i++) {
5910 struct ata_port *ap = host->ports[i];
5911
5912 if (ap->ops->port_stop)
5913 ap->ops->port_stop(ap);
5914 }
5915
5916 if (host->ops->host_stop)
5917 host->ops->host_stop(host);
5918}
5919
5920
5921
5922
5923
5924
5925
5926
5927
5928
5929
5930
5931
5932
5933
5934
5935
5936
5937
5938
5939
/**
 *	ata_finalize_port_ops - resolve an ops table's inheritance chain
 *	@ops: ata_port_operations to finalize
 *
 *	An ops table may inherit from another via ->inherits.  The
 *	struct is treated as a flat array of pointers from its start up
 *	to (but excluding) ->inherits; every still-NULL slot is filled
 *	from the nearest ancestor that defines it.  Slots holding an
 *	ERR_PTR() sentinel mark a method as deliberately absent and are
 *	reset to NULL at the end.  ->inherits is cleared so the table is
 *	finalized only once.
 *
 *	NOTE(review): this layout trick requires ->inherits to be the
 *	last member of struct ata_port_operations and every member
 *	before it to be a pointer — confirm against the struct
 *	definition.
 */
static void ata_finalize_port_ops(struct ata_port_operations *ops)
{
	static DEFINE_SPINLOCK(lock);
	const struct ata_port_operations *cur;
	void **begin = (void **)ops;
	void **end = (void **)&ops->inherits;
	void **pp;

	if (!ops || !ops->inherits)
		return;

	spin_lock(&lock);

	/* walk the ancestry, nearest first, filling empty slots */
	for (cur = ops->inherits; cur; cur = cur->inherits) {
		void **inherit = (void **)cur;

		for (pp = begin; pp < end; pp++, inherit++)
			if (!*pp)
				*pp = *inherit;
	}

	/* ERR_PTR marks "explicitly no method" — clear it */
	for (pp = begin; pp < end; pp++)
		if (IS_ERR(*pp))
			*pp = NULL;

	ops->inherits = NULL;

	spin_unlock(&lock);
}
5969
5970
5971
5972
5973
5974
5975
5976
5977
5978
5979
5980
5981
5982
5983
5984
5985
/**
 *	ata_host_start - start and freeze the ports of an ATA host
 *	@host: host to start
 *
 *	Finalizes all ops tables, calls each port's ->port_start and
 *	freezes it, and (when any stop callback exists) registers a
 *	devres action so ports/host are stopped automatically on
 *	device teardown.  Idempotent: returns immediately if already
 *	started.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int have_stop = 0;
	void *start_dr = NULL;
	int i, rc;

	if (host->flags & ATA_HOST_STARTED)
		return 0;

	ata_finalize_port_ops(host->ops);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_finalize_port_ops(ap->ops);

		/* adopt the first non-dummy port's ops as host ops */
		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_stop)
			have_stop = 1;
	}

	if (host->ops->host_stop)
		have_stop = 1;

	/* allocate the stop devres up front so starting can't succeed
	 * and then fail to register its cleanup
	 */
	if (have_stop) {
		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
		if (!start_dr)
			return -ENOMEM;
	}

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				if (rc != -ENODEV)
					dev_printk(KERN_ERR, host->dev,
						   "failed to start port %d "
						   "(errno=%d)\n", i, rc);
				goto err_out;
			}
		}
		ata_eh_freeze_port(ap);
	}

	if (start_dr)
		devres_add(host->dev, start_dr);
	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	/* unwind: stop the ports that were already started */
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	devres_free(start_dr);
	return rc;
}
6049
6050
6051
6052
6053
6054
6055
6056
6057
6058
6059
6060
6061
6062void ata_host_init(struct ata_host *host, struct device *dev,
6063 unsigned long flags, struct ata_port_operations *ops)
6064{
6065 spin_lock_init(&host->lock);
6066 host->dev = dev;
6067 host->flags = flags;
6068 host->ops = ops;
6069}
6070
6071
/* Asynchronously probe one port and then scan its SCSI host.  Probing
 * may run in parallel across ports; SCSI scanning is serialized via
 * async_synchronize_cookie() so hosts appear in port order.
 */
static void async_port_probe(void *data, async_cookie_t cookie)
{
	int rc;
	struct ata_port *ap = data;

	/* Unless parallel scanning is requested, probe ports strictly
	 * in order by waiting for all earlier async work first.
	 */
	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
		async_synchronize_cookie(cookie);

	/* probe */
	if (ap->ops->error_handler) {
		struct ata_eh_info *ehi = &ap->link.eh_info;
		unsigned long flags;

		ata_port_probe(ap);

		/* kick EH for boot probing */
		spin_lock_irqsave(ap->lock, flags);

		ehi->probe_mask |= ATA_ALL_DEVICES;
		ehi->action |= ATA_EH_RESET | ATA_EH_LPM;
		ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

		ap->pflags &= ~ATA_PFLAG_INITIALIZING;
		ap->pflags |= ATA_PFLAG_LOADING;
		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait for EH to finish */
		ata_port_wait_eh(ap);
	} else {
		DPRINTK("ata%u: bus probe begin\n", ap->print_id);
		rc = ata_bus_probe(ap);
		DPRINTK("ata%u: bus probe end\n", ap->print_id);

		if (rc) {
			/* NOTE(review): probe failure is deliberately
			 * ignored here — the port simply ends up with
			 * no devices; confirm this is the intended
			 * old-EH behavior.
			 */
		}
	}

	/* in order to keep device order, we need to synchronize at this
	 * point before scanning the SCSI host */
	async_synchronize_cookie(cookie);

	ata_scsi_scan_host(ap, 1);

}
6130
6131
6132
6133
6134
6135
6136
6137
6138
6139
6140
6141
6142
6143
6144
6145
/**
 *	ata_host_register - register a started ATA host
 *	@host: host to register (must have been started)
 *	@sht: SCSI host template for the ports
 *
 *	Frees any over-allocated port slots, assigns print ids, adds
 *	SCSI hosts, associates ACPI, initializes cable type and link
 *	speed limits, prints per-port capability lines and finally
 *	schedules asynchronous probing of every port.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise.
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_printk(KERN_ERR, host->dev,
			   "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* An LLD may lower n_ports after allocation; free the excess
	 * ports sitting past n_ports (the array is NULL-terminated).
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++)
		host->ports[i]->print_id = ata_print_id++;

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		return rc;

	/* associate with ACPI nodes */
	ata_acpi_associate(host);

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned long xfer_mask;

		/* SATA ports default to a SATA cable */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);
		if (ap->slave_link)
			sata_link_init_spd(ap->slave_link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap)) {
			ata_port_printk(ap, KERN_INFO,
					"%cATA max %s %s\n",
					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
					ata_mode_string(xfer_mask),
					ap->link.eh_info.desc);
			ata_ehi_clear_desc(&ap->link.eh_info);
		} else
			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
	}

	/* perform each probe asynchronously */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		async_schedule(async_port_probe, ap);
	}

	return 0;
}
6213
6214
6215
6216
6217
6218
6219
6220
6221
6222
6223
6224
6225
6226
6227
6228
6229
6230
6231
6232
6233
6234
6235
6236
6237int ata_host_activate(struct ata_host *host, int irq,
6238 irq_handler_t irq_handler, unsigned long irq_flags,
6239 struct scsi_host_template *sht)
6240{
6241 int i, rc;
6242
6243 rc = ata_host_start(host);
6244 if (rc)
6245 return rc;
6246
6247
6248 if (!irq) {
6249 WARN_ON(irq_handler);
6250 return ata_host_register(host, sht);
6251 }
6252
6253 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6254 dev_driver_string(host->dev), host);
6255 if (rc)
6256 return rc;
6257
6258 for (i = 0; i < host->n_ports; i++)
6259 ata_port_desc(host->ports[i], "irq %d", irq);
6260
6261 rc = ata_host_register(host, sht);
6262
6263 if (rc)
6264 devm_free_irq(host->dev, irq, host);
6265
6266 return rc;
6267}
6268
6269
6270
6271
6272
6273
6274
6275
6276
6277
6278
6279
/**
 *	ata_port_detach - detach an ATA port in preparation of removal
 *	@ap: ATA port to be detached
 *
 *	Tells EH the port is going away, waits for EH to finish the
 *	unload, cancels pending hotplug work and removes the SCSI host.
 *	The port is quiescent on return.
 */
static void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;

	/* old-EH drivers have no unload protocol to run */
	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	ata_port_schedule_eh(ap);
	spin_unlock_irqrestore(ap->lock, flags);

	/* wait until EH has processed the unload request */
	ata_port_wait_eh(ap);

	/* EH must have marked the port unloaded by now */
	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));

	cancel_rearming_delayed_work(&ap->hotplug_task);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
6305
6306
6307
6308
6309
6310
6311
6312
6313
6314
6315void ata_host_detach(struct ata_host *host)
6316{
6317 int i;
6318
6319 for (i = 0; i < host->n_ports; i++)
6320 ata_port_detach(host->ports[i]);
6321
6322
6323 ata_acpi_dissociate(host);
6324}
6325
6326#ifdef CONFIG_PCI
6327
6328
6329
6330
6331
6332
6333
6334
6335
6336
6337
6338
6339void ata_pci_remove_one(struct pci_dev *pdev)
6340{
6341 struct device *dev = &pdev->dev;
6342 struct ata_host *host = dev_get_drvdata(dev);
6343
6344 ata_host_detach(host);
6345}
6346
6347
6348int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6349{
6350 unsigned long tmp = 0;
6351
6352 switch (bits->width) {
6353 case 1: {
6354 u8 tmp8 = 0;
6355 pci_read_config_byte(pdev, bits->reg, &tmp8);
6356 tmp = tmp8;
6357 break;
6358 }
6359 case 2: {
6360 u16 tmp16 = 0;
6361 pci_read_config_word(pdev, bits->reg, &tmp16);
6362 tmp = tmp16;
6363 break;
6364 }
6365 case 4: {
6366 u32 tmp32 = 0;
6367 pci_read_config_dword(pdev, bits->reg, &tmp32);
6368 tmp = tmp32;
6369 break;
6370 }
6371
6372 default:
6373 return -EINVAL;
6374 }
6375
6376 tmp &= bits->mask;
6377
6378 return (tmp == bits->val) ? 1 : 0;
6379}
6380
6381#ifdef CONFIG_PM
/* PCI-side suspend: save config space, disable the device and, when
 * actually going to sleep (as opposed to e.g. freeze), enter D3hot.
 */
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event & PM_EVENT_SLEEP)
		pci_set_power_state(pdev, PCI_D3hot);
}
6390
/* PCI-side resume: return to D0, restore config space, re-enable the
 * device and restore bus mastering.  Returns 0 on success or the
 * error from pcim_enable_device().
 */
int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}
6408
6409int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6410{
6411 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6412 int rc = 0;
6413
6414 rc = ata_host_suspend(host, mesg);
6415 if (rc)
6416 return rc;
6417
6418 ata_pci_device_do_suspend(pdev, mesg);
6419
6420 return 0;
6421}
6422
6423int ata_pci_device_resume(struct pci_dev *pdev)
6424{
6425 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6426 int rc;
6427
6428 rc = ata_pci_device_do_resume(pdev);
6429 if (rc == 0)
6430 ata_host_resume(host);
6431 return rc;
6432}
6433#endif
6434
6435#endif
6436
6437static int __init ata_parse_force_one(char **cur,
6438 struct ata_force_ent *force_ent,
6439 const char **reason)
6440{
6441
6442
6443
6444
6445
6446 static struct ata_force_param force_tbl[] __initdata = {
6447 { "40c", .cbl = ATA_CBL_PATA40 },
6448 { "80c", .cbl = ATA_CBL_PATA80 },
6449 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
6450 { "unk", .cbl = ATA_CBL_PATA_UNK },
6451 { "ign", .cbl = ATA_CBL_PATA_IGN },
6452 { "sata", .cbl = ATA_CBL_SATA },
6453 { "1.5Gbps", .spd_limit = 1 },
6454 { "3.0Gbps", .spd_limit = 2 },
6455 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
6456 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
6457 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
6458 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
6459 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
6460 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
6461 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
6462 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
6463 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
6464 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
6465 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
6466 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
6467 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
6468 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
6469 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6470 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6471 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6472 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6473 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6474 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6475 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6476 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6477 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6478 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6479 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6480 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6481 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6482 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6483 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6484 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6485 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6486 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6487 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6488 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6489 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6490 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
6491 { "nohrst", .lflags = ATA_LFLAG_NO_HRST },
6492 { "nosrst", .lflags = ATA_LFLAG_NO_SRST },
6493 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6494 };
6495 char *start = *cur, *p = *cur;
6496 char *id, *val, *endp;
6497 const struct ata_force_param *match_fp = NULL;
6498 int nr_matches = 0, i;
6499
6500
6501 while (*p != '\0' && *p != ',')
6502 p++;
6503
6504 if (*p == '\0')
6505 *cur = p;
6506 else
6507 *cur = p + 1;
6508
6509 *p = '\0';
6510
6511
6512 p = strchr(start, ':');
6513 if (!p) {
6514 val = strstrip(start);
6515 goto parse_val;
6516 }
6517 *p = '\0';
6518
6519 id = strstrip(start);
6520 val = strstrip(p + 1);
6521
6522
6523 p = strchr(id, '.');
6524 if (p) {
6525 *p++ = '\0';
6526 force_ent->device = simple_strtoul(p, &endp, 10);
6527 if (p == endp || *endp != '\0') {
6528 *reason = "invalid device";
6529 return -EINVAL;
6530 }
6531 }
6532
6533 force_ent->port = simple_strtoul(id, &endp, 10);
6534 if (p == endp || *endp != '\0') {
6535 *reason = "invalid port/link";
6536 return -EINVAL;
6537 }
6538
6539 parse_val:
6540
6541 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6542 const struct ata_force_param *fp = &force_tbl[i];
6543
6544 if (strncasecmp(val, fp->name, strlen(val)))
6545 continue;
6546
6547 nr_matches++;
6548 match_fp = fp;
6549
6550 if (strcasecmp(val, fp->name) == 0) {
6551 nr_matches = 1;
6552 break;
6553 }
6554 }
6555
6556 if (!nr_matches) {
6557 *reason = "unknown value";
6558 return -EINVAL;
6559 }
6560 if (nr_matches > 1) {
6561 *reason = "ambigious value";
6562 return -EINVAL;
6563 }
6564
6565 force_ent->param = *match_fp;
6566
6567 return 0;
6568}
6569
6570static void __init ata_parse_force_param(void)
6571{
6572 int idx = 0, size = 1;
6573 int last_port = -1, last_device = -1;
6574 char *p, *cur, *next;
6575
6576
6577 for (p = ata_force_param_buf; *p; p++)
6578 if (*p == ',')
6579 size++;
6580
6581 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6582 if (!ata_force_tbl) {
6583 printk(KERN_WARNING "ata: failed to extend force table, "
6584 "libata.force ignored\n");
6585 return;
6586 }
6587
6588
6589 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6590 const char *reason = "";
6591 struct ata_force_ent te = { .port = -1, .device = -1 };
6592
6593 next = cur;
6594 if (ata_parse_force_one(&next, &te, &reason)) {
6595 printk(KERN_WARNING "ata: failed to parse force "
6596 "parameter \"%s\" (%s)\n",
6597 cur, reason);
6598 continue;
6599 }
6600
6601 if (te.port == -1) {
6602 te.port = last_port;
6603 te.device = last_device;
6604 }
6605
6606 ata_force_tbl[idx++] = te;
6607
6608 last_port = te.port;
6609 last_device = te.device;
6610 }
6611
6612 ata_force_tbl_size = idx;
6613}
6614
6615static int __init ata_init(void)
6616{
6617 ata_parse_force_param();
6618
6619 ata_wq = create_workqueue("ata");
6620 if (!ata_wq)
6621 goto free_force_tbl;
6622
6623 ata_aux_wq = create_singlethread_workqueue("ata_aux");
6624 if (!ata_aux_wq)
6625 goto free_wq;
6626
6627 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6628 return 0;
6629
6630free_wq:
6631 destroy_workqueue(ata_wq);
6632free_force_tbl:
6633 kfree(ata_force_tbl);
6634 return -ENOMEM;
6635}
6636
/*
 * ata_exit - libata core module teardown
 *
 * Releases the module-lifetime resources set up at init time: the
 * force table allocated by ata_parse_force_param() and the two
 * workqueues created by ata_init().
 */
static void __exit ata_exit(void)
{
	kfree(ata_force_tbl);
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}
6643
/* subsys_initcall so the libata core comes up before the ATA host
 * drivers that link against it are initialized.
 */
subsys_initcall(ata_init);
module_exit(ata_exit);

/* State for ata_ratelimit(): next time printing is allowed again,
 * protected against concurrent update by ata_ratelimit_lock.
 */
static unsigned long ratelimit_time;
static DEFINE_SPINLOCK(ata_ratelimit_lock);
6649
6650int ata_ratelimit(void)
6651{
6652 int rc;
6653 unsigned long flags;
6654
6655 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6656
6657 if (time_after(jiffies, ratelimit_time)) {
6658 rc = 1;
6659 ratelimit_time = jiffies + (HZ/5);
6660 } else
6661 rc = 0;
6662
6663 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6664
6665 return rc;
6666}
6667
6668
6669
6670
6671
6672
6673
6674
6675
6676
6677
6678
6679
6680
6681
6682
6683
6684
6685
6686
6687
6688
6689
6690
6691u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6692 unsigned long interval, unsigned long timeout)
6693{
6694 unsigned long deadline;
6695 u32 tmp;
6696
6697 tmp = ioread32(reg);
6698
6699
6700
6701
6702
6703 deadline = ata_deadline(jiffies, timeout);
6704
6705 while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6706 msleep(interval);
6707 tmp = ioread32(reg);
6708 }
6709
6710 return tmp;
6711}
6712
6713
6714
6715
/*
 * ata_dummy_qc_issue - qc_issue callback for the dummy port
 *
 * Fails every command immediately with AC_ERR_SYSTEM; the dummy port
 * never executes anything.
 */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}
6720
/*
 * ata_dummy_error_handler - error_handler callback for the dummy port
 *
 * Intentionally empty: there is no hardware behind a dummy port, so
 * there is nothing to recover.
 */
static void ata_dummy_error_handler(struct ata_port *ap)
{
	/* truly nothing to do */
}
6725
/*
 * Placeholder port operations: qc_prep is a no-op, every issued command
 * fails with AC_ERR_SYSTEM, and error handling does nothing.  Used by
 * hosts that need to register a port with no device behind it.
 */
struct ata_port_operations ata_dummy_port_ops = {
	.qc_prep = ata_noop_qc_prep,
	.qc_issue = ata_dummy_qc_issue,
	.error_handler = ata_dummy_error_handler,
};

/* port_info wrapper around the dummy ops for drivers using pinfo tables */
const struct ata_port_info ata_dummy_port_info = {
	.port_ops = &ata_dummy_port_ops,
};
6735
6736
6737
6738
6739
6740
6741
/*
 * Exported (GPL-only) interface of the libata core, consumed by the
 * individual ATA host drivers.
 */
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_base_port_ops);
EXPORT_SYMBOL_GPL(sata_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
EXPORT_SYMBOL_GPL(ata_link_next);
EXPORT_SYMBOL_GPL(ata_dev_next);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_host_alloc);
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
EXPORT_SYMBOL_GPL(ata_slave_link_init);
EXPORT_SYMBOL_GPL(ata_host_start);
EXPORT_SYMBOL_GPL(ata_host_register);
EXPORT_SYMBOL_GPL(ata_host_activate);
EXPORT_SYMBOL_GPL(ata_host_detach);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(atapi_cmd_type);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_pack_xfermask);
EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
EXPORT_SYMBOL_GPL(ata_mode_string);
EXPORT_SYMBOL_GPL(ata_id_xfermask);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_std_qc_defer);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(ata_wait_after_reset);
EXPORT_SYMBOL_GPL(sata_link_debounce);
EXPORT_SYMBOL_GPL(sata_link_resume);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(sata_link_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_link_online);
EXPORT_SYMBOL_GPL(ata_link_offline);
/* power-management entry points, only built with CONFIG_PM */
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

EXPORT_SYMBOL_GPL(ata_pio_queue_task);
EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_find_mode);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);
EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);

/* PCI helper exports, only built with CONFIG_PCI */
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif
#endif

EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
EXPORT_SYMBOL_GPL(ata_port_desc);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
#endif
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_link_abort);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(sata_async_notification);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
EXPORT_SYMBOL_GPL(ata_do_eh);
EXPORT_SYMBOL_GPL(ata_std_error_handler);

EXPORT_SYMBOL_GPL(ata_cable_40wire);
EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_ignore);
EXPORT_SYMBOL_GPL(ata_cable_sata);
6854