1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43#include <linux/kernel.h>
44#include <linux/module.h>
45#include <linux/pci.h>
46#include <linux/init.h>
47#include <linux/list.h>
48#include <linux/mm.h>
49#include <linux/spinlock.h>
50#include <linux/blkdev.h>
51#include <linux/delay.h>
52#include <linux/timer.h>
53#include <linux/interrupt.h>
54#include <linux/completion.h>
55#include <linux/suspend.h>
56#include <linux/workqueue.h>
57#include <linux/scatterlist.h>
58#include <linux/io.h>
59#include <linux/async.h>
60#include <linux/log2.h>
61#include <linux/slab.h>
62#include <scsi/scsi.h>
63#include <scsi/scsi_cmnd.h>
64#include <scsi/scsi_host.h>
65#include <linux/libata.h>
66#include <asm/byteorder.h>
67#include <linux/cdrom.h>
68#include <linux/ratelimit.h>
69
70#include "libata.h"
71#include "libata-transport.h"
72
73
/* SATA link debounce timing parameter tables.  Values appear to be
 * { interval, duration, timeout } in milliseconds — confirm against
 * sata_link_debounce() before relying on this. */
const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };

/* Base operations inherited by all libata drivers: standard prereset,
 * postreset and error handler hooks. */
const struct ata_port_operations ata_base_port_ops = {
	.prereset = ata_std_prereset,
	.postreset = ata_std_postreset,
	.error_handler = ata_std_error_handler,
};

/* Operations common to SATA ports: adds standard qc deferral and SATA
 * hardreset on top of the base ops. */
const struct ata_port_operations sata_port_ops = {
	.inherits = &ata_base_port_ops,

	.qc_defer = ata_std_qc_defer,
	.hardreset = sata_std_hardreset,
};
90
/* Forward declarations for helpers defined later in this file. */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

/* Allocator for the printed port ID; starts at 1 (0 is never used). */
unsigned int ata_print_id = 1;

/* One parsed value from the "libata.force" module parameter. */
struct ata_force_param {
	const char *name;		/* human-readable name for log messages */
	unsigned int cbl;		/* forced cable type (ATA_CBL_*), or ATA_CBL_NONE */
	int spd_limit;			/* forced SATA PHY speed limit, 0 = none */
	unsigned long xfer_mask;	/* forced transfer mode mask, 0 = none */
	unsigned int horkage_on;	/* horkage flags to turn on */
	unsigned int horkage_off;	/* horkage flags to turn off */
	unsigned int lflags;		/* link flags to force on */
};

/* A force-table entry: which port/device a parameter applies to.
 * -1 acts as a wildcard matching any port or device. */
struct ata_force_ent {
	int port;
	int device;
	struct ata_force_param param;
};

static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

/* Raw "force" parameter string; __initdata, discarded after boot. */
static char ata_force_param_buf[PAGE_SIZE] __initdata;

module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");

static int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");

static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

/* 0 means "use the per-command automatic timeout". */
static int ata_probe_timeout;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
158
159static int atapi_an;
160module_param(atapi_an, int, 0444);
161MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=0ff [default], 1=on)");
162
163MODULE_AUTHOR("Jeff Garzik");
164MODULE_DESCRIPTION("Library module for ATA devices");
165MODULE_LICENSE("GPL");
166MODULE_VERSION(DRV_VERSION);
167
168
169static bool ata_sstatus_online(u32 sstatus)
170{
171 return (sstatus & 0xf) == 0x3;
172}
173
174
175
176
177
178
179
180
181
182
183
184
185
/**
 *	ata_link_next - link iteration helper
 *	@link: the previous link, NULL to start
 *	@ap: ATA port containing links to iterate
 *	@mode: iteration mode, one of ATA_LITER_*
 *
 *	Returns the next link, or NULL when iteration is complete.
 *	The modes differ in whether the host link is visited before or
 *	after PMP links, or is skipped entirely when a PMP is attached
 *	(EDGE).  The switch fallthroughs below are intentional.
 */
struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
			       enum ata_link_iter_mode mode)
{
	BUG_ON(mode != ATA_LITER_EDGE &&
	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);

	/* NULL link indicates start of iteration */
	if (!link)
		switch (mode) {
		case ATA_LITER_EDGE:
		case ATA_LITER_PMP_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_HOST_FIRST:
			return &ap->link;
		}

	/* we just iterated over the host link, what's next? */
	if (link == &ap->link)
		switch (mode) {
		case ATA_LITER_HOST_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_PMP_FIRST:
			if (unlikely(ap->slave_link))
				return ap->slave_link;
			/* fall through */
		case ATA_LITER_EDGE:
			return NULL;
		}

	/* slave_link excludes PMP, so it's always the last link */
	if (unlikely(link == ap->slave_link))
		return NULL;

	/* we were over a PMP link; advance within the PMP fan-out */
	if (++link < ap->pmp_link + ap->nr_pmp_links)
		return link;

	/* PMP links exhausted; HOST_FIRST already visited the host link */
	if (mode == ATA_LITER_PMP_FIRST)
		return &ap->link;

	return NULL;
}
232
233
234
235
236
237
238
239
240
241
242
243
244
/**
 *	ata_dev_next - device iteration helper
 *	@dev: the previous device, NULL to start
 *	@link: ATA link containing devices to iterate
 *	@mode: iteration mode, one of ATA_DITER_*
 *
 *	Returns the next device, or NULL when iteration is complete.
 *	ENABLED modes skip disabled devices; REVERSE modes walk from the
 *	highest device number downwards.
 */
struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
				enum ata_dev_iter_mode mode)
{
	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);

	/* NULL dev indicates start of iteration */
	if (!dev)
		switch (mode) {
		case ATA_DITER_ENABLED:
		case ATA_DITER_ALL:
			dev = link->device;
			goto check;
		case ATA_DITER_ENABLED_REVERSE:
		case ATA_DITER_ALL_REVERSE:
			dev = link->device + ata_link_max_devices(link) - 1;
			goto check;
		}

 next:
	/* move to the next one */
	switch (mode) {
	case ATA_DITER_ENABLED:
	case ATA_DITER_ALL:
		if (++dev < link->device + ata_link_max_devices(link))
			goto check;
		return NULL;
	case ATA_DITER_ENABLED_REVERSE:
	case ATA_DITER_ALL_REVERSE:
		if (--dev >= link->device)
			goto check;
		return NULL;
	}

 check:
	/* in ENABLED modes, keep advancing past disabled devices */
	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
	    !ata_dev_enabled(dev))
		goto next;
	return dev;
}
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300struct ata_link *ata_dev_phys_link(struct ata_device *dev)
301{
302 struct ata_port *ap = dev->link->ap;
303
304 if (!ap->slave_link)
305 return dev->link;
306 if (!dev->devno)
307 return &ap->link;
308 return ap->slave_link;
309}
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324void ata_force_cbl(struct ata_port *ap)
325{
326 int i;
327
328 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
329 const struct ata_force_ent *fe = &ata_force_tbl[i];
330
331 if (fe->port != -1 && fe->port != ap->print_id)
332 continue;
333
334 if (fe->param.cbl == ATA_CBL_NONE)
335 continue;
336
337 ap->cbl = fe->param.cbl;
338 ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
339 return;
340 }
341}
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
/**
 *	ata_force_link_limits - force link limits according to libata.force
 *	@link: ATA link of interest
 *
 *	Apply matching "libata.force" entries to @link: a SATA PHY speed
 *	limit (only the last matching one, hence the backward scan and
 *	@did_spd latch) and any forced link flags (all matching entries).
 */
static void ata_force_link_limits(struct ata_link *link)
{
	bool did_spd = false;
	int linkno = link->pmp;
	int i;

	/* host links appear to be addressed as link 15+ in the force
	 * table — TODO confirm against the force parameter parser */
	if (ata_is_host_link(link))
		linkno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != linkno)
			continue;

		/* only honor the first (i.e. last-specified) spd limit */
		if (!did_spd && fe->param.spd_limit) {
			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
					fe->param.name);
			did_spd = true;
		}

		/* link flags accumulate from every matching entry */
		if (fe->param.lflags) {
			link->flags |= fe->param.lflags;
			ata_link_notice(link,
					"FORCE: link flag 0x%x forced -> 0x%x\n",
					fe->param.lflags, link->flags);
		}
	}
}
394
395
396
397
398
399
400
401
402
403
404
405
/**
 *	ata_force_xfermask - force xfermask according to libata.force
 *	@dev: ATA device of interest
 *
 *	Apply the last matching "libata.force" transfer-mode entry to
 *	@dev.  Only one entry takes effect, hence the backward scan and
 *	early return on the first applicable entry.
 */
static void ata_force_xfermask(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* devices on the host link are also addressable as 15 + devno —
	 * TODO confirm against the force parameter parser */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];
		unsigned long pio_mask, mwdma_mask, udma_mask;

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!fe->param.xfer_mask)
			continue;

		ata_unpack_xfermask(fe->param.xfer_mask,
				    &pio_mask, &mwdma_mask, &udma_mask);
		/* force the highest mode class present in the entry and
		 * clear the faster classes so it is actually selected */
		if (udma_mask)
			dev->udma_mask = udma_mask;
		else if (mwdma_mask) {
			dev->udma_mask = 0;
			dev->mwdma_mask = mwdma_mask;
		} else {
			dev->udma_mask = 0;
			dev->mwdma_mask = 0;
			dev->pio_mask = pio_mask;
		}

		ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
			       fe->param.name);
		return;
	}
}
448
449
450
451
452
453
454
455
456
457
458
459
/**
 *	ata_force_horkage - force horkage according to libata.force
 *	@dev: ATA device of interest
 *
 *	Apply all matching "libata.force" horkage entries.  Unlike the
 *	other force helpers this scans forward and applies every match,
 *	so later entries can override bits touched by earlier ones.
 */
static void ata_force_horkage(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* devices on the host link are also addressable as 15 + devno —
	 * TODO confirm against the force parameter parser */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = 0; i < ata_force_tbl_size; i++) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		/* skip entries that would not change any horkage bit:
		 * all on-bits already set and no off-bits currently set */
		if (!(~dev->horkage & fe->param.horkage_on) &&
		    !(dev->horkage & fe->param.horkage_off))
			continue;

		dev->horkage |= fe->param.horkage_on;
		dev->horkage &= ~fe->param.horkage_off;

		ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
			       fe->param.name);
	}
}
491
492
493
494
495
496
497
498
499
500
501
502
503
/**
 *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
 *	@opcode: SCSI opcode
 *
 *	Classify @opcode by its data-transfer behavior.
 *
 *	RETURNS: ATAPI_{READ,WRITE,READ_CD,PASS_THRU,MISC}
 */
int atapi_cmd_type(u8 opcode)
{
	switch (opcode) {
	case GPCMD_READ_10:
	case GPCMD_READ_12:
		return ATAPI_READ;

	case GPCMD_WRITE_10:
	case GPCMD_WRITE_12:
	case GPCMD_WRITE_AND_VERIFY_10:
		return ATAPI_WRITE;

	case GPCMD_READ_CD:
	case GPCMD_READ_CD_MSF:
		return ATAPI_READ_CD;

	case ATA_16:
	case ATA_12:
		if (atapi_passthru16)
			return ATAPI_PASS_THRU;
		/* fall through - treated as misc when passthru is disabled */
	default:
		return ATAPI_MISC;
	}
}
529
530
531
532
533
534
535
536
537
538
539
540
541
542
/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@pmp: Port multiplier port number
 *	@is_cmd: This FIS is for command
 *	@fis: Buffer into which data will output (at least 20 bytes)
 *
 *	Converts a standard ATA taskfile to a Serial ATA Register -
 *	Host to Device FIS.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS type */
	fis[1] = pmp & 0xf;		/* port multiplier port number */
	if (is_cmd)
		fis[1] |= (1 << 7);	/* C bit: update is due to a command */

	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	/* high-order (hob) register values for 48-bit commands */
	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	/* remaining FIS dwords are reserved */
	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}
573
574
575
576
577
578
579
580
581
582
583
584
/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile,
 *	reading back the register values written by ata_tf_to_fis()
 *	(command/feature, LBA, device, hob and sector count fields).
 */
void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command = fis[2];
	tf->feature = fis[3];

	tf->lbal = fis[4];
	tf->lbam = fis[5];
	tf->lbah = fis[6];
	tf->device = fis[7];

	tf->hob_lbal = fis[8];
	tf->hob_lbam = fis[9];
	tf->hob_lbah = fis[10];

	tf->nsect = fis[12];
	tf->hob_nsect = fis[13];
}
602
/* Read/write opcode table indexed by base + fua*4 + lba48*2 + write,
 * where base is 0 (multiple), 8 (pio) or 16 (dma); see
 * ata_rwcmd_protocol().  Zero entries are invalid combinations. */
static const u8 ata_rw_cmds[] = {
	/* 0-7: READ/WRITE MULTIPLE */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* 8-15: PIO */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* 16-23: DMA */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
632
633
634
635
636
637
638
639
640
641
642
643
/**
 *	ata_rwcmd_protocol - set taskfile r/w command and protocol
 *	@tf: command to examine and configure
 *	@dev: device the command targets
 *
 *	Select the read/write opcode from ata_rw_cmds[] and the transfer
 *	protocol based on the FUA/LBA48/WRITE taskfile flags and whether
 *	the device is restricted to PIO.
 *
 *	RETURNS: 0 on success, -1 if the flag combination has no valid
 *	opcode (a zero table entry).
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		/* READ/WRITE MULTIPLE if configured, plain PIO otherwise */
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* controller can't do LBA48 via DMA; fall back to PIO */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	Decode the starting block address from a taskfile, handling
 *	LBA48, LBA28 and CHS addressing.  For CHS, a device-reported
 *	sector of 0 is invalid and clamped to 1 with a warning.
 *
 *	RETURNS: block address
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= (u64)tf->hob_lbal << 24;
		} else
			/* LBA28: bits 27:24 live in the device register */
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		if (!sect) {
			ata_dev_warn(dev,
				     "device reported invalid CHS sector 0\n");
			sect = 1; /* CHS sectors are 1-based; best effort */
		}

		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
	}

	return block;
}
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag (for NCQ)
 *
 *	Build a taskfile for @dev's read/write request, preferring NCQ,
 *	then LBA48/LBA28, then CHS addressing.
 *
 *	RETURNS: 0 on success, -ERANGE if the request is too large for
 *	@dev, -EINVAL if the combination is otherwise invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* NCQ: always LBA48 addressing */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		/* NCQ carries the tag in sector count bits 7:3 and the
		 * transfer length in the feature registers */
		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;	/* LBA bit */
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* LBA28: bits 27:24 go in the device register */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl = track / dev->heads;
		head = track % dev->heads;
		sect = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit:
		 * Cylinder 0-65535, Head 0-15, Sector 1-255 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861unsigned long ata_pack_xfermask(unsigned long pio_mask,
862 unsigned long mwdma_mask,
863 unsigned long udma_mask)
864{
865 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
866 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
867 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
868}
869
870
871
872
873
874
875
876
877
878
879
880void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
881 unsigned long *mwdma_mask, unsigned long *udma_mask)
882{
883 if (pio_mask)
884 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
885 if (mwdma_mask)
886 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
887 if (udma_mask)
888 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
889}
890
/* Maps each xfer_mask bit range (shift/width) to its XFER_* mode base
 * value; the table is terminated by a negative shift. */
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914u8 ata_xfer_mask2mode(unsigned long xfer_mask)
915{
916 int highbit = fls(xfer_mask) - 1;
917 const struct ata_xfer_ent *ent;
918
919 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
920 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
921 return ent->base + highbit - ent->shift;
922 return 0xff;
923}
924
925
926
927
928
929
930
931
932
933
934
935
936
937unsigned long ata_xfer_mode2mask(u8 xfer_mode)
938{
939 const struct ata_xfer_ent *ent;
940
941 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
942 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
943 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
944 & ~((1 << ent->shift) - 1);
945 return 0;
946}
947
948
949
950
951
952
953
954
955
956
957
958
959
960int ata_xfer_mode2shift(unsigned long xfer_mode)
961{
962 const struct ata_xfer_ent *ent;
963
964 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
965 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
966 return ent->shift;
967 return -1;
968}
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only the highest bit counts
 *
 *	Determine the string that represents the highest speed mode in
 *	@xfer_mask.
 *
 *	RETURNS: constant C string representing the highest speed listed
 *	in @xfer_mask, or the constant "<n/a>" if the mask is empty or
 *	out of range.
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit = fls(xfer_mask) - 1;

	if (highbit < 0 || highbit >= ARRAY_SIZE(xfer_mode_str))
		return "<n/a>";
	return xfer_mode_str[highbit];
}
1015
/* Translate a 1-based SATA speed number into its human-readable name;
 * 0 and out-of-range values yield "<unknown>". */
const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
		"6.0 Gbps",
	};
	unsigned int idx = spd - 1;

	if (spd == 0 || idx >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[idx];
}
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine the device class from the LBA mid/high (cylinder
 *	low/high) signature values left in the taskfile after reset:
 *
 *	  0x00/0x00	ATA device
 *	  0x14/0xeb	ATAPI device
 *	  0x69/0x96	SATA port multiplier
 *	  0x3c/0xc3	SEMB device (could actually be an ATA device)
 *
 *	RETURNS: device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI,
 *	%ATA_DEV_PMP, %ATA_DEV_SEMB or %ATA_DEV_UNKNOWN.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");
		return ATA_DEV_PMP;
	}

	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		DPRINTK("found SEMB device by sig (could be ATA device)\n");
		return ATA_DEV_SEMB;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106void ata_id_string(const u16 *id, unsigned char *s,
1107 unsigned int ofs, unsigned int len)
1108{
1109 unsigned int c;
1110
1111 BUG_ON(len & 1);
1112
1113 while (len > 0) {
1114 c = id[ofs] >> 8;
1115 *s = c;
1116 s++;
1117
1118 c = id[ofs] & 0xff;
1119 *s = c;
1120 s++;
1121
1122 ofs++;
1123 len -= 2;
1124 }
1125}
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141void ata_id_c_string(const u16 *id, unsigned char *s,
1142 unsigned int ofs, unsigned int len)
1143{
1144 unsigned char *p;
1145
1146 ata_id_string(id, s, ofs, len - 1);
1147
1148 p = s + strnlen(s, len - 1);
1149 while (p > s && p[-1] == ' ')
1150 p--;
1151 *p = '\0';
1152}
1153
/* Return the device capacity in sectors from IDENTIFY data, preferring
 * LBA48 capacity, then LBA28, then current CHS geometry, then default
 * CHS geometry. */
static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
		else
			return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
	} else {
		if (ata_id_current_chs_valid(id))
			return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
			       id[ATA_ID_CUR_SECTORS];
		else
			return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
			       id[ATA_ID_SECTORS];
	}
}
1170
/* Assemble a 48-bit LBA from the taskfile's hob (high-order) and
 * regular LBA registers. */
u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}
1184
/* Assemble a 28-bit LBA from the taskfile; bits 27:24 come from the
 * low nibble of the device register. */
u64 ata_tf_to_lba(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= (tf->device & 0x0f) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
/**
 *	ata_read_native_max_address - Read native max address
 *	@dev: target device
 *	@max_sectors: out parameter for the number of native sectors
 *
 *	Issue READ NATIVE MAX ADDRESS [EXT] and report the device's
 *	native capacity in sectors.
 *
 *	RETURNS: 0 on success, -EACCES if the command was aborted by the
 *	device, -EIO on other failures.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	/* tf was presumably just initialized by ata_tf_init(), so |=
	 * acts as plain assignment here — confirm against ata_tf_init */
	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to read native max address (err_mask=0x%x)\n",
			     err_mask);
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	/* result registers hold the last addressable LBA; +1 for count */
	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf) + 1;
	else
		*max_sectors = ata_tf_to_lba(&tf) + 1;
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Issue SET MAX ADDRESS [EXT] to resize the device to
 *	@new_sectors.
 *
 *	RETURNS: 0 on success, -EACCES if the command was aborted or
 *	rejected (ID not found) by the device, -EIO on other failures.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	/* the command takes the last addressable LBA, not a count */
	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		/* LBA28: bits 27:24 go in the device register */
		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to set max address (err_mask=0x%x)\n",
			     err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
/**
 *	ata_hpa_resize - Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the native size of an LBA disk and, when requested (via the
 *	ignore_hpa parameter or ATA_DFLAG_UNLOCK_HPA), unlock the Host
 *	Protected Area so the full media size becomes usable.  Devices
 *	with broken HPA support get ATA_HORKAGE_BROKEN_HPA set so HPA
 *	handling is skipped from then on.
 *
 *	RETURNS: 0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it at all? */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If the device aborted the command or HPA isn't going
		 * to be unlocked anyway, skip HPA handling from now on. */
		if (rc == -EACCES || !unlock_hpa) {
			ata_dev_warn(dev,
				     "HPA support seems broken, skipping HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if the device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}
	dev->n_native_sectors = native_sectors;

	/* nothing to unlock?  report the HPA state if probing */
	if (native_sectors <= sectors || !unlock_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_info(dev,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_warn(dev,
				"native sectors (%llu) is smaller than sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if the device aborted the command, skip HPA resizing */
		ata_dev_warn(dev,
			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
			     (unsigned long long)sectors,
			     (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data to pick up the new capacity */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_err(dev,
			    "failed to re-read IDENTIFY data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_info(dev,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words of the given IDENTIFY DEVICE page
 *	for debugging purposes.
 */
static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x "
		"53==0x%04x "
		"63==0x%04x "
		"64==0x%04x "
		"75==0x%04x \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x "
		"81==0x%04x "
		"82==0x%04x "
		"83==0x%04x "
		"84==0x%04x \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the supported transfer-mode mask for this device,
 *	including legacy-device and CompactFlash quirks.
 *
 *	RETURNS: computed xfermask
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* usual case: the field-valid word says the PIO modes word is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7; /* PIO0-2 are always supported */
	} else {
		/* old device: the high byte of the legacy PIO-modes word
		 * holds the maximum PIO timing number; turn it into a
		 * mask, falling back to PIO0 for out-of-range values */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/* CompactFlash advanced timing modes add PIO5/6 and
		 * MWDMA3/4 — confirm field layout against the CF spec */
		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
1510
1511static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1512{
1513 struct completion *waiting = qc->private_data;
1514
1515 complete(waiting);
1516}
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sgl: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	the command on entry and the result on return.  Timeout and
 *	error conditions are reported via the return value.  No recovery
 *	action is taken after a command times out.
 *
 *	LOCKING: None.  Should be called with kernel context, might
 *	sleep.
 *
 *	RETURNS: Zero on success, AC_ERR_* mask on failure.
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	int auto_timeout = 0;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal commands while the port is frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* pick a tag: the reserved internal tag with new-style EH,
	 * tag 0 otherwise */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* save and clear the active-command bookkeeping so the internal
	 * command runs on an otherwise idle port; restored below */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare and issue the qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	/* no explicit timeout: use the probe timeout if set, otherwise
	 * the per-command automatic timeout */
	if (!timeout) {
		if (ata_probe_timeout)
			timeout = ata_probe_timeout * 1000;
		else {
			timeout = ata_internal_cmd_timeout(dev, command);
			auto_timeout = 1;
		}
	}

	if (ap->ops->error_handler)
		ata_eh_release(ap);

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	if (ap->ops->error_handler)
		ata_eh_acquire(ap);

	ata_sff_flush_pio_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* Timed out.  If the qc is still active, flag the
		 * timeout and either freeze the port (new-style EH will
		 * clean up) or complete the qc here. */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
					     command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* give the LLD a chance to clean up after the internal command */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* finish up the error mask for a failed qc */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		/* drop the catch-all bit when a specific cause is known */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up: copy out results and restore the preempted state */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	spin_unlock_irqrestore(ap->lock, flags);

	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
		ata_internal_cmd_timed_out(dev, command);

	return err_mask;
}
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719unsigned ata_exec_internal(struct ata_device *dev,
1720 struct ata_taskfile *tf, const u8 *cdb,
1721 int dma_dir, void *buf, unsigned int buflen,
1722 unsigned long timeout)
1723{
1724 struct scatterlist *psg = NULL, sg;
1725 unsigned int n_elem = 0;
1726
1727 if (dma_dir != DMA_NONE) {
1728 WARN_ON(!buf);
1729 sg_init_one(&sg, buf, buflen);
1730 psg = &sg;
1731 n_elem++;
1732 }
1733
1734 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1735 timeout);
1736}
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1753{
1754 struct ata_taskfile tf;
1755
1756 ata_tf_init(dev, &tf);
1757
1758 tf.command = cmd;
1759 tf.flags |= ATA_TFLAG_DEVICE;
1760 tf.protocol = ATA_PROT_NODATA;
1761
1762 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1763}
1764
1765
1766
1767
1768
1769
1770
1771
1772unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1773{
1774
1775
1776
1777
1778 if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1779 return 0;
1780
1781
1782
1783 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1784 return 0;
1785
1786 if (ata_id_is_cfa(adev->id)
1787 && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1788 return 0;
1789
1790 if (adev->pio_mode > XFER_PIO_2)
1791 return 1;
1792
1793 if (ata_id_has_iordy(adev->id))
1794 return 1;
1795 return 0;
1796}
1797
1798
1799
1800
1801
1802
1803
1804
1805static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1806{
1807
1808 if (adev->id[ATA_ID_FIELD_VALID] & 2) {
1809 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1810
1811 if (pio) {
1812
1813 if (pio > 240)
1814 return 3 << ATA_SHIFT_PIO;
1815 return 7 << ATA_SHIFT_PIO;
1816 }
1817 }
1818 return 3 << ATA_SHIFT_PIO;
1819}
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831unsigned int ata_do_dev_read_id(struct ata_device *dev,
1832 struct ata_taskfile *tf, u16 *id)
1833{
1834 return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1835 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1836}
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  The device class is written back through @p_class and
 *	may change (SEMB -> ATA, or ATA <-> ATAPI if the device aborts
 *	the first flavor of IDENTIFY).
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	bool is_semb = class == ATA_DEV_SEMB;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_dbg(dev, "%s: ENTER\n", __func__);

retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_SEMB:
		/* Some devices with a SEMB signature are really ATA
		 * disks; retry the IDENTIFY as an ATA device.
		 */
		class = ATA_DEV_ATA;
		/* fallthrough */
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	if (ap->ops->read_id)
		err_mask = ap->ops->read_id(dev, &tf, id);
	else
		err_mask = ata_do_dev_read_id(dev, &tf, id);

	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			ata_dev_dbg(dev, "NODEV after polling detection\n");
			return -ENOENT;
		}

		if (is_semb) {
			ata_dev_info(dev,
		     "IDENTIFY failed on device w/ SEMB sig, disabled\n");
			/* SEMB is not supported yet */
			*p_class = ATA_DEV_SEMB_UNSUP;
			return 0;
		}

		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			/* Device or controller might have reported
			 * the wrong device class.  Give a shot at the
			 * other IDENTIFY if the current one is
			 * aborted by the device.
			 */
			if (may_fallback) {
				may_fallback = 0;

				if (class == ATA_DEV_ATA)
					class = ATA_DEV_ATAPI;
				else
					class = ATA_DEV_ATA;
				goto retry;
			}

			/* Control reaches here iff the device aborted
			 * both flavors of IDENTIFYs which happens
			 * sometimes with phantom devices.
			 */
			ata_dev_dbg(dev,
				    "both IDENTIFYs aborted, assuming NODEV\n");
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
		ata_dev_dbg(dev, "dumping IDENTIFY data, "
			    "class=%d may_fallback=%d tried_spinup=%d\n",
			    class, may_fallback, tried_spinup);
		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed.  reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
			     reason, err_mask);
	return rc;
}
2039
2040static int ata_do_link_spd_horkage(struct ata_device *dev)
2041{
2042 struct ata_link *plink = ata_dev_phys_link(dev);
2043 u32 target, target_limit;
2044
2045 if (!sata_scr_valid(plink))
2046 return 0;
2047
2048 if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2049 target = 1;
2050 else
2051 return 0;
2052
2053 target_limit = (1 << target) - 1;
2054
2055
2056 if (plink->sata_spd_limit <= target_limit)
2057 return 0;
2058
2059 plink->sata_spd_limit = target_limit;
2060
2061
2062
2063
2064
2065 if (plink->sata_spd > target) {
2066 ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2067 sata_spd_string(target));
2068 return -EAGAIN;
2069 }
2070 return 0;
2071}
2072
2073static inline u8 ata_dev_knobble(struct ata_device *dev)
2074{
2075 struct ata_port *ap = dev->link->ap;
2076
2077 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2078 return 0;
2079
2080 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2081}
2082
/* Configure NCQ for @dev and write a human-readable description of
 * the result into @desc (at most @desc_sz bytes).  Also tries to
 * enable FPDMA auto-activation when both host and device support it.
 * Returns 0 on success, -EIO when enabling AA fails with a non-device
 * error (the BROKEN_FPDMA_AA horkage is then set so a retry skips it).
 */
static int ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->link->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
	unsigned int err_mask;
	char *aa_desc = "";

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return 0;
	}
	if (dev->horkage & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return 0;
	}
	if (ap->flags & ATA_FLAG_NCQ) {
		/* host queue depth is capped below ATA_MAX_QUEUE */
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
		(ap->flags & ATA_FLAG_FPDMA_AA) &&
		ata_id_has_fpdma_aa(dev->id)) {
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
			SATA_FPDMA_AA);
		if (err_mask) {
			ata_dev_err(dev,
				    "failed to enable AA (error_mask=0x%x)\n",
				    err_mask);
			/* a device error just means AA is unsupported;
			 * anything else is treated as a real failure
			 */
			if (err_mask != AC_ERR_DEV) {
				dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
				return -EIO;
			}
		} else
			aa_desc = ", AA";
	}

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
			ddepth, aa_desc);
	return 0;
}
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned long xfer_mask;
	char revbuf[7];		/* "CFA" or "ATA-%d" */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_dbg(dev, "%s: ENTER\n", __func__);

	/* set horkage */
	dev->horkage |= ata_dev_blacklisted(dev);
	ata_force_horkage(dev);

	if (dev->horkage & ATA_HORKAGE_DISABLE) {
		ata_dev_info(dev, "unsupported device, disabling\n");
		ata_dev_disable(dev);
		return 0;
	}

	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
	    dev->class == ATA_DEV_ATAPI) {
		ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
			     atapi_enabled ? "not supported with this driver"
			     : "disabled");
		ata_dev_disable(dev);
		return 0;
	}

	rc = ata_do_link_spd_horkage(dev);
	if (rc)
		return rc;

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_dbg(dev,
			    "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			    "85:%04x 86:%04x 87:%04x 88:%04x\n",
			    __func__,
			    id[49], id[82], id[83], id[84],
			    id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;
	dev->multi_count = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			/* CPRM may make this media unusable */
			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
				ata_dev_warn(dev,
	"supports DRM functions and may not be fully accessible\n");
			snprintf(revbuf, 7, "CFA");
		} else {
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
			/* Warn the user if the device has TPM extensions */
			if (ata_id_has_tpm(id))
				ata_dev_warn(dev,
	"supports DRM functions and may not be fully accessible\n");
		}

		dev->n_sectors = ata_id_n_sectors(id);

		/* get current R/W Multiple count setting */
		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
			unsigned int max = dev->id[47] & 0xff;
			unsigned int cnt = dev->id[59] & 0xff;
			/* only recognize/allow powers of two here */
			if (is_power_of_2(max) && is_power_of_2(cnt))
				if (cnt <= max)
					dev->multi_count = cnt;
		}

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[24];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
			if (rc)
				return rc;

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_info(dev, "%s: %s, %s, max %s\n",
					     revbuf, modelbuf, fwrevbuf,
					     ata_mode_string(xfer_mask));
				ata_dev_info(dev,
					     "%llu sectors, multi %u: %s %s\n",
					     (unsigned long long)dev->n_sectors,
					     dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS -- default translation from words 1/3/6 */
			dev->cylinders = id[1];
			dev->heads = id[3];
			dev->sectors = id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads = id[55];
				dev->sectors = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_info(dev, "%s: %s, %s, max %s\n",
					     revbuf, modelbuf, fwrevbuf,
					     ata_mode_string(xfer_mask));
				ata_dev_info(dev,
					     "%llu sectors, multi %u, CHS %u/%u/%u\n",
					     (unsigned long long)dev->n_sectors,
					     dev->multi_count, dev->cylinders,
					     dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		const char *dma_dir_string = "";
		u32 sntf;

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_warn(dev, "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support.  If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if (atapi_an &&
		    (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!sata_pmp_attached(ap) ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
			unsigned int err_mask;

			/* issue SET feature command to turn this on */
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_AN);
			if (err_mask)
				ata_dev_err(dev,
					    "failed to enable ATAPI AN (err_mask=0x%x)\n",
					    err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
			dev->flags |= ATA_DFLAG_DMADIR;
			dma_dir_string = ", DMADIR";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_info(dev,
				     "ATAPI: %s, %s, max %s%s%s%s\n",
				     modelbuf, fwrevbuf,
				     ata_mode_string(xfer_mask),
				     cdb_intr_string, atapi_an_string,
				     dma_dir_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	/* Limit PATA drives on SATA cable bridges to udma5 and
	 * the default sector limit.
	 */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_info(dev, "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if ((dev->class == ATA_DEV_ATAPI) &&
	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
	}

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot. Do this after the dev_config call as some controllers
		   with buggy firmware may want to avoid reporting false device
		   bugs */
		if (print_info) {
			ata_dev_warn(dev,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_warn(dev,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
		ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
		ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n");
	}

	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
	return rc;
}
2434
2435
2436
2437
2438
2439
2440
2441
2442
/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 */
int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}
2447
2448
2449
2450
2451
2452
2453
2454
2455
/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 */
int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}
2460
2461
2462
2463
2464
2465
2466
2467
/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 */
int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}
2472
2473
2474
2475
2476
2477
2478
2479
/**
 *	ata_cable_ignore	-	return ignored PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which don't use cable type to limit
 *	transfer mode.
 */
int ata_cable_ignore(struct ata_port *ap)
{
	return ATA_CBL_PATA_IGN;
}
2484
2485
2486
2487
2488
2489
2490
2491
/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables.
 */
int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */
int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	ata_for_each_dev(dev, &ap->link, ALL)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	ata_for_each_dev(dev, &ap->link, ALL) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	ata_for_each_dev(dev, &ap->link, ALL) {
		if (dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	/* read IDENTIFY page and configure devices. We have to do the
	   identify specific sequence bass-ackwards so that PDIAG- is
	   released by the slave device */
	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of
	 * the reported cable types and sensed types.  When SATA
	 * drives indicate we have a bridge, we don't know which end
	 * of the link the bridge is which is a problem.
	 */
	ata_for_each_dev(dev, &ap->link, ENABLED)
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */
	ata_for_each_dev(dev, &ap->link, ENABLED) {
		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	ata_for_each_dev(dev, &ap->link, ENABLED)
		return 0;

	/* no device present, disable port */
	return -ENODEV;

 fail:
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fallthrough */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link, 0);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
2632
2633
2634
2635
2636
2637
2638
2639
2640
2641
2642static void sata_print_link_status(struct ata_link *link)
2643{
2644 u32 sstatus, scontrol, tmp;
2645
2646 if (sata_scr_read(link, SCR_STATUS, &sstatus))
2647 return;
2648 sata_scr_read(link, SCR_CONTROL, &scontrol);
2649
2650 if (ata_phys_link_online(link)) {
2651 tmp = (sstatus >> 4) & 0xf;
2652 ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
2653 sata_spd_string(tmp), sstatus, scontrol);
2654 } else {
2655 ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
2656 sstatus, scontrol);
2657 }
2658}
2659
2660
2661
2662
2663
2664
2665
2666
2667
2668struct ata_device *ata_dev_pair(struct ata_device *adev)
2669{
2670 struct ata_link *link = adev->link;
2671 struct ata_device *pair = &link->device[1 - adev->devno];
2672 if (!ata_dev_enabled(pair))
2673 return NULL;
2674 return pair;
2675}
2676
2677
2678
2679
2680
2681
2682
2683
2684
2685
2686
2687
2688
2689
2690
2691
2692
2693
2694
2695
2696
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *	@spd_limit: Additional limit
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	If @spd_limit is non-zero, the speed is limited to equal to or
 *	lower than @spd_limit if such speed is supported.  If
 *	@spd_limit is slower than any supported speed, only the lowest
 *	supported speed is allowed.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
{
	u32 sstatus, spd, mask;
	int rc, bit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0 && ata_sstatus_online(sstatus))
		spd = (sstatus >> 4) & 0xf;
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;

	/* unconditionally mask off the highest bit */
	bit = fls(mask) - 1;
	mask &= ~(1 << bit);

	/* Mask off all speeds higher than or equal to the current
	 * one.  Force 1.5Gbps if current SPD is not available.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		mask &= 1;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	if (spd_limit) {
		if (mask & ((1 << spd_limit) - 1))
			mask &= (1 << spd_limit) - 1;
		else {
			/* @spd_limit is below every supported speed;
			 * fall back to the lowest supported one.
			 */
			bit = ffs(mask) - 1;
			mask = 1 << bit;
		}
	}

	link->sata_spd_limit = mask;

	ata_link_warn(link, "limiting SATA link speed to %s\n",
		      sata_spd_string(fls(mask)));

	return 0;
}
2750
2751static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2752{
2753 struct ata_link *host_link = &link->ap->link;
2754 u32 limit, target, spd;
2755
2756 limit = link->sata_spd_limit;
2757
2758
2759
2760
2761
2762 if (!ata_is_host_link(link) && host_link->sata_spd)
2763 limit &= (1 << host_link->sata_spd) - 1;
2764
2765 if (limit == UINT_MAX)
2766 target = 0;
2767 else
2768 target = fls(limit);
2769
2770 spd = (*scontrol >> 4) & 0xf;
2771 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2772
2773 return spd != target;
2774}
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790
2791static int sata_set_spd_needed(struct ata_link *link)
2792{
2793 u32 scontrol;
2794
2795 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2796 return 1;
2797
2798 return __sata_set_spd_needed(link, &scontrol);
2799}
2800
2801
2802
2803
2804
2805
2806
2807
2808
2809
2810
2811
2812
2813
2814int sata_set_spd(struct ata_link *link)
2815{
2816 u32 scontrol;
2817 int rc;
2818
2819 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2820 return rc;
2821
2822 if (!__sata_set_spd_needed(link, &scontrol))
2823 return 0;
2824
2825 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2826 return rc;
2827
2828 return 1;
2829}
2830
2831
2832
2833
2834
2835
2836
2837
2838
2839
2840
2841
2842
/*
 * Transfer mode timing table, sorted by mode number and terminated by
 * a 0xFF sentinel entry.  Field order per row:
 *
 *   { mode, setup, act8b, rec8b, cyc8b, active, recover,
 *     dmack_hold, cycle, udma }
 *
 * NOTE(review): values appear to be in ns per the ATA timing specs --
 * confirm against struct ata_timing in <linux/libata.h>.
 */
static const struct ata_timing ata_timing[] = {
	/* PIO modes */
	{ XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 },
	{ XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 },
	{ XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 },
	{ XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 },
	{ XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 },
	{ XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 },
	{ XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 },

	/* single-word DMA modes */
	{ XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 },
	{ XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 },
	{ XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 },

	/* multi-word DMA modes */
	{ XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 },
	{ XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 },
	{ XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 },
	{ XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 },
	{ XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 },

	/* ultra DMA modes -- only the udma (last) column is used */
	{ XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 },
	{ XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 },
	{ XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 },
	{ XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 },
	{ XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 },
	{ XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 },
	{ XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 },

	{ 0xFF }	/* sentinel -- terminates ata_timing_find_mode() scans */
};
2874
/* ENOUGH(): number of whole 'unit's needed to cover @v (division
 * rounding up, @v > 0).  EZ() maps v == 0 (field unused) to 0.
 */
#define ENOUGH(v, unit) (((v)-1)/(unit)+1)
#define EZ(v, unit) ((v)?ENOUGH(v, unit):0)

/* Quantize timing @t into clock counts of period @T (PIO/MWDMA
 * fields) and @UT (UDMA field), rounding up, storing into @q.
 * @t and @q may alias.  NOTE(review): the *1000 scaling implies @T
 * and @UT are in different units than the table values -- confirm
 * against the ata_timing_compute() callers before changing.
 */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup = EZ(t->setup * 1000, T);
	q->act8b = EZ(t->act8b * 1000, T);
	q->rec8b = EZ(t->rec8b * 1000, T);
	q->cyc8b = EZ(t->cyc8b * 1000, T);
	q->active = EZ(t->active * 1000, T);
	q->recover = EZ(t->recover * 1000, T);
	q->dmack_hold = EZ(t->dmack_hold * 1000, T);
	q->cycle = EZ(t->cycle * 1000, T);
	q->udma = EZ(t->udma * 1000, UT);
}
2890
2891void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2892 struct ata_timing *m, unsigned int what)
2893{
2894 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2895 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2896 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2897 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2898 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2899 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2900 if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
2901 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2902 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2903}
2904
2905const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
2906{
2907 const struct ata_timing *t = ata_timing;
2908
2909 while (xfer_mode > t->mode)
2910 t++;
2911
2912 if (xfer_mode == t->mode)
2913 return t;
2914 return NULL;
2915}
2916
/* Compute the clock-quantized timing for @adev at transfer mode
 * @speed into @t, honoring the drive's advertised EIDE minimum cycle
 * times.  @T and @UT are the clock periods used for quantizing the
 * PIO/MWDMA and UDMA fields respectively.
 * Returns 0 on success, -EINVAL if @speed has no timing table entry.
 */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const u16 *id = adev->id;
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */
	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * timings. If they are valid, include them into the timing spec.
	 */
	if (id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));

		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2)
				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
			else if ((speed <= XFER_PIO_4) ||
				 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
			p.cycle = id[ATA_ID_EIDE_DMA_MIN];

		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */
	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */
	if (speed > XFER_PIO_6) {
		/* recurse for the device's PIO mode and merge */
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */
	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/* In a few cases quantisation may produce enough errors to
	 * leave t->cycle too low for the sum of active and recovery;
	 * if so we must correct this.
	 */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
2992
2993
2994
2995
2996
2997
2998
2999
3000
3001
3002
3003
3004
3005
3006
3007
3008
/**
 *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
 *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
 *	@cycle: cycle duration in the same units as the timing table
 *
 *	Return matching xfer mode for @cycle.  The returned mode is of
 *	the transfer type specified by @xfer_shift.  If @cycle is too
 *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
 *	than the fastest known mode, the fastest mode is returned.
 *
 *	RETURNS:
 *	Matching xfer_mode, 0xff if no match found.
 */
u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
{
	u8 base_mode = 0xff, last_mode = 0xff;
	const struct ata_xfer_ent *ent;
	const struct ata_timing *t;

	/* find the base (slowest) mode for @xfer_shift */
	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (ent->shift == xfer_shift)
			base_mode = ent->base;

	/* walk the timing table upwards from the base mode while the
	 * entries still belong to @xfer_shift, remembering the last
	 * (fastest) mode whose cycle time is not shorter than @cycle
	 */
	for (t = ata_timing_find_mode(base_mode);
	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
		unsigned short this_cycle;

		switch (xfer_shift) {
		case ATA_SHIFT_PIO:
		case ATA_SHIFT_MWDMA:
			this_cycle = t->cycle;
			break;
		case ATA_SHIFT_UDMA:
			this_cycle = t->udma;
			break;
		default:
			return 0xff;
		}

		if (cycle > this_cycle)
			break;

		last_mode = t->mode;
	}

	return last_mode;
}
3043
3044
3045
3046
3047
3048
3049
3050
3051
3052
3053
3054
3055
3056
3057
3058
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector (may be or'd with ATA_DNXFER_QUIET)
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure (-ENOENT when no
 *	further downgrade is possible)
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned long orig_mask, xfer_mask;
	unsigned long pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* drop the highest PIO mode */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* drop the highest DMA mode, UDMA first */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fallthrough */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* no PIO left or nothing actually changed -- can't downgrade */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_warn(dev, "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
3131
/* Program @dev's selected transfer mode into the device with
 * SET FEATURES - XFER MODE, then revalidate.  Several classes of
 * legacy devices report a device error for a correct configuration;
 * those errors are whitelisted via @ign_dev_err.
 * Returns 0 on success, -errno otherwise.
 */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
	const char *dev_err_whine = "";
	int ign_dev_err = 0;
	unsigned int err_mask = 0;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	/* NOSETXFER is honored only for真 SATA devices; a PATA device
	 * with the horkage set still gets SETXFER (with a warning). */
	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
		dev_err_whine = " (SET_XFERMODE skipped)";
	else {
		if (nosetxfer)
			ata_dev_warn(dev,
				     "NOSETXFER but PATA detected - can't "
				     "skip SETXFER, might malfunction\n");
		err_mask = ata_dev_set_xfermode(dev);
	}

	if (err_mask & ~AC_ERR_DEV)
		goto fail;

	/* revalidate */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	if (dev->xfer_shift == ATA_SHIFT_PIO) {
		/* Old CFA may refuse this command, which is just fine */
		if (ata_id_is_cfa(dev->id))
			ign_dev_err = 1;

		/* Catch several broken garbage emulations plus some pre
		   ATA devices */
		if (ata_id_major_version(dev->id) == 0 &&
		    dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;

		/* Some very old devices and some bad newer ones fail
		   any kind of SET_XFERMODE request but support PIO0-2
		   timings and no IORDY */
		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;
	}

	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
	   Don't fail an MWDMA0 set if the device indicates it is in MWDMA0 */
	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
	    dev->dma_mode == XFER_MW_DMA_0 &&
	    (dev->id[63] >> 8) & 1)
		ign_dev_err = 1;

	/* if the device is actually configured correctly, ignore dev err */
	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
		ign_dev_err = 1;

	if (err_mask & AC_ERR_DEV) {
		if (!ign_dev_err)
			goto fail;
		else
			dev_err_whine = " (device error ignored)";
	}

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_info(dev, "configured for %s%s\n",
		     ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
		     dev_err_whine);

	return 0;

 fail:
	ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
	return -EIO;
}
3212
3213
3214
3215
3216
3217
3218
3219
3220
3221
3222
3223
3224
3225
3226
3227
3228
3229
3230int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3231{
3232 struct ata_port *ap = link->ap;
3233 struct ata_device *dev;
3234 int rc = 0, used_dma = 0, found = 0;
3235
3236
3237 ata_for_each_dev(dev, link, ENABLED) {
3238 unsigned long pio_mask, dma_mask;
3239 unsigned int mode_mask;
3240
3241 mode_mask = ATA_DMA_MASK_ATA;
3242 if (dev->class == ATA_DEV_ATAPI)
3243 mode_mask = ATA_DMA_MASK_ATAPI;
3244 else if (ata_id_is_cfa(dev->id))
3245 mode_mask = ATA_DMA_MASK_CFA;
3246
3247 ata_dev_xfermask(dev);
3248 ata_force_xfermask(dev);
3249
3250 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3251 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3252
3253 if (libata_dma_mask & mode_mask)
3254 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3255 else
3256 dma_mask = 0;
3257
3258 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3259 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3260
3261 found = 1;
3262 if (ata_dma_enabled(dev))
3263 used_dma = 1;
3264 }
3265 if (!found)
3266 goto out;
3267
3268
3269 ata_for_each_dev(dev, link, ENABLED) {
3270 if (dev->pio_mode == 0xff) {
3271 ata_dev_warn(dev, "no PIO support\n");
3272 rc = -EINVAL;
3273 goto out;
3274 }
3275
3276 dev->xfer_mode = dev->pio_mode;
3277 dev->xfer_shift = ATA_SHIFT_PIO;
3278 if (ap->ops->set_piomode)
3279 ap->ops->set_piomode(ap, dev);
3280 }
3281
3282
3283 ata_for_each_dev(dev, link, ENABLED) {
3284 if (!ata_dma_enabled(dev))
3285 continue;
3286
3287 dev->xfer_mode = dev->dma_mode;
3288 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3289 if (ap->ops->set_dmamode)
3290 ap->ops->set_dmamode(ap, dev);
3291 }
3292
3293
3294 ata_for_each_dev(dev, link, ENABLED) {
3295 rc = ata_dev_set_mode(dev);
3296 if (rc)
3297 goto out;
3298 }
3299
3300
3301
3302
3303 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3304 ap->host->simplex_claimed = ap;
3305
3306 out:
3307 if (rc)
3308 *r_failed_dev = dev;
3309 return rc;
3310}
3311
3312
3313
3314
3315
3316
3317
3318
3319
3320
3321
3322
3323
3324
3325
3326
3327
3328
3329
3330
3331
/**
 *	ata_wait_ready - wait for link to become ready
 *	@link: link to be waited on
 *	@deadline: deadline jiffies for the operation
 *	@check_ready: callback to check link readiness
 *
 *	Wait for @link to become ready.  @check_ready should return
 *	positive number if @link is ready, 0 if it isn't, -ENODEV if
 *	link doesn't seem to be occupied, and any other errno for other
 *	error conditions.
 *
 *	Transient -ENODEV conditions are tolerated for a grace period:
 *	ATA_TMOUT_FF_WAIT, or ATA_TMOUT_FF_WAIT_LONG when the host is
 *	doing a parallel scan.
 *
 *	RETURNS:
 *	0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_ready(struct ata_link *link, unsigned long deadline,
		   int (*check_ready)(struct ata_link *link))
{
	unsigned long start = jiffies;
	unsigned long nodev_deadline;
	int warned = 0;

	/* choose nodev grace period depending on scan mode */
	if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
	else
		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);

	/*
	 * Slave link readiness cannot be tested separately from the
	 * master link; callers must not pass in the slave link here.
	 */
	WARN_ON(link == link->ap->slave_link);

	if (time_after(nodev_deadline, deadline))
		nodev_deadline = deadline;

	while (1) {
		unsigned long now = jiffies;
		int ready, tmp;

		ready = tmp = check_ready(link);
		if (ready > 0)
			return 0;

		/*
		 * -ENODEV may be transient.  If the phy is reported
		 * online, the device is there, so keep polling.  On a
		 * SATA port whose phy state is indeterminate (not
		 * clearly offline), also keep polling until the nodev
		 * grace period expires.
		 */
		if (ready == -ENODEV) {
			if (ata_link_online(link))
				ready = 0;
			else if ((link->ap->flags & ATA_FLAG_SATA) &&
				 !ata_link_offline(link) &&
				 time_before(now, nodev_deadline))
				ready = 0;
		}

		if (ready)
			return ready;
		if (time_after(now, deadline))
			return -EBUSY;

		/* warn once if the wait has dragged on and there is
		 * still meaningful time left before the deadline */
		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_link_warn(link,
				"link is slow to respond, please be patient "
				"(ready=%d)\n", tmp);
			warned = 1;
		}

		ata_msleep(link->ap, 50);
	}
}
3398
3399
3400
3401
3402
3403
3404
3405
3406
3407
3408
3409
3410
3411
3412
/**
 *	ata_wait_after_reset - wait for link to become ready after reset
 *	@link: link to be waited on
 *	@deadline: deadline jiffies for the operation
 *	@check_ready: callback to check link readiness
 *
 *	Sleep for the post-reset grace period (ATA_WAIT_AFTER_RESET)
 *	and then wait until @link becomes ready.
 *
 *	RETURNS:
 *	0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
			 int (*check_ready)(struct ata_link *link))
{
	ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);

	return ata_wait_ready(link, deadline, check_ready);
}
3420
3421
3422
3423
3424
3425
3426
3427
3428
3429
3430
3431
3432
3433
3434
3435
3436
3437
3438
3439
3440
3441
3442
/**
 *	sata_link_debounce - debounce SATA phy status
 *	@link: ATA link to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Poll the DET field of SStatus every @params[0] msec until it
 *	holds the same value for @params[1] msec.  The wait is capped
 *	both by @deadline and by the debounce timeout @params[2].
 *	A stable DET value of 1 (device detected, phy not established)
 *	is not accepted while time remains.
 *
 *	RETURNS:
 *	0 on success (value stabilized), -errno on failure
 *	(-EPIPE when the value keeps bouncing past the deadline).
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval = params[0];
	unsigned long duration = params[1];
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	/* cap the deadline with the debounce timeout (params[2]) */
	t = ata_deadline(jiffies, params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* only the DET field is of interest */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		ata_msleep(link->ap, interval);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			/* DET == 1 isn't good enough; keep waiting
			 * while there is still time */
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			/* stable for @duration msec -> done */
			if (time_after(jiffies,
				       ata_deadline(last_jiffies, duration)))
				return 0;
			continue;
		}

		/* unstable: restart the stability window */
		last = cur;
		last_jiffies = jiffies;

		/* still bouncing at the deadline -> give up */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}
3490
3491
3492
3493
3494
3495
3496
3497
3498
3499
3500
3501
3502
3503
3504
/**
 *	sata_link_resume - resume SATA link
 *	@link: ATA link to resume SATA
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Resume @link: clear the DET field of SControl and disallow
 *	power-management state transitions (write 0x300), retrying the
 *	write up to ATA_LINK_RESUME_TRIES times if it doesn't stick,
 *	then debounce the phy status.  Finally clear any accumulated
 *	SError bits.
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_link_resume(struct ata_link *link, const unsigned long *params,
		     unsigned long deadline)
{
	int tries = ATA_LINK_RESUME_TRIES;
	u32 scontrol, serror;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/*
	 * The SControl write can fail to take effect; rewrite and
	 * re-read until the register reflects the requested value or
	 * the retries are exhausted.
	 */
	do {
		/* keep SPD, clear DET, set IPM = 3 (no LPM transitions) */
		scontrol = (scontrol & 0x0f0) | 0x300;
		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			return rc;

		/* give the phy time to settle before checking the
		 * write and before debouncing */
		ata_msleep(link->ap, 200);

		/* is SControl restored correctly? */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			return rc;
	} while ((scontrol & 0xf0f) != 0x300 && --tries);

	if ((scontrol & 0xf0f) != 0x300) {
		/* resume failed; warn but report success so the caller
		 * can carry on and discover the state itself */
		ata_link_warn(link, "failed to resume link (SControl %X)\n",
			     scontrol);
		return 0;
	}

	if (tries < ATA_LINK_RESUME_TRIES)
		ata_link_warn(link, "link resume succeeded after %d retries\n",
			      ATA_LINK_RESUME_TRIES - tries);

	if ((rc = sata_link_debounce(link, params, deadline)))
		return rc;

	/* clear SError accumulated during resume */
	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
		rc = sata_scr_write(link, SCR_ERROR, serror);

	/* -EINVAL from the SCR accessors means "not supported" here
	 * and is not treated as a failure */
	return rc != -EINVAL ? rc : 0;
}
3555
3556
3557
3558
3559
3560
3561
3562
3563
3564
3565
3566
3567
3568
3569
3570
3571
3572
3573
/**
 *	sata_link_scr_lpm - manipulate SControl IPM and SPM fields
 *	@link: ATA link to manipulate SControl for
 *	@policy: LPM policy to configure
 *	@spm_wakeup: initiate LPM transition to active state
 *
 *	Program the IPM (and, for wakeup, SPM) fields of @link's
 *	SControl register according to @policy, then clear the
 *	PHYRDY_CHG bit from SError since the transition itself may
 *	raise it.
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
		      bool spm_wakeup)
{
	struct ata_eh_context *ehc = &link->eh_context;
	bool woken_up = false;
	u32 scontrol;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	switch (policy) {
	case ATA_LPM_MAX_POWER:
		/* disable all LPM transitions (IPM = 3) */
		scontrol |= (0x3 << 8);
		/* initiate transition to active state */
		if (spm_wakeup) {
			scontrol |= (0x4 << 12);
			woken_up = true;
		}
		break;
	case ATA_LPM_MED_POWER:
		/* allow LPM to PARTIAL only (IPM = 2) */
		scontrol &= ~(0x1 << 8);
		scontrol |= (0x2 << 8);
		break;
	case ATA_LPM_MIN_POWER:
		if (ata_link_nr_enabled(link) > 0)
			/* no restrictions on LPM transitions (IPM = 0) */
			scontrol &= ~(0x3 << 8);
		else {
			/* empty port: put the phy offline (DET = 4) */
			scontrol &= ~0xf;
			scontrol |= (0x1 << 2);
		}
		break;
	default:
		WARN_ON(1);
	}

	rc = sata_scr_write(link, SCR_CONTROL, scontrol);
	if (rc)
		return rc;

	/* give the link time to transit out of LPM state */
	if (woken_up)
		msleep(10);

	/* clear PHYRDY_CHG from SError */
	ehc->i.serror &= ~SERR_PHYRDY_CHG;
	return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
}
3627
3628
3629
3630
3631
3632
3633
3634
3635
3636
3637
3638
3639
3640
3641
3642
3643
3644
/**
 *	ata_std_prereset - standard prereset method
 *	@link: ATA link to be reset
 *	@deadline: deadline jiffies for the operation
 *
 *	Standard prereset: if a hardreset is already scheduled, do
 *	nothing; otherwise resume the SATA link and, when the phy is
 *	offline, drop any pending softreset action.
 *
 *	RETURNS:
 *	Always 0.
 */
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume link */
	if (ap->flags & ATA_FLAG_SATA) {
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed anyway */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_warn(link,
				"failed to resume link for reset (errno=%d)\n",
				rc);
	}

	/* no point in trying softreset on an offline link */
	if (ata_phys_link_offline(link))
		ehc->i.action &= ~ATA_EH_SOFTRESET;

	return 0;
}
3672
3673
3674
3675
3676
3677
3678
3679
3680
3681
3682
3683
3684
3685
3686
3687
3688
3689
3690
3691
3692
3693
3694
3695
3696
/**
 *	sata_link_hardreset - reset link via SATA phy reset
 *	@link: link to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *	@online: optional out parameter indicating link onlineness
 *	@check_ready: optional callback to check link readiness
 *
 *	SATA phy-reset @link using the DET bits of the SControl
 *	register.  After the reset, link readiness is waited upon via
 *	ata_wait_ready() if @check_ready is specified; otherwise the
 *	caller is expected to wait itself.  @online, when given, is set
 *	iff the link is online AND the reset succeeded.
 *
 *	RETURNS:
 *	0 on success, -EAGAIN when a PMP follow-up softreset is
 *	required, other -errno otherwise.
 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
			unsigned long deadline,
			bool *online, int (*check_ready)(struct ata_link *))
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (online)
		*online = false;

	if (sata_set_spd_needed(link)) {
		/* Take the phy offline (DET = 4) while the speed limit
		 * is being reconfigured, to be on the safe side. */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			goto out;

		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(link);
	}

	/* issue phy wake/reset: DET = 1 starts COMRESET */
	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		goto out;

	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
		goto out;

	/* hold DET = 1 briefly so the COMRESET is registered;
	 * 1 ms appears to be the minimum required — TODO confirm
	 * against the controller spec */
	ata_msleep(link->ap, 1);

	/* bring link back */
	rc = sata_link_resume(link, timing, deadline);
	if (rc)
		goto out;
	/* if link is offline, nothing more to do */
	if (ata_phys_link_offline(link))
		goto out;

	/* Link is online.  From this point, -ENODEV too is an error. */
	if (online)
		*online = true;

	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
		/* A PMP-capable host link needs a follow-up softreset;
		 * give it only up to ATA_TMOUT_PMP_SRST_WAIT to signal
		 * readiness, then hand control back with -EAGAIN. */
		if (check_ready) {
			unsigned long pmp_deadline;

			pmp_deadline = ata_deadline(jiffies,
						    ATA_TMOUT_PMP_SRST_WAIT);
			if (time_after(pmp_deadline, deadline))
				pmp_deadline = deadline;
			ata_wait_ready(link, pmp_deadline, check_ready);
		}
		rc = -EAGAIN;
		goto out;
	}

	rc = 0;
	if (check_ready)
		rc = ata_wait_ready(link, deadline, check_ready);
 out:
	if (rc && rc != -EAGAIN) {
		/* online is set iff link is online && reset succeeded */
		if (online)
			*online = false;
		ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
	}
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
3784
3785
3786
3787
3788
3789
3790
3791
3792
3793
3794
3795
3796
3797
3798
3799int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3800 unsigned long deadline)
3801{
3802 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3803 bool online;
3804 int rc;
3805
3806
3807 rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3808 return online ? -EAGAIN : rc;
3809}
3810
3811
3812
3813
3814
3815
3816
3817
3818
3819
3820
3821
3822
3823void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3824{
3825 u32 serror;
3826
3827 DPRINTK("ENTER\n");
3828
3829
3830 if (!sata_scr_read(link, SCR_ERROR, &serror))
3831 sata_scr_write(link, SCR_ERROR, serror);
3832
3833
3834 sata_print_link_status(link);
3835
3836 DPRINTK("EXIT\n");
3837}
3838
3839
3840
3841
3842
3843
3844
3845
3846
3847
3848
3849
3850
3851
3852
3853
3854
3855static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3856 const u16 *new_id)
3857{
3858 const u16 *old_id = dev->id;
3859 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3860 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3861
3862 if (dev->class != new_class) {
3863 ata_dev_info(dev, "class mismatch %d != %d\n",
3864 dev->class, new_class);
3865 return 0;
3866 }
3867
3868 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3869 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3870 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3871 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3872
3873 if (strcmp(model[0], model[1])) {
3874 ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
3875 model[0], model[1]);
3876 return 0;
3877 }
3878
3879 if (strcmp(serial[0], serial[1])) {
3880 ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
3881 serial[0], serial[1]);
3882 return 0;
3883 }
3884
3885 return 1;
3886}
3887
3888
3889
3890
3891
3892
3893
3894
3895
3896
3897
3898
3899
3900
3901
3902int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3903{
3904 unsigned int class = dev->class;
3905 u16 *id = (void *)dev->link->ap->sector_buf;
3906 int rc;
3907
3908
3909 rc = ata_dev_read_id(dev, &class, readid_flags, id);
3910 if (rc)
3911 return rc;
3912
3913
3914 if (!ata_dev_same_device(dev, class, id))
3915 return -ENODEV;
3916
3917 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3918 return 0;
3919}
3920
3921
3922
3923
3924
3925
3926
3927
3928
3929
3930
3931
3932
3933
3934
3935
/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@new_class: new class code
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page, make sure @dev is still attached to the
 *	port and reconfigure it according to the new IDENTIFY page.
 *	Also detects and handles apparent HPA (host protected area)
 *	lock/unlock transitions via the reported sector count.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
		       unsigned int readid_flags)
{
	u64 n_sectors = dev->n_sectors;
	u64 n_native_sectors = dev->n_native_sectors;
	int rc;

	if (!ata_dev_enabled(dev))
		return -ENODEV;

	/* fail early if the new class is enabled but not one we handle */
	if (ata_class_enabled(new_class) &&
	    new_class != ATA_DEV_ATA &&
	    new_class != ATA_DEV_ATAPI &&
	    new_class != ATA_DEV_SEMB) {
		ata_dev_info(dev, "class mismatch %u != %u\n",
			     dev->class, new_class);
		rc = -ENODEV;
		goto fail;
	}

	/* re-read ID */
	rc = ata_dev_reread_id(dev, readid_flags);
	if (rc)
		goto fail;

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc)
		goto fail;

	/* verify n_sectors hasn't changed (only relevant for ATA disks
	 * that had a previously recorded size) */
	if (dev->class != ATA_DEV_ATA || !n_sectors ||
	    dev->n_sectors == n_sectors)
		return 0;

	/* n_sectors has changed */
	ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
		     (unsigned long long)n_sectors,
		     (unsigned long long)dev->n_sectors);

	/*
	 * If the native size didn't change and the new size matches
	 * it, this looks like a late HPA unlock — accept the larger
	 * size.
	 */
	if (dev->n_native_sectors == n_native_sectors &&
	    dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
		ata_dev_warn(dev,
			     "new n_sectors matches native, probably "
			     "late HPA unlock, n_sectors updated\n");
		/* keep the new (larger) n_sectors */
		return 0;
	}

	/*
	 * Conversely, if the OLD size matched native and the device
	 * now reports less, this looks like a late HPA lock: request
	 * an unlock and fail with -EIO so the caller retries.
	 */
	if (dev->n_native_sectors == n_native_sectors &&
	    dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
	    !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
		ata_dev_warn(dev,
			     "old n_sectors matches native, probably "
			     "late HPA lock, will try to unlock HPA\n");
		/* try unlocking HPA */
		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
		rc = -EIO;
	} else
		rc = -ENODEV;

	/* restore original n_[native_]sectors and fail */
	dev->n_native_sectors = n_native_sectors;
	dev->n_sectors = n_sectors;
 fail:
	ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
4016
/*
 * One quirk-table entry: devices whose IDENTIFY model number (and,
 * optionally, firmware revision) match the glob patterns get @horkage
 * OR'd into their horkage flags (see ata_dev_blacklisted()).
 */
struct ata_blacklist_entry {
	const char *model_num;	/* glob pattern for the model string */
	const char *model_rev;	/* glob pattern for the firmware rev; NULL matches any */
	unsigned long horkage;	/* ATA_HORKAGE_* flags to apply */
};
4022
/* Device quirk table, matched by ata_dev_blacklisted() using
 * glob_match() against model number and firmware revision.
 * First match wins.
 */
static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems (ATA_HORKAGE_NODMA) */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B",	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-848[02]B",	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8[34]35",NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A",		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },

	/* Device to be ignored entirely */
	{ "Config Disk",	NULL,		ATA_HORKAGE_DISABLE },

	/* Transfer-size / alignment quirks */
	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
	{ "QUANTUM DAT DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA },

	/* Devices where NCQ should be avoided */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
	{ "OCZ CORE_SSD",	"02.10104",	ATA_HORKAGE_NONCQ },

	/* Seagate drives on firmware SD15..SD19: no NCQ, and warn
	 * about the firmware (ATA_HORKAGE_FIRMWARE_WARN) */
	{ "ST31500341AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST31000333AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST3640[36]23AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST3320[68]13AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	/* Hitachi drives with NCQ problems on specific firmware */
	{ "HTS541060G9SA00",	"MB3OC60D",	ATA_HORKAGE_NONCQ, },
	{ "HTS541080G9SA00",	"MB4OC60D",	ATA_HORKAGE_NONCQ, },
	{ "HTS541010G9SA00",	"MBZOC60D",	ATA_HORKAGE_NONCQ, },

	/* Crucial C300 SSD, firmware 0001: NCQ broken */
	{ "C300-CTFDDAC128MAG",	"0001",		ATA_HORKAGE_NONCQ, },

	/* Devices with broken HPA handling */
	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },

	/* OCZ Vertex firmware 1.30: broken HPA as well */
	{ "OCZ-VERTEX",		"1.30",		ATA_HORKAGE_BROKEN_HPA },

	/* Devices with HPA size quirk (ATA_HORKAGE_HPA_SIZE) */
	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },

	/* Devices needing the relaxed 40-wire check (ATA_HORKAGE_IVB) */
	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
	/* TSSTcorp drives: IVB on SB00/SB01 firmware */
	{ "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]",  ATA_HORKAGE_IVB, },

	/* Devices that are known-OK behind SATA bridges */
	{ "MTRON MSP-SATA*",	NULL,		ATA_HORKAGE_BRIDGE_OK, },

	/* Devices limited to 1.5 Gbps link speed */
	{ "WD My Book",		NULL,		ATA_HORKAGE_1_5_GBPS, },

	/* Devices which get the SET_XFERMODE command skipped
	 * (ATA_HORKAGE_NOSETXFER); see ata_dev_set_mode() */
	{ "PIONEER DVD-RW  DVRTD08",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },

	/* End Marker */
	{ }
};
4133
4134
4135
4136
4137
4138
4139
4140
4141
4142
4143
4144
4145
4146
4147
4148
4149
4150
4151
4152
4153
4154
4155
4156
4157
4158
4159
4160
/**
 *	glob_match - match a text string against a glob-style pattern
 *	@str: the string to be examined
 *	@pat: the pattern to match @str against
 *
 *	Supported metacharacters: '*' (any sequence, via recursion),
 *	'?' (any single character) and '[...]' character classes with
 *	'-' ranges.
 *
 *	Note on ranges: the '-' test compares the endpoints with strict
 *	inequalities, but since an endpoint character also matches
 *	literally inside the class, the overall range behaves
 *	inclusively.
 *
 *	RETURNS:
 *	0 on a match, 1 on a mismatch.
 */
static int glob_match (const char *str, const char *pat)
{
	do {
		/* literal character or '?' single-char wildcard */
		if (*str == *pat || *pat == '?') {
			if (!*pat++)
				return 0;	/* both ended together: match */
		} else {
			/* anything but a character class is a mismatch */
			if (!*str || *pat != '[')
				break;
			/* scan the class for a literal or range hit */
			while (*++pat && *pat != ']' && *str != *pat) {
				if (*pat == '-' && *(pat - 1) != '[')
					if (*str > *(pat - 1) && *str < *(pat + 1)) {
						++pat;
						break;
					}
			}
			if (!*pat || *pat == ']')
				return 1;	/* no hit inside the class */
			while (*pat && *pat++ != ']');	/* skip past ']' */
		}
	} while (*++str && *pat);

	/* '*' consumes any tail; try every suffix recursively */
	if (*pat == '*') {
		if (!*++pat)
			return 0;	/* trailing '*' matches the rest */
		while (*str) {
			if (glob_match(str, pat) == 0)
				return 0;
			++str;
		}
	}
	if (!*str && !*pat)
		return 0;	/* fell off both ends together: match */
	return 1;
}
4200
4201static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4202{
4203 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4204 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4205 const struct ata_blacklist_entry *ad = ata_device_blacklist;
4206
4207 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4208 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4209
4210 while (ad->model_num) {
4211 if (!glob_match(model_num, ad->model_num)) {
4212 if (ad->model_rev == NULL)
4213 return ad->horkage;
4214 if (!glob_match(model_rev, ad->model_rev))
4215 return ad->horkage;
4216 }
4217 ad++;
4218 }
4219 return 0;
4220}
4221
4222static int ata_dma_blacklisted(const struct ata_device *dev)
4223{
4224
4225
4226
4227
4228 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4229 (dev->flags & ATA_DFLAG_CDB_INTR))
4230 return 1;
4231 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4232}
4233
4234
4235
4236
4237
4238
4239
4240
4241
4242static int ata_is_40wire(struct ata_device *dev)
4243{
4244 if (dev->horkage & ATA_HORKAGE_IVB)
4245 return ata_drive_40wire_relaxed(dev->id);
4246 return ata_drive_40wire(dev->id);
4247}
4248
4249
4250
4251
4252
4253
4254
4255
4256
4257
4258
4259
4260
4261
4262static int cable_is_40wire(struct ata_port *ap)
4263{
4264 struct ata_link *link;
4265 struct ata_device *dev;
4266
4267
4268 if (ap->cbl == ATA_CBL_PATA40)
4269 return 1;
4270
4271
4272 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4273 return 0;
4274
4275
4276
4277
4278
4279 if (ap->cbl == ATA_CBL_PATA40_SHORT)
4280 return 0;
4281
4282
4283
4284
4285
4286
4287
4288
4289
4290
4291 ata_for_each_link(link, ap, EDGE) {
4292 ata_for_each_dev(dev, link, ENABLED) {
4293 if (!ata_is_40wire(dev))
4294 return 0;
4295 }
4296 }
4297 return 1;
4298}
4299
4300
4301
4302
4303
4304
4305
4306
4307
4308
4309
4310
4311
/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute the supported xfermask of @dev and store it back into
 *	dev->{pio,mwdma,udma}_mask.  Applies all known limits: host
 *	controller capability, device capability, device pairing,
 *	DMA blacklist, simplex claims, IORDY and cable type.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/* when the device shares the cable with a pair device, the
	 * fastest modes are not allowed */
	if (ata_dev_pair(dev)) {
		/* no PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* no MWDMA3 or MWDMA4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_warn(dev,
			     "device is on DMA blacklist, disabling DMA\n");
	}

	/* on a simplex host, only the claiming port may use DMA */
	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_warn(dev,
			     "simplex DMA is claimed by other device, disabling DMA\n");
	}

	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	/* give the low-level driver a chance to filter modes */
	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/* Apply the cable rule last, so we know whether the rate was
	 * actually what the device demanded.  Modes above UDMA/33
	 * require an 80-wire cable.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if (cable_is_40wire(ap)) {
			ata_dev_warn(dev,
				     "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
4377
4378
4379
4380
4381
4382
4383
4384
4385
4386
4387
4388
4389
4390
4391
/**
 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 *	@dev: Device to which command will be sent
 *
 *	Issue SET FEATURES - XFER MODE to @dev, telling it the
 *	transfer mode selected in dev->xfer_mode.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	/* poll for completion rather than relying on an interrupt */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
	tf.protocol = ATA_PROT_NODATA;

	/* if we are using IORDY, we must send the mode-setting command */
	if (ata_pio_need_iordy(dev))
		tf.nsect = dev->xfer_mode;
	/* if the device has IORDY and the controller does not, turn it off */
	else if (ata_id_has_iordy(dev->id))
		tf.nsect = 0x01;
	else
		/* neither side uses IORDY; nothing to program */
		return 0;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4422
4423
4424
4425
4426
4427
4428
4429
4430
4431
4432
4433
4434
4435
4436
4437
/**
 *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
 *	@dev: Device to which command will be sent
 *	@enable: the SET FEATURES subcommand (goes into the feature register)
 *	@feature: the feature argument (goes into the sector count register)
 *
 *	Issue SET FEATURES - SATA FEATURES command to device @dev.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - SATA features\n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = enable;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = feature;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4458
4459
4460
4461
4462
4463
4464
4465
4466
4467
4468
4469
4470
/**
 *	ata_dev_init_params - Issue INIT DEV PARAMS command
 *	@dev: Device to which command will be sent
 *	@heads: Number of heads (taskfile parameter)
 *	@sectors: Number of sectors (taskfile parameter)
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	/* A device abort here is treated as success: the command is
	 * optional and some devices refuse it without harm. */
	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
		err_mask = 0;

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4501
4502
4503
4504
4505
4506
4507
4508
4509
4510
/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command and
 *	clear the DMAMAP flag.
 */
void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->sg;
	int dir = qc->dma_dir;

	WARN_ON_ONCE(sg == NULL);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* unmap using the original (pre-mapping) element count */
	if (qc->n_elem)
		dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->sg = NULL;
}
4527
4528
4529
4530
4531
4532
4533
4534
4535
4536
4537
4538
4539
4540
4541
4542int atapi_check_dma(struct ata_queued_cmd *qc)
4543{
4544 struct ata_port *ap = qc->ap;
4545
4546
4547
4548
4549 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4550 unlikely(qc->nbytes & 15))
4551 return 1;
4552
4553 if (ap->ops->check_atapi_dma)
4554 return ap->ops->check_atapi_dma(qc);
4555
4556 return 0;
4557}
4558
4559
4560
4561
4562
4563
4564
4565
4566
4567
4568
4569
4570
4571
4572
4573
4574int ata_std_qc_defer(struct ata_queued_cmd *qc)
4575{
4576 struct ata_link *link = qc->dev->link;
4577
4578 if (qc->tf.protocol == ATA_PROT_NCQ) {
4579 if (!ata_tag_valid(link->active_tag))
4580 return 0;
4581 } else {
4582 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4583 return 0;
4584 }
4585
4586 return ATA_DEFER_LINK;
4587}
4588
/* qc_prep callback for controllers that need no per-command preparation */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4590
4591
4592
4593
4594
4595
4596
4597
4598
4599
4600
4601
4602
4603
/**
 *	ata_sg_init - Associate command with scatter-gather table
 *	@qc: Command to be associated
 *	@sg: Scatter-gather table
 *	@n_elem: Number of elements in the s/g table
 *
 *	Initialize the data-related fields of @qc to point at @sg,
 *	containing @n_elem elements, and reset the cursor to the start.
 */
void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
		 unsigned int n_elem)
{
	qc->sg = sg;
	qc->n_elem = n_elem;
	qc->cursg = qc->sg;
}
4611
4612
4613
4614
4615
4616
4617
4618
4619
4620
4621
4622
4623
4624
/**
 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command
 *	@qc: Command with scatter-gather table to be mapped
 *
 *	DMA-map the scatter-gather table associated with @qc, recording
 *	both the original and the mapped element counts.
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */
static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int n_elem;

	VPRINTK("ENTER, ata%u\n", ap->print_id);

	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
	if (n_elem < 1)
		return -1;	/* mapping failed */

	DPRINTK("%d sg elements mapped\n", n_elem);
	/* keep the pre-mapping count for dma_unmap_sg() later */
	qc->orig_n_elem = qc->n_elem;
	qc->n_elem = n_elem;
	qc->flags |= ATA_QCFLAG_DMAMAP;

	return 0;
}
4643
4644
4645
4646
4647
4648
4649
4650
4651
4652
4653
4654
4655
/**
 *	swap_buf_le16 - swap halves of 16-bit words in place
 *	@buf: Buffer to swap
 *	@buf_words: Number of 16-bit words in buffer
 *
 *	Swap halves of 16-bit words if needed to convert from
 *	little-endian byte order to native cpu byte order, or
 *	vice-versa.  No-op on little-endian builds.
 */
void swap_buf_le16(u16 *buf, unsigned int buf_words)
{
#ifdef __BIG_ENDIAN
	unsigned int i;

	for (i = 0; i < buf_words; i++)
		buf[i] = le16_to_cpu(buf[i]);
#endif /* __BIG_ENDIAN */
}
4665
4666
4667
4668
4669
4670
4671
4672
4673
/**
 *	ata_qc_new - Request an available ATA command by allocating a tag
 *	@ap: target port
 *
 *	Scan the port's tag bitmap for a free tag.  The last tag
 *	(ATA_MAX_QUEUE - 1) is never handed out here — presumably
 *	reserved for internal commands (cf. ata_tag_internal()).
 *
 *	RETURNS:
 *	Allocated qc with its tag set, or %NULL when out of tags or
 *	when the port is frozen.
 */
static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
{
	struct ata_queued_cmd *qc = NULL;
	unsigned int i;

	/* no command while frozen */
	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
		return NULL;

	/* atomically claim the first free tag, excluding the last one */
	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
		if (!test_and_set_bit(i, &ap->qc_allocated)) {
			qc = __ata_qc_from_tag(ap, i);
			break;
		}

	if (qc)
		qc->tag = i;

	return qc;
}
4695
4696
4697
4698
4699
4700
4701
4702
4703
4704struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4705{
4706 struct ata_port *ap = dev->link->ap;
4707 struct ata_queued_cmd *qc;
4708
4709 qc = ata_qc_new(ap);
4710 if (qc) {
4711 qc->scsicmd = NULL;
4712 qc->ap = ap;
4713 qc->dev = dev;
4714
4715 ata_qc_reinit(qc);
4716 }
4717
4718 return qc;
4719}
4720
4721
4722
4723
4724
4725
4726
4727
4728
4729
4730
/**
 *	ata_qc_free - free unused ata_queued_cmd
 *	@qc: Command to free
 *
 *	Release an unused ata_queued_cmd: clear its flags, poison its
 *	tag, and return the tag to the port's bitmap.
 */
void ata_qc_free(struct ata_queued_cmd *qc)
{
	struct ata_port *ap;
	unsigned int tag;

	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	ap = qc->ap;

	qc->flags = 0;
	tag = qc->tag;
	if (likely(ata_tag_valid(tag))) {
		/* poison the tag so stale use is detectable */
		qc->tag = ATA_TAG_POISON;
		clear_bit(tag, &ap->qc_allocated);
	}
}
4746
/* Low-level completion: undo DMA mapping, drop the command from the
 * link/port active bookkeeping, then invoke the completion callback.
 * Callers (ata_qc_complete()) have already decided the command is
 * really finishing.
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap;
	struct ata_link *link;

	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
	ap = qc->ap;
	link = qc->dev->link;

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		link->sactive &= ~(1 << qc->tag);
		/* last outstanding NCQ command on this link? */
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* Drop ACTIVE and the qc_active bit BEFORE calling the
	 * completion callback, so the command cannot be completed a
	 * second time through the active masks.
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
4785
/* Snapshot the device's result taskfile into qc->result_tf via the
 * low-level driver's qc_fill_rtf hook, preserving the request flags. */
static void fill_result_tf(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	qc->result_tf.flags = qc->tf.flags;
	ap->ops->qc_fill_rtf(qc);
}
4793
/* Clear ATA_DFLAG_DUBIOUS_XFER once a transfer in the configured mode
 * has apparently succeeded.  Non-data commands prove nothing; a PIO
 * transfer on a DMA-capable device does not exercise DMA, so it is
 * not sufficient evidence either.
 */
static void ata_verify_xfer(struct ata_queued_cmd *qc)
{
	struct ata_device *dev = qc->dev;

	if (ata_is_nodata(qc->tf.protocol))
		return;

	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
		return;

	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
}
4806
4807
4808
4809
4810
4811
4812
4813
4814
4815
4816
4817
4818
4819
4820
4821
/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA command has
 *	completed, with either an ok or not-ok status.
 *
 *	With the new-style error handler (ap->ops->error_handler set),
 *	failed commands are routed to EH rather than completed
 *	directly; certain successful configuration commands also
 *	schedule EH for revalidation.  Without an error handler, the
 *	command is completed directly unless EH has already been
 *	scheduled for it.
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	if (ap->ops->error_handler) {
		struct ata_device *dev = qc->dev;
		struct ata_eh_info *ehi = &dev->link->eh_info;

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		/* internal commands are completed directly; their
		 * issuer inspects the result itself */
		if (unlikely(ata_tag_internal(qc->tag))) {
			fill_result_tf(qc);
			__ata_qc_complete(qc);
			return;
		}

		/* failed commands go to EH with the result TF filled */
		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			fill_result_tf(qc);
			ata_qc_schedule_eh(qc);
			return;
		}

		/* a successful completion on a frozen port is unexpected */
		WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		/* Some configuration-changing commands require the
		 * device to be revalidated afterwards.
		 */
		switch (qc->tf.command) {
		case ATA_CMD_SET_FEATURES:
			/* only write-cache toggles need revalidation */
			if (qc->tf.feature != SETFEATURES_WC_ON &&
			    qc->tf.feature != SETFEATURES_WC_OFF)
				break;
			/* fall through */
		case ATA_CMD_INIT_DEV_PARAMS:
		case ATA_CMD_SET_MULTI:
			/* revalidate device */
			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
			ata_port_schedule_eh(ap);
			break;

		case ATA_CMD_SLEEP:
			dev->flags |= ATA_DFLAG_SLEEPING;
			break;
		}

		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
			ata_verify_xfer(qc);

		__ata_qc_complete(qc);
	} else {
		/* old EH: completion is nullified once EH is scheduled */
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
4908
4909
4910
4911
4912
4913
4914
4915
4916
4917
4918
4919
4920
4921
4922
4923
4924
4925
4926
4927
4928
/**
 *	ata_qc_complete_multiple - Complete multiple qcs successfully
 *	@ap: port in question
 *	@qc_active: new qc_active mask
 *
 *	Complete in-flight commands.  Meant to be called from a
 *	low-level driver's interrupt routine: @qc_active is the new
 *	set of still-active tags; every tag that dropped out of
 *	ap->qc_active is completed.
 *
 *	RETURNS:
 *	Number of completed commands on success, -errno otherwise.
 */
int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
{
	int nr_done = 0;
	u32 done_mask;

	/* bits that changed state between old and new masks */
	done_mask = ap->qc_active ^ qc_active;

	/* a tag may only be cleared, never newly set, by the LLD */
	if (unlikely(done_mask & qc_active)) {
		ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n",
			     ap->qc_active, qc_active);
		return -EINVAL;
	}

	while (done_mask) {
		struct ata_queued_cmd *qc;
		unsigned int tag = __ffs(done_mask);

		qc = ata_qc_from_tag(ap, tag);
		if (qc) {
			ata_qc_complete(qc);
			nr_done++;
		}
		done_mask &= ~(1 << tag);
	}

	return nr_done;
}
4956
4957
4958
4959
4960
4961
4962
4963
4964
4965
4966
4967
4968
4969void ata_qc_issue(struct ata_queued_cmd *qc)
4970{
4971 struct ata_port *ap = qc->ap;
4972 struct ata_link *link = qc->dev->link;
4973 u8 prot = qc->tf.protocol;
4974
4975
4976
4977
4978
4979 WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
4980
4981 if (ata_is_ncq(prot)) {
4982 WARN_ON_ONCE(link->sactive & (1 << qc->tag));
4983
4984 if (!link->sactive)
4985 ap->nr_active_links++;
4986 link->sactive |= 1 << qc->tag;
4987 } else {
4988 WARN_ON_ONCE(link->sactive);
4989
4990 ap->nr_active_links++;
4991 link->active_tag = qc->tag;
4992 }
4993
4994 qc->flags |= ATA_QCFLAG_ACTIVE;
4995 ap->qc_active |= 1 << qc->tag;
4996
4997
4998
4999
5000
5001 if (WARN_ON_ONCE(ata_is_data(prot) &&
5002 (!qc->sg || !qc->n_elem || !qc->nbytes)))
5003 goto sys_err;
5004
5005 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5006 (ap->flags & ATA_FLAG_PIO_DMA)))
5007 if (ata_sg_setup(qc))
5008 goto sys_err;
5009
5010
5011 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5012 link->eh_info.action |= ATA_EH_RESET;
5013 ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5014 ata_link_abort(link);
5015 return;
5016 }
5017
5018 ap->ops->qc_prep(qc);
5019
5020 qc->err_mask |= ap->ops->qc_issue(qc);
5021 if (unlikely(qc->err_mask))
5022 goto err;
5023 return;
5024
5025sys_err:
5026 qc->err_mask |= AC_ERR_SYSTEM;
5027err:
5028 ata_qc_complete(qc);
5029}
5030
5031
5032
5033
5034
5035
5036
5037
5038
5039
5040
5041
5042
5043int sata_scr_valid(struct ata_link *link)
5044{
5045 struct ata_port *ap = link->ap;
5046
5047 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5048}
5049
5050
5051
5052
5053
5054
5055
5056
5057
5058
5059
5060
5061
5062
5063
5064
5065
5066int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5067{
5068 if (ata_is_host_link(link)) {
5069 if (sata_scr_valid(link))
5070 return link->ap->ops->scr_read(link, reg, val);
5071 return -EOPNOTSUPP;
5072 }
5073
5074 return sata_pmp_scr_read(link, reg, val);
5075}
5076
5077
5078
5079
5080
5081
5082
5083
5084
5085
5086
5087
5088
5089
5090
5091
5092
5093int sata_scr_write(struct ata_link *link, int reg, u32 val)
5094{
5095 if (ata_is_host_link(link)) {
5096 if (sata_scr_valid(link))
5097 return link->ap->ops->scr_write(link, reg, val);
5098 return -EOPNOTSUPP;
5099 }
5100
5101 return sata_pmp_scr_write(link, reg, val);
5102}
5103
5104
5105
5106
5107
5108
5109
5110
5111
5112
5113
5114
5115
5116
5117
5118
5119int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5120{
5121 if (ata_is_host_link(link)) {
5122 int rc;
5123
5124 if (sata_scr_valid(link)) {
5125 rc = link->ap->ops->scr_write(link, reg, val);
5126 if (rc == 0)
5127 rc = link->ap->ops->scr_read(link, reg, &val);
5128 return rc;
5129 }
5130 return -EOPNOTSUPP;
5131 }
5132
5133 return sata_pmp_scr_write(link, reg, val);
5134}
5135
5136
5137
5138
5139
5140
5141
5142
5143
5144
5145
5146
5147
5148
5149
5150bool ata_phys_link_online(struct ata_link *link)
5151{
5152 u32 sstatus;
5153
5154 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5155 ata_sstatus_online(sstatus))
5156 return true;
5157 return false;
5158}
5159
5160
5161
5162
5163
5164
5165
5166
5167
5168
5169
5170
5171
5172
5173
5174bool ata_phys_link_offline(struct ata_link *link)
5175{
5176 u32 sstatus;
5177
5178 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5179 !ata_sstatus_online(sstatus))
5180 return true;
5181 return false;
5182}
5183
5184
5185
5186
5187
5188
5189
5190
5191
5192
5193
5194
5195
5196
5197
5198
5199
5200bool ata_link_online(struct ata_link *link)
5201{
5202 struct ata_link *slave = link->ap->slave_link;
5203
5204 WARN_ON(link == slave);
5205
5206 return ata_phys_link_online(link) ||
5207 (slave && ata_phys_link_online(slave));
5208}
5209
5210
5211
5212
5213
5214
5215
5216
5217
5218
5219
5220
5221
5222
5223
5224
5225
5226bool ata_link_offline(struct ata_link *link)
5227{
5228 struct ata_link *slave = link->ap->slave_link;
5229
5230 WARN_ON(link == slave);
5231
5232 return ata_phys_link_offline(link) &&
5233 (!slave || ata_phys_link_offline(slave));
5234}
5235
5236#ifdef CONFIG_PM
/*
 * ata_host_request_pm - request EH-driven PM operation on all ports
 * @host: host to operate on
 * @mesg: PM message to record in ap->pm_mesg
 * @action: EH action to request on every link of each port
 * @ehi_flags: EH info flags to set on every link of each port
 * @wait: if set, wait for EH to finish each port and return its result
 *
 * The actual PM work is done by EH; this function only files the
 * request under the port lock and optionally waits.  Returns 0 on
 * success or the first port's error when @wait is set.
 */
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_link *link;

		/* A previous PM operation might still be in progress.
		 * Wait for ATA_PFLAG_PM_PENDING to clear first.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM operation to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			rc = 0;
			/* EH writes its result through ap->pm_result */
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		ata_for_each_link(link, ap, HOST_FIRST) {
			link->eh_info.action |= action;
			link->eh_info.flags |= ehi_flags;
		}

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}
5286
5287
5288
5289
5290
5291
5292
5293
5294
5295
5296
5297
5298
5299
5300
5301
/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Suspend @host.  The actual operation is performed by EH; this
 *	function requests EH to perform PM operations and waits for it
 *	to finish.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	unsigned int ehi_flags = ATA_EHI_QUIET;
	int rc;

	/*
	 * On some hardware, device fails to respond after spun down
	 * for suspend.  As the device won't be used before being
	 * resumed, we don't need to touch the device.  Ask EH to skip
	 * the usual stuff and proceed directly to suspend.
	 *
	 * http://thread.gmane.org/gmane.linux.ide/46764
	 */
	if (mesg.event == PM_EVENT_SUSPEND)
		ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;

	rc = ata_host_request_pm(host, mesg, 0, ehi_flags, 1);
	if (rc == 0)
		host->dev->power.power_state = mesg;
	return rc;
}
5323
5324
5325
5326
5327
5328
5329
5330
5331
5332
5333
5334
/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host.  The actual operation is performed by EH; this
 *	function requests EH to perform PM operations and returns
 *	without waiting for them to finish (wait == 0).
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_resume(struct ata_host *host)
{
	ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
	host->dev->power.power_state = PMSG_ON;
}
5341#endif
5342
5343
5344
5345
5346
5347
5348
5349
5350
5351
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = ata_dev_phys_link(dev);
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the attached device, reset it */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/* The low bits of dev->flags (below ATA_DFLAG_INIT_MASK) may
	 * be updated asynchronously, so clear them under the host
	 * lock.  horkage is cleared along with them.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	/* wipe the region between CLEAR_BEGIN and CLEAR_END, then
	 * reset the transfer-mode masks to "everything allowed" */
	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
5377
5378
5379
5380
5381
5382
5383
5384
5385
5386
5387
5388
/**
 *	ata_link_init - Initialize an ata_link structure
 *	@ap: ATA port link is attached to
 *	@link: Link structure to initialize
 *	@pmp: Port multiplier port number
 *
 *	Initialize @link.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
{
	int i;

	/* clear everything except for devices */
	memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
	       ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);

	link->ap = ap;
	link->pmp = pmp;
	link->active_tag = ATA_TAG_POISON;
	link->hw_sata_spd_limit = UINT_MAX;

	/* initialize each device attached to this link */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &link->device[i];

		dev->link = link;
		dev->devno = dev - link->device;
#ifdef CONFIG_ATA_ACPI
		dev->gtf_filter = ata_acpi_gtf_filter;
#endif
		ata_dev_init(dev);
	}
}
5414
5415
5416
5417
5418
5419
5420
5421
5422
5423
5424
5425
5426
5427
/**
 *	sata_link_init_spd - Initialize link->sata_spd_limit
 *	@link: Link to configure sata_spd_limit for
 *
 *	Initialize @link->[hw_]sata_spd_limit from the SPD field of
 *	the link's current SControl value, then apply any libata.force
 *	link limits.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_init_spd(struct ata_link *link)
{
	u8 spd;
	int rc;

	/* record the current SControl value in link->saved_scontrol */
	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
	if (rc)
		return rc;

	/* SPD limit field is bits 7:4 of SControl; 0 means no limit */
	spd = (link->saved_scontrol >> 4) & 0xf;
	if (spd)
		link->hw_sata_spd_limit &= (1 << spd) - 1;

	ata_force_link_limits(link);

	link->sata_spd_limit = link->hw_sata_spd_limit;

	return 0;
}
5447
5448
5449
5450
5451
5452
5453
5454
5455
5456
5457
5458
5459
/**
 *	ata_port_alloc - allocate and initialize basic ATA port resources
 *	@host: ATA host this allocated port belongs to
 *
 *	Allocate and initialize basic ATA port resources.
 *
 *	RETURNS:
 *	Allocated ATA port on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	/* port starts out frozen; it is thawed when probing begins */
	ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
	ap->lock = &host->lock;
	ap->print_id = -1;	/* assigned later by ata_host_register() */
	ap->host = host;
	ap->dev = host->dev;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	mutex_init(&ap->scsi_scan_mutex);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);
	init_completion(&ap->park_req_pending);
	init_timer_deferrable(&ap->fastdrain_timer);
	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
	ap->fastdrain_timer.data = (unsigned long)ap;

	ap->cbl = ATA_CBL_NONE;

	ata_link_init(ap, &ap->link, 0);

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	ata_sff_port_init(ap);

	return ap;
}
5507
/*
 * devres release callback for hosts allocated via ata_host_alloc():
 * drop SCSI host references, free every port and clear drvdata.
 */
static void ata_host_release(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if (ap->scsi_host)
			scsi_host_put(ap->scsi_host);

		kfree(ap->pmp_link);
		kfree(ap->slave_link);
		kfree(ap);
		host->ports[i] = NULL;
	}

	dev_set_drvdata(gendev, NULL);
}
5530
5531
5532
5533
5534
5535
5536
5537
5538
5539
5540
5541
5542
5543
5544
5545
5546
5547
5548
5549
5550
/**
 *	ata_host_alloc - allocate and init basic ATA host resources
 *	@dev: generic device this host is associated with
 *	@max_ports: maximum number of ATA ports associated with this host
 *
 *	Allocate and initialize basic ATA host resources.  LLD calls
 *	this function to allocate a host, initializes it fully and
 *	attaches it using ata_host_register().
 *
 *	RETURNS:
 *	Allocated ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;

	DPRINTK("ENTER\n");

	/* group all allocations so they can be released together on error */
	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return NULL;

	/* host struct plus port pointer array; the extra slot stays
	 * NULL and acts as an end-of-array sentinel */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);

	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
	if (!host)
		goto err_out;

	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	mutex_init(&host->eh_mutex);
	host->dev = dev;
	host->n_ports = max_ports;

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

 err_out:
	devres_release_group(dev, NULL);
	return NULL;
}
5596
5597
5598
5599
5600
5601
5602
5603
5604
5605
5606
5607
5608
5609
5610
5611
5612
/**
 *	ata_host_alloc_pinfo - alloc host and init with port_info array
 *	@dev: generic device this host is associated with
 *	@ppi: array of ATA port_info to initialize host with
 *	@n_ports: number of ATA ports attached to this host
 *
 *	Allocate ATA host and initialize with info from @ppi.  @ppi
 *	may contain fewer entries than @n_ports; the last non-NULL
 *	entry is reused for all remaining ports.
 *
 *	RETURNS:
 *	Allocated ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc_pinfo(struct device *dev,
				      const struct ata_port_info * const * ppi,
				      int n_ports)
{
	const struct ata_port_info *pi;
	struct ata_host *host;
	int i, j;

	host = ata_host_alloc(dev, n_ports);
	if (!host)
		return NULL;

	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* pi carries over from the previous iteration once
		 * @ppi has no more non-NULL entries */
		if (ppi[j])
			pi = ppi[j++];

		ap->pio_mask = pi->pio_mask;
		ap->mwdma_mask = pi->mwdma_mask;
		ap->udma_mask = pi->udma_mask;
		ap->flags |= pi->flags;
		ap->link.flags |= pi->link_flags;
		ap->ops = pi->port_ops;

		/* first non-dummy port_ops becomes the host's ops */
		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
			host->ops = pi->port_ops;
	}

	return host;
}
5644
5645
5646
5647
5648
5649
5650
5651
5652
5653
5654
5655
5656
5657
5658
5659
5660
5661
5662
5663
5664
5665
5666
5667
5668
5669
5670
5671
5672
5673
5674
5675
5676
5677
5678
5679
5680
5681
5682
5683
5684
5685
5686
5687
5688
5689
5690
5691int ata_slave_link_init(struct ata_port *ap)
5692{
5693 struct ata_link *link;
5694
5695 WARN_ON(ap->slave_link);
5696 WARN_ON(ap->flags & ATA_FLAG_PMP);
5697
5698 link = kzalloc(sizeof(*link), GFP_KERNEL);
5699 if (!link)
5700 return -ENOMEM;
5701
5702 ata_link_init(ap, link, 1);
5703 ap->slave_link = link;
5704 return 0;
5705}
5706
/*
 * devres action registered by ata_host_start(): stop each port via its
 * port_stop op, then stop the host itself via host_stop.
 */
static void ata_host_stop(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	WARN_ON(!(host->flags & ATA_HOST_STARTED));

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}

	if (host->ops->host_stop)
		host->ops->host_stop(host);
}
5724
5725
5726
5727
5728
5729
5730
5731
5732
5733
5734
5735
5736
5737
5738
5739
5740
5741
5742
5743
5744
/*
 * ata_finalize_port_ops - finalize ata_port_operations
 * @ops: ata_port_operations to finalize
 *
 * An ata_port_operations may inherit from another via ->inherits.
 * Each slot left NULL in @ops is filled from the first ops in the
 * inheritance chain which defines it.  After inheritance, slots
 * holding an ERR_PTR marker (used to deliberately null out an
 * inherited op) are reset to NULL.  ->inherits is cleared so the
 * work happens at most once per ops table.
 *
 * LOCKING: none; serialized by an internal spinlock.
 */
static void ata_finalize_port_ops(struct ata_port_operations *ops)
{
	static DEFINE_SPINLOCK(lock);
	const struct ata_port_operations *cur;
	/* treat everything before ->inherits as an array of op slots */
	void **begin = (void **)ops;
	void **end = (void **)&ops->inherits;
	void **pp;

	if (!ops || !ops->inherits)
		return;

	spin_lock(&lock);

	/* walk up the inheritance chain, filling in unset slots */
	for (cur = ops->inherits; cur; cur = cur->inherits) {
		void **inherit = (void **)cur;

		for (pp = begin; pp < end; pp++, inherit++)
			if (!*pp)
				*pp = *inherit;
	}

	/* ERR_PTR markers mean "explicitly no op" -- clear to NULL */
	for (pp = begin; pp < end; pp++)
		if (IS_ERR(*pp))
			*pp = NULL;

	ops->inherits = NULL;

	spin_unlock(&lock);
}
5774
5775
5776
5777
5778
5779
5780
5781
5782
5783
5784
5785
5786
5787
5788
5789
5790
/**
 *	ata_host_start - start and freeze ports of an ATA host
 *	@host: ATA host to start ports for
 *
 *	Start and then freeze ports of @host.  Started status is
 *	recorded in host->flags, so this function can be called
 *	multiple times; ports are started only once.  If host->ops
 *	isn't initialized yet, it is set to the first non-dummy port's
 *	ops.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if all ports are started successfully, -errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int have_stop = 0;
	void *start_dr = NULL;
	int i, rc;

	if (host->flags & ATA_HOST_STARTED)
		return 0;

	ata_finalize_port_ops(host->ops);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_finalize_port_ops(ap->ops);

		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_stop)
			have_stop = 1;
	}

	if (host->ops->host_stop)
		have_stop = 1;

	/* if anything needs stopping, register a devres action so the
	 * ports/host get stopped automatically on driver detach */
	if (have_stop) {
		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
		if (!start_dr)
			return -ENOMEM;
	}

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				if (rc != -ENODEV)
					dev_err(host->dev,
						"failed to start port %d (errno=%d)\n",
						i, rc);
				goto err_out;
			}
		}
		ata_eh_freeze_port(ap);
	}

	if (start_dr)
		devres_add(host->dev, start_dr);
	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	/* unwind: stop the ports that were already started */
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	devres_free(start_dr);
	return rc;
}
5854
5855
5856
5857
5858
5859
5860
5861
5862
5863
5864
5865
5866
/**
 *	ata_host_init - Initialize a host struct
 *	@host: host to initialize
 *	@dev: device host is attached to
 *	@flags: host flags
 *	@ops: port_ops
 *
 *	Minimal host initialization for callers that manage their own
 *	ports (unlike ata_host_alloc(), no ports are allocated here).
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */
void ata_host_init(struct ata_host *host, struct device *dev,
		   unsigned long flags, struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	mutex_init(&host->eh_mutex);
	host->dev = dev;
	host->flags = flags;
	host->ops = ops;
}
5876
/*
 * ata_port_probe - kick off probing for a port
 * @ap: port to probe
 *
 * New-EH drivers: file a probing request with EH and wait for it to
 * finish (always returns 0 on this path).  Old-EH drivers: probe
 * synchronously via ata_bus_probe() and return its result.
 */
int ata_port_probe(struct ata_port *ap)
{
	int rc = 0;

	/* probe */
	if (ap->ops->error_handler) {
		struct ata_eh_info *ehi = &ap->link.eh_info;
		unsigned long flags;

		/* kick EH for boot probing */
		spin_lock_irqsave(ap->lock, flags);

		ehi->probe_mask |= ATA_ALL_DEVICES;
		ehi->action |= ATA_EH_RESET;
		ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

		ap->pflags &= ~ATA_PFLAG_INITIALIZING;
		ap->pflags |= ATA_PFLAG_LOADING;
		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait for EH to finish */
		ata_port_wait_eh(ap);
	} else {
		DPRINTK("ata%u: bus probe begin\n", ap->print_id);
		rc = ata_bus_probe(ap);
		DPRINTK("ata%u: bus probe end\n", ap->print_id);
	}
	return rc;
}
5908
5909
/* async_schedule() callback: probe one port, then scan its SCSI host */
static void async_port_probe(void *data, async_cookie_t cookie)
{
	struct ata_port *ap = data;

	/*
	 * If we're not allowed to scan this host in parallel, wait
	 * until all previous scans have completed before probing.
	 * The serialization is per controller, so port 0 never needs
	 * to wait -- only later ports do.
	 */
	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
		async_synchronize_cookie(cookie);

	(void)ata_port_probe(ap);

	/* synchronize here so SCSI devices are registered in port order */
	async_synchronize_cookie(cookie);

	ata_scsi_scan_host(ap, 1);
}
5931
5932
5933
5934
5935
5936
5937
5938
5939
5940
5941
5942
5943
5944
5945
5946
5947
/**
 *	ata_host_register - register initialized ATA host
 *	@host: ATA host to register
 *	@sht: template for SCSI host
 *
 *	Register initialized ATA host.  @host is allocated using
 *	ata_host_alloc() and fully initialized by LLD.  This function
 *	registers @host with the ATA and SCSI layers and kicks off
 *	asynchronous probing of each port.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_err(host->dev, "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Blow away unused ports.  This happens when LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names */
	for (i = 0; i < host->n_ports; i++)
		host->ports[i]->print_id = ata_print_id++;

	/* create sysfs transport objects for each port */
	for (i = 0; i < host->n_ports; i++) {
		rc = ata_tport_add(host->dev,host->ports[i]);
		if (rc) {
			goto err_tadd;
		}
	}

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		goto err_tadd;

	/* associate with ACPI nodes */
	ata_acpi_associate(host);

	/* set cable, sata_spd_limit and report per-port capabilities */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned long xfer_mask;

		/* SATA ports default to a SATA cable if LLD didn't set one */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);
		if (ap->slave_link)
			sata_link_init_spd(ap->slave_link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap)) {
			ata_port_info(ap, "%cATA max %s %s\n",
				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
				      ata_mode_string(xfer_mask),
				      ap->link.eh_info.desc);
			ata_ehi_clear_desc(&ap->link.eh_info);
		} else
			ata_port_info(ap, "DUMMY\n");
	}

	/* perform each probe asynchronously */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		async_schedule(async_port_probe, ap);
	}

	return 0;

 err_tadd:
	/* unwind the transport objects added so far */
	while (--i >= 0) {
		ata_tport_delete(host->ports[i]);
	}
	return rc;

}
6029
6030
6031
6032
6033
6034
6035
6036
6037
6038
6039
6040
6041
6042
6043
6044
6045
6046
6047
6048
6049
6050
6051
6052
/**
 *	ata_host_activate - start host, request IRQ and register it
 *	@host: target ATA host
 *	@irq: IRQ to request
 *	@irq_handler: irq_handler used when requesting IRQ
 *	@irq_flags: irq_flags used when requesting IRQ
 *	@sht: scsi_host_template to use when registering the host
 *
 *	Convenience helper combining the three usual activation steps:
 *	ata_host_start(), devm_request_irq() and ata_host_register().
 *	An @irq of 0 skips IRQ registration (polling mode); in that
 *	case @irq_handler must be NULL.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_activate(struct ata_host *host, int irq,
		      irq_handler_t irq_handler, unsigned long irq_flags,
		      struct scsi_host_template *sht)
{
	int i, rc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	/* Special case for polling mode */
	if (!irq) {
		WARN_ON(irq_handler);
		return ata_host_register(host, sht);
	}

	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
			      dev_driver_string(host->dev), host);
	if (rc)
		return rc;

	for (i = 0; i < host->n_ports; i++)
		ata_port_desc(host->ports[i], "irq %d", irq);

	rc = ata_host_register(host, sht);
	/* if failed, just free the IRQ and leave ports alone */
	if (rc)
		devm_free_irq(host->dev, irq, host);

	return rc;
}
6084
6085
6086
6087
6088
6089
6090
6091
6092
6093
6094
6095
/**
 *	ata_port_detach - Detach ATA port in preparation of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	ata_port_schedule_eh(ap);
	spin_unlock_irqrestore(ap->lock, flags);

	/* wait till EH commits suicide */
	ata_port_wait_eh(ap);

	/* it better be dead now */
	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));

	cancel_delayed_work_sync(&ap->hotplug_task);

 skip_eh:
	/* remove transport objects for the PMP links and the port */
	if (ap->pmp_link) {
		int i;
		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
			ata_tlink_delete(&ap->pmp_link[i]);
	}
	ata_tport_delete(ap);

	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
6128
6129
6130
6131
6132
6133
6134
6135
6136
6137
/**
 *	ata_host_detach - Detach all ports of an ATA host
 *	@host: Host to detach
 *
 *	Detach all ports of @host.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_detach(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++)
		ata_port_detach(host->ports[i]);

	/* the host is dead now, dissociate ACPI */
	ata_acpi_dissociate(host);
}
6148
6149#ifdef CONFIG_PCI
6150
6151
6152
6153
6154
6155
6156
6157
6158
6159
6160
6161
6162void ata_pci_remove_one(struct pci_dev *pdev)
6163{
6164 struct device *dev = &pdev->dev;
6165 struct ata_host *host = dev_get_drvdata(dev);
6166
6167 ata_host_detach(host);
6168}
6169
6170
6171int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6172{
6173 unsigned long tmp = 0;
6174
6175 switch (bits->width) {
6176 case 1: {
6177 u8 tmp8 = 0;
6178 pci_read_config_byte(pdev, bits->reg, &tmp8);
6179 tmp = tmp8;
6180 break;
6181 }
6182 case 2: {
6183 u16 tmp16 = 0;
6184 pci_read_config_word(pdev, bits->reg, &tmp16);
6185 tmp = tmp16;
6186 break;
6187 }
6188 case 4: {
6189 u32 tmp32 = 0;
6190 pci_read_config_dword(pdev, bits->reg, &tmp32);
6191 tmp = tmp32;
6192 break;
6193 }
6194
6195 default:
6196 return -EINVAL;
6197 }
6198
6199 tmp &= bits->mask;
6200
6201 return (tmp == bits->val) ? 1 : 0;
6202}
6203
6204#ifdef CONFIG_PM
/*
 * PCI-specific portion of host suspend: save config space, disable
 * the device and, for real sleep states, drop it to D3hot.  The ATA
 * host itself is quiesced by the caller (see ata_pci_device_suspend()).
 */
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event & PM_EVENT_SLEEP)
		pci_set_power_state(pdev, PCI_D3hot);
}
6213
/*
 * PCI-specific portion of host resume: restore power state and config
 * space, re-enable the device (managed) and restore bus mastering.
 * Returns 0 on success, -errno if the device couldn't be re-enabled.
 */
int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev,
			"failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}
6231
6232int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6233{
6234 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6235 int rc = 0;
6236
6237 rc = ata_host_suspend(host, mesg);
6238 if (rc)
6239 return rc;
6240
6241 ata_pci_device_do_suspend(pdev, mesg);
6242
6243 return 0;
6244}
6245
6246int ata_pci_device_resume(struct pci_dev *pdev)
6247{
6248 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6249 int rc;
6250
6251 rc = ata_pci_device_do_resume(pdev);
6252 if (rc == 0)
6253 ata_host_resume(host);
6254 return rc;
6255}
6256#endif
6257
6258#endif
6259
6260static int __init ata_parse_force_one(char **cur,
6261 struct ata_force_ent *force_ent,
6262 const char **reason)
6263{
6264
6265
6266
6267
6268
6269 static struct ata_force_param force_tbl[] __initdata = {
6270 { "40c", .cbl = ATA_CBL_PATA40 },
6271 { "80c", .cbl = ATA_CBL_PATA80 },
6272 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
6273 { "unk", .cbl = ATA_CBL_PATA_UNK },
6274 { "ign", .cbl = ATA_CBL_PATA_IGN },
6275 { "sata", .cbl = ATA_CBL_SATA },
6276 { "1.5Gbps", .spd_limit = 1 },
6277 { "3.0Gbps", .spd_limit = 2 },
6278 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
6279 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
6280 { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID },
6281 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
6282 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
6283 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
6284 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
6285 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
6286 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
6287 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
6288 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
6289 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
6290 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
6291 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
6292 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
6293 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6294 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6295 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6296 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6297 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6298 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6299 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6300 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6301 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6302 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6303 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6304 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6305 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6306 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6307 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6308 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6309 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6310 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6311 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6312 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6313 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6314 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
6315 { "nohrst", .lflags = ATA_LFLAG_NO_HRST },
6316 { "nosrst", .lflags = ATA_LFLAG_NO_SRST },
6317 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6318 };
6319 char *start = *cur, *p = *cur;
6320 char *id, *val, *endp;
6321 const struct ata_force_param *match_fp = NULL;
6322 int nr_matches = 0, i;
6323
6324
6325 while (*p != '\0' && *p != ',')
6326 p++;
6327
6328 if (*p == '\0')
6329 *cur = p;
6330 else
6331 *cur = p + 1;
6332
6333 *p = '\0';
6334
6335
6336 p = strchr(start, ':');
6337 if (!p) {
6338 val = strstrip(start);
6339 goto parse_val;
6340 }
6341 *p = '\0';
6342
6343 id = strstrip(start);
6344 val = strstrip(p + 1);
6345
6346
6347 p = strchr(id, '.');
6348 if (p) {
6349 *p++ = '\0';
6350 force_ent->device = simple_strtoul(p, &endp, 10);
6351 if (p == endp || *endp != '\0') {
6352 *reason = "invalid device";
6353 return -EINVAL;
6354 }
6355 }
6356
6357 force_ent->port = simple_strtoul(id, &endp, 10);
6358 if (p == endp || *endp != '\0') {
6359 *reason = "invalid port/link";
6360 return -EINVAL;
6361 }
6362
6363 parse_val:
6364
6365 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6366 const struct ata_force_param *fp = &force_tbl[i];
6367
6368 if (strncasecmp(val, fp->name, strlen(val)))
6369 continue;
6370
6371 nr_matches++;
6372 match_fp = fp;
6373
6374 if (strcasecmp(val, fp->name) == 0) {
6375 nr_matches = 1;
6376 break;
6377 }
6378 }
6379
6380 if (!nr_matches) {
6381 *reason = "unknown value";
6382 return -EINVAL;
6383 }
6384 if (nr_matches > 1) {
6385 *reason = "ambigious value";
6386 return -EINVAL;
6387 }
6388
6389 force_ent->param = *match_fp;
6390
6391 return 0;
6392}
6393
6394static void __init ata_parse_force_param(void)
6395{
6396 int idx = 0, size = 1;
6397 int last_port = -1, last_device = -1;
6398 char *p, *cur, *next;
6399
6400
6401 for (p = ata_force_param_buf; *p; p++)
6402 if (*p == ',')
6403 size++;
6404
6405 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6406 if (!ata_force_tbl) {
6407 printk(KERN_WARNING "ata: failed to extend force table, "
6408 "libata.force ignored\n");
6409 return;
6410 }
6411
6412
6413 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6414 const char *reason = "";
6415 struct ata_force_ent te = { .port = -1, .device = -1 };
6416
6417 next = cur;
6418 if (ata_parse_force_one(&next, &te, &reason)) {
6419 printk(KERN_WARNING "ata: failed to parse force "
6420 "parameter \"%s\" (%s)\n",
6421 cur, reason);
6422 continue;
6423 }
6424
6425 if (te.port == -1) {
6426 te.port = last_port;
6427 te.device = last_device;
6428 }
6429
6430 ata_force_tbl[idx++] = te;
6431
6432 last_port = te.port;
6433 last_device = te.device;
6434 }
6435
6436 ata_force_tbl_size = idx;
6437}
6438
6439static int __init ata_init(void)
6440{
6441 int rc;
6442
6443 ata_parse_force_param();
6444
6445 rc = ata_sff_init();
6446 if (rc) {
6447 kfree(ata_force_tbl);
6448 return rc;
6449 }
6450
6451 libata_transport_init();
6452 ata_scsi_transport_template = ata_attach_transport();
6453 if (!ata_scsi_transport_template) {
6454 ata_sff_exit();
6455 rc = -ENOMEM;
6456 goto err_out;
6457 }
6458
6459 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6460 return 0;
6461
6462err_out:
6463 return rc;
6464}
6465
/* module unload: unwind ata_init() in reverse order */
static void __exit ata_exit(void)
{
	ata_release_transport(ata_scsi_transport_template);
	libata_transport_exit();
	ata_sff_exit();
	kfree(ata_force_tbl);
}
6473
6474subsys_initcall(ata_init);
6475module_exit(ata_exit);
6476
/* global ratelimit state shared by all rate-limited libata messages */
static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);

/* nonzero when the caller is allowed to emit a rate-limited message */
int ata_ratelimit(void)
{
	return __ratelimit(&ratelimit);
}
6483
6484
6485
6486
6487
6488
6489
6490
6491
6492
6493
6494
6495
6496
6497
6498void ata_msleep(struct ata_port *ap, unsigned int msecs)
6499{
6500 bool owns_eh = ap && ap->host->eh_owner == current;
6501
6502 if (owns_eh)
6503 ata_eh_release(ap);
6504
6505 msleep(msecs);
6506
6507 if (owns_eh)
6508 ata_eh_acquire(ap);
6509}
6510
6511
6512
6513
6514
6515
6516
6517
6518
6519
6520
6521
6522
6523
6524
6525
6526
6527
6528
6529
6530
6531
6532
6533
6534
6535u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
6536 unsigned long interval, unsigned long timeout)
6537{
6538 unsigned long deadline;
6539 u32 tmp;
6540
6541 tmp = ioread32(reg);
6542
6543
6544
6545
6546
6547 deadline = ata_deadline(jiffies, timeout);
6548
6549 while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6550 ata_msleep(ap, interval);
6551 tmp = ioread32(reg);
6552 }
6553
6554 return tmp;
6555}
6556
6557
6558
6559
/* qc_issue for the dummy port: fail every command with AC_ERR_SYSTEM */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}
6564
/* error handler for the dummy port: nothing to recover, do nothing */
static void ata_dummy_error_handler(struct ata_port *ap)
{
	/* truly dummy */
}
6569
/* minimal port ops for a port slot with no usable hardware attached */
struct ata_port_operations ata_dummy_port_ops = {
	.qc_prep = ata_noop_qc_prep,
	.qc_issue = ata_dummy_qc_issue,
	.error_handler = ata_dummy_error_handler,
};

/* port_info wrapper for ata_dummy_port_ops */
const struct ata_port_info ata_dummy_port_info = {
	.port_ops = &ata_dummy_port_ops,
};
6579
6580
6581
6582
/* printk with "ata%u: " prefix derived from the port's print_id */
int ata_port_printk(const struct ata_port *ap, const char *level,
		    const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	int r;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	r = printk("%sata%u: %pV", level, ap->print_id, &vaf);

	va_end(args);

	return r;
}
6601EXPORT_SYMBOL(ata_port_printk);
6602
/* printk for a link: "ata%u.%02u: " when the link number matters
 * (PMP attached or a slave link exists), plain "ata%u: " otherwise */
int ata_link_printk(const struct ata_link *link, const char *level,
		    const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	int r;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	if (sata_pmp_attached(link->ap) || link->ap->slave_link)
		r = printk("%sata%u.%02u: %pV",
			   level, link->ap->print_id, link->pmp, &vaf);
	else
		r = printk("%sata%u: %pV",
			   level, link->ap->print_id, &vaf);

	va_end(args);

	return r;
}
6626EXPORT_SYMBOL(ata_link_printk);
6627
/* printk with "ata%u.%02u: " prefix; the second number combines the
 * link's pmp number and the device number on that link */
int ata_dev_printk(const struct ata_device *dev, const char *level,
		   const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	int r;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	r = printk("%sata%u.%02u: %pV",
		   level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
		   &vaf);

	va_end(args);

	return r;
}
6648EXPORT_SYMBOL(ata_dev_printk);
6649
/* log a driver version string at KERN_DEBUG against @dev */
void ata_print_version(const struct device *dev, const char *version)
{
	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
}
6654EXPORT_SYMBOL(ata_print_version);
6655
6656
6657
6658
6659
6660
6661
/* Symbol exports for low-level ATA drivers, grouped by subsystem area. */

/* SATA debounce timing tables and base port operation templates */
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_base_port_ops);
EXPORT_SYMBOL_GPL(sata_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
/* core host/port/link/device lifecycle and command helpers */
EXPORT_SYMBOL_GPL(ata_link_next);
EXPORT_SYMBOL_GPL(ata_dev_next);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_host_alloc);
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
EXPORT_SYMBOL_GPL(ata_slave_link_init);
EXPORT_SYMBOL_GPL(ata_host_start);
EXPORT_SYMBOL_GPL(ata_host_register);
EXPORT_SYMBOL_GPL(ata_host_activate);
EXPORT_SYMBOL_GPL(ata_host_detach);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(atapi_cmd_type);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
/* transfer-mode mask/mode conversion and mode setting */
EXPORT_SYMBOL_GPL(ata_pack_xfermask);
EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
EXPORT_SYMBOL_GPL(ata_mode_string);
EXPORT_SYMBOL_GPL(ata_id_xfermask);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_std_qc_defer);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_dev_disable);
/* SATA PHY / reset helpers */
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(ata_wait_after_reset);
EXPORT_SYMBOL_GPL(sata_link_debounce);
EXPORT_SYMBOL_GPL(sata_link_resume);
EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(sata_link_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_msleep);
EXPORT_SYMBOL_GPL(ata_wait_register);
/* SCSI midlayer glue */
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
/* SCR register access and link state queries */
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_link_online);
EXPORT_SYMBOL_GPL(ata_link_offline);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

/* PATA timing computation helpers */
EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_find_mode);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);
EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);

/* PCI helpers (only built when PCI support is enabled) */
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif
#endif

/* error-handling (EH) infrastructure */
EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
EXPORT_SYMBOL_GPL(ata_port_desc);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
#endif
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_link_abort);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(sata_async_notification);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
EXPORT_SYMBOL_GPL(ata_do_eh);
EXPORT_SYMBOL_GPL(ata_std_error_handler);

/* cable-type detection callbacks for LLDs */
EXPORT_SYMBOL_GPL(ata_cable_40wire);
EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_ignore);
EXPORT_SYMBOL_GPL(ata_cable_sata);
6773