1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43#include <linux/kernel.h>
44#include <linux/module.h>
45#include <linux/pci.h>
46#include <linux/init.h>
47#include <linux/list.h>
48#include <linux/mm.h>
49#include <linux/spinlock.h>
50#include <linux/blkdev.h>
51#include <linux/delay.h>
52#include <linux/timer.h>
53#include <linux/time.h>
54#include <linux/interrupt.h>
55#include <linux/completion.h>
56#include <linux/suspend.h>
57#include <linux/workqueue.h>
58#include <linux/scatterlist.h>
59#include <linux/io.h>
60#include <linux/async.h>
61#include <linux/log2.h>
62#include <linux/slab.h>
63#include <linux/glob.h>
64#include <scsi/scsi.h>
65#include <scsi/scsi_cmnd.h>
66#include <scsi/scsi_host.h>
67#include <linux/libata.h>
68#include <asm/byteorder.h>
69#include <asm/unaligned.h>
70#include <linux/cdrom.h>
71#include <linux/ratelimit.h>
72#include <linux/pm_runtime.h>
73#include <linux/platform_device.h>
74
75#define CREATE_TRACE_POINTS
76#include <trace/events/libata.h>
77
78#include "libata.h"
79#include "libata-transport.h"
80
81
/*
 * SATA link debounce timing parameter sets.
 * Each table is { interval, duration, timeout } — presumably in
 * milliseconds; confirm against the debounce helpers that consume them.
 */
const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
85
/* Base port operations inherited by all libata drivers: standard
 * error-handling hooks only; transport-specific ops are layered on top. */
const struct ata_port_operations ata_base_port_ops = {
	.prereset = ata_std_prereset,
	.postreset = ata_std_postreset,
	.error_handler = ata_std_error_handler,
	.sched_eh = ata_std_sched_eh,
	.end_eh = ata_std_end_eh,
};
93
/* Port operations for SATA ports: base ops plus standard qc deferral
 * and SATA hardreset. */
const struct ata_port_operations sata_port_ops = {
	.inherits = &ata_base_port_ops,

	.qc_defer = ata_std_qc_defer,
	.hardreset = sata_std_hardreset,
};
100
/* Forward declarations for helpers defined later in this file. */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

/* Monotonic counter used to assign print IDs to ports/devices. */
atomic_t ata_print_id = ATOMIC_INIT(0);
108
/* One parsed "libata.force=" setting. */
struct ata_force_param {
	const char *name;		/* human-readable name, used in FORCE: messages */
	unsigned int cbl;		/* forced cable type; ATA_CBL_NONE = not forced */
	int spd_limit;			/* forced PHY speed limit; 0 = not forced */
	unsigned long xfer_mask;	/* forced transfer mode mask; 0 = not forced */
	unsigned int horkage_on;	/* horkage flags to set */
	unsigned int horkage_off;	/* horkage flags to clear */
	unsigned int lflags;		/* link flags to set */
};
118
/* A force-table entry: which port/device a parameter applies to.
 * -1 in port or device acts as a wildcard (see the matching loops below). */
struct ata_force_ent {
	int port;
	int device;
	struct ata_force_param param;
};
124
/* Table of parsed force entries and its length, built at init time. */
static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

/* Raw "force" parameter string; __initdata since it is parsed once at boot. */
static char ata_force_param_buf[PAGE_SIZE] __initdata;

/* Permission 0 disables the sysfs entry; param is boot-time only. */
module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
132
/* Module parameters controlling optional libata behavior. */
static int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");

static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");

/* 0644: changeable at runtime via sysfs, unlike the read-only params above. */
static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
168
169static int atapi_an;
170module_param(atapi_an, int, 0444);
171MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=0ff [default], 1=on)");
172
/* Module identification. */
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
177
178
179static bool ata_sstatus_online(u32 sstatus)
180{
181 return (sstatus & 0xf) == 0x3;
182}
183
184
185
186
187
188
189
190
191
192
193
194
195
/**
 *	ata_link_next - link iteration helper
 *	@link: the previous link, NULL to start
 *	@ap: ATA port containing links to iterate
 *	@mode: iteration mode, one of ATA_LITER_*
 *
 *	RETURNS:
 *	Pointer to the next link (first link when @link is NULL), or
 *	NULL when iteration is complete.
 */
struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
			       enum ata_link_iter_mode mode)
{
	BUG_ON(mode != ATA_LITER_EDGE &&
	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);

	/* NULL link indicates start of iteration */
	if (!link)
		switch (mode) {
		case ATA_LITER_EDGE:
		case ATA_LITER_PMP_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through: no PMP, start with the host link */
		case ATA_LITER_HOST_FIRST:
			return &ap->link;
		}

	/* we just iterated over the host link, what's next? */
	if (link == &ap->link)
		switch (mode) {
		case ATA_LITER_HOST_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_PMP_FIRST:
			if (unlikely(ap->slave_link))
				return ap->slave_link;
			/* fall through */
		case ATA_LITER_EDGE:
			return NULL;
		}

	/* slave_link has no downstream links; it terminates iteration */
	if (unlikely(link == ap->slave_link))
		return NULL;

	/* we were over a PMP link; advance to the next one if any */
	if (++link < ap->pmp_link + ap->nr_pmp_links)
		return link;

	/* PMP links exhausted; PMP-first mode visits the host link last */
	if (mode == ATA_LITER_PMP_FIRST)
		return &ap->link;

	return NULL;
}
242
243
244
245
246
247
248
249
250
251
252
253
254
/**
 *	ata_dev_next - device iteration helper
 *	@dev: the previous device, NULL to start
 *	@link: ATA link containing devices to iterate
 *	@mode: iteration mode, one of ATA_DITER_*
 *
 *	RETURNS:
 *	Pointer to the next device (first device when @dev is NULL), or
 *	NULL when iteration is complete.  In the ENABLED modes, disabled
 *	devices are skipped.
 */
struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
				enum ata_dev_iter_mode mode)
{
	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);

	/* NULL dev indicates start of iteration */
	if (!dev)
		switch (mode) {
		case ATA_DITER_ENABLED:
		case ATA_DITER_ALL:
			/* forward modes start at the first device */
			dev = link->device;
			goto check;
		case ATA_DITER_ENABLED_REVERSE:
		case ATA_DITER_ALL_REVERSE:
			/* reverse modes start at the last device */
			dev = link->device + ata_link_max_devices(link) - 1;
			goto check;
		}

 next:
	/* move to the next entry in the requested direction */
	switch (mode) {
	case ATA_DITER_ENABLED:
	case ATA_DITER_ALL:
		if (++dev < link->device + ata_link_max_devices(link))
			goto check;
		return NULL;
	case ATA_DITER_ENABLED_REVERSE:
	case ATA_DITER_ALL_REVERSE:
		if (--dev >= link->device)
			goto check;
		return NULL;
	}

 check:
	/* ENABLED modes skip devices that are not enabled */
	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
	    !ata_dev_enabled(dev))
		goto next;
	return dev;
}
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310struct ata_link *ata_dev_phys_link(struct ata_device *dev)
311{
312 struct ata_port *ap = dev->link->ap;
313
314 if (!ap->slave_link)
315 return dev->link;
316 if (!dev->devno)
317 return &ap->link;
318 return ap->slave_link;
319}
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334void ata_force_cbl(struct ata_port *ap)
335{
336 int i;
337
338 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
339 const struct ata_force_ent *fe = &ata_force_tbl[i];
340
341 if (fe->port != -1 && fe->port != ap->print_id)
342 continue;
343
344 if (fe->param.cbl == ATA_CBL_NONE)
345 continue;
346
347 ap->cbl = fe->param.cbl;
348 ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
349 return;
350 }
351}
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
/**
 *	ata_force_link_limits - force link limits according to libata.force
 *	@link: ATA link of interest
 *
 *	Apply forced PHY speed limit and link flags from the force table.
 *	Entries are scanned from the end so later entries take precedence;
 *	only the first matching spd limit is applied, while all matching
 *	lflags entries accumulate.
 */
static void ata_force_link_limits(struct ata_link *link)
{
	bool did_spd = false;
	int linkno = link->pmp;
	int i;

	/* host links use an offset device number (15+) in the force
	 * syntax — presumably to distinguish them from PMP links;
	 * matches the alt_devno scheme used below for devices */
	if (ata_is_host_link(link))
		linkno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != linkno)
			continue;

		/* only honor the first (i.e. last-specified) spd limit */
		if (!did_spd && fe->param.spd_limit) {
			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
					fe->param.name);
			did_spd = true;
		}

		/* link flags from every matching entry are OR'd in */
		if (fe->param.lflags) {
			link->flags |= fe->param.lflags;
			ata_link_notice(link,
					"FORCE: link flag 0x%x forced -> 0x%x\n",
					fe->param.lflags, link->flags);
		}
	}
}
404
405
406
407
408
409
410
411
412
413
414
415
/**
 *	ata_force_xfermask - force xfermask according to libata.force
 *	@dev: ATA device of interest
 *
 *	Apply the last matching transfer-mode entry from the force table.
 *	The forced mode replaces all faster classes: forcing UDMA keeps
 *	MWDMA/PIO masks, forcing MWDMA clears UDMA, forcing PIO clears
 *	both DMA masks.
 */
static void ata_force_xfermask(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* devices on the host link also match the offset (15+) numbering */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	/* scan backwards so the last matching entry wins */
	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];
		unsigned long pio_mask, mwdma_mask, udma_mask;

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!fe->param.xfer_mask)
			continue;

		ata_unpack_xfermask(fe->param.xfer_mask,
				    &pio_mask, &mwdma_mask, &udma_mask);
		if (udma_mask)
			dev->udma_mask = udma_mask;
		else if (mwdma_mask) {
			dev->udma_mask = 0;
			dev->mwdma_mask = mwdma_mask;
		} else {
			dev->udma_mask = 0;
			dev->mwdma_mask = 0;
			dev->pio_mask = pio_mask;
		}

		ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
			       fe->param.name);
		return;
	}
}
458
459
460
461
462
463
464
465
466
467
468
469
/**
 *	ata_force_horkage - force horkage according to libata.force
 *	@dev: ATA device of interest
 *
 *	Apply all matching horkage entries.  Unlike the other force
 *	helpers this scans forward, so later entries override earlier
 *	ones when they touch the same flags.
 */
static void ata_force_horkage(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* devices on the host link also match the offset (15+) numbering */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = 0; i < ata_force_tbl_size; i++) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		/* skip entries that would make no change */
		if (!(~dev->horkage & fe->param.horkage_on) &&
		    !(dev->horkage & fe->param.horkage_off))
			continue;

		dev->horkage |= fe->param.horkage_on;
		dev->horkage &= ~fe->param.horkage_off;

		ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
			       fe->param.name);
	}
}
501
502
503
504
505
506
507
508
509
510
511
512
513
514int atapi_cmd_type(u8 opcode)
515{
516 switch (opcode) {
517 case GPCMD_READ_10:
518 case GPCMD_READ_12:
519 return ATAPI_READ;
520
521 case GPCMD_WRITE_10:
522 case GPCMD_WRITE_12:
523 case GPCMD_WRITE_AND_VERIFY_10:
524 return ATAPI_WRITE;
525
526 case GPCMD_READ_CD:
527 case GPCMD_READ_CD_MSF:
528 return ATAPI_READ_CD;
529
530 case ATA_16:
531 case ATA_12:
532 if (atapi_passthru16)
533 return ATAPI_PASS_THRU;
534
535 default:
536 return ATAPI_MISC;
537 }
538}
539
540
541
542
543
544
545
546
547
548
549
550
551
552
/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@pmp: Port multiplier port
 *	@is_cmd: This FIS is for command
 *	@fis: Buffer into which data will output (at least 20 bytes)
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	Register - Host to Device FIS structure (FIS type 0x27).
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = pmp & 0xf;		/* port multiplier number */
	if (is_cmd)
		fis[1] |= (1 << 7);	/* C bit: update command register */

	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	/* high-order bytes for 48-bit commands */
	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	/* auxiliary field, little-endian in bytes 16-19 */
	fis[16] = tf->auxiliary & 0xff;
	fis[17] = (tf->auxiliary >> 8) & 0xff;
	fis[18] = (tf->auxiliary >> 16) & 0xff;
	fis[19] = (tf->auxiliary >> 24) & 0xff;
}
583
584
585
586
587
588
589
590
591
592
593
594
/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *	Note: fis[3] holds the status/error pair's second byte here
 *	(Register - Device to Host layout), read into tf->feature.
 */
void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command = fis[2];	/* status */
	tf->feature = fis[3];	/* error */

	tf->lbal = fis[4];
	tf->lbam = fis[5];
	tf->lbah = fis[6];
	tf->device = fis[7];

	tf->hob_lbal = fis[8];
	tf->hob_lbam = fis[9];
	tf->hob_lbah = fis[10];

	tf->nsect = fis[12];
	tf->hob_nsect = fis[13];
}
612
/*
 * Read/write command opcodes, indexed by ata_rwcmd_protocol() as
 * [class + fua*4 + lba48*2 + write], where class is 0 (multi-mode PIO),
 * 8 (single-sector PIO) or 16 (DMA).  Zero entries mean "unsupported
 * combination" (e.g. FUA without LBA48).
 */
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
642
643
644
645
646
647
648
649
650
651
652
653
/**
 *	ata_rwcmd_protocol - set taskfile r/w command and protocol
 *	@tf: command to examine and configure
 *	@dev: device tf belongs to
 *
 *	Examine the device configuration and tf->flags to select
 *	the proper read/write command and protocol to use.
 *
 *	RETURNS:
 *	0 on success, -1 if no suitable command exists for the
 *	fua/lba48/write combination (zero entry in ata_rw_cmds).
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	/* offsets into ata_rw_cmds: fua=4, lba48=2, write=1 */
	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* controller cannot do LBA48 DMA, fall back to PIO */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	Decode the starting block address from @tf, handling LBA48,
 *	LBA28 and CHS addressing.
 *
 *	RETURNS:
 *	Block address read from @tf, or U64_MAX if the taskfile
 *	reports CHS sector 0 (invalid).
 */
u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			/* LBA48: high bytes live in the HOB registers */
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= (u64)tf->hob_lbal << 24;
		} else
			/* LBA28: top nibble comes from the device register */
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		/* CHS sectors are 1-based; 0 is invalid */
		if (!sect) {
			ata_dev_warn(dev,
				     "device reported invalid CHS sector 0\n");
			return U64_MAX;
		}

		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
	}

	return block;
}
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag (NCQ only)
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.  Chooses NCQ,
 *	LBA48, LBA28 or CHS addressing depending on device capability
 *	and request range.
 *
 *	RETURNS:
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if transfer command is not supported.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	/* internal commands never use NCQ even if it is enabled */
	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		/* NCQ: tag goes in nsect bits 7:3, count in feature regs */
		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = ATA_LBA;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;	/* FUA bit */
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28; top nibble in the device register */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl = track / dev->heads;
		head = track % dev->heads;
		sect = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/*
		 * Check whether the converted CHS can fit.
		 * Cylinder: 0-65535, Head: 0-15, Sector: 1-255
		 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff;	/* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871unsigned long ata_pack_xfermask(unsigned long pio_mask,
872 unsigned long mwdma_mask,
873 unsigned long udma_mask)
874{
875 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
876 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
877 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
878}
879
880
881
882
883
884
885
886
887
888
889
/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	NULL output pointers are simply skipped.
 */
void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
			 unsigned long *mwdma_mask, unsigned long *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
900
/* Maps between xfer_mask bit ranges and XFER_* mode base values. */
static const struct ata_xfer_ent {
	int shift, bits;	/* position and width of the group in xfer_mask */
	u8 base;		/* first XFER_* value of the group */
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },	/* sentinel */
};
910
911
912
913
914
915
916
917
918
919
920
921
922
923
/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0xff if no match found.
 */
u8 ata_xfer_mask2mode(unsigned long xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0xff;
}
934
935
936
937
938
939
940
941
942
943
944
945
946
/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode.  The returned mask
 *	includes @xfer_mode and all slower modes of the same class.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
unsigned long ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
				& ~((1 << ent->shift) - 1);
	return 0;
}
957
958
959
960
961
962
963
964
965
966
967
968
969
970int ata_xfer_mode2shift(unsigned long xfer_mode)
971{
972 const struct ata_xfer_ent *ent;
973
974 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
975 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
976 return ent->shift;
977 return -1;
978}
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	/* index order mirrors the xfer_mask bit layout:
	 * PIO0-6, MWDMA0-4, then UDMA modes */
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}
1025
/* Convert a 1-based SATA link speed number to a human-readable string. */
const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
		"6.0 Gbps",
	};
	unsigned int idx = spd - 1;

	/* spd is 1-based; 0 and out-of-range values are unknown */
	if (spd == 0 || idx >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[idx];
}
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section of the
 *	ATA/PI specification, plus the PMP, SEMB and ZAC signatures.
 *	The classification is based solely on the lbam/lbah register
 *	pair:
 *
 *	  lbam  lbah  class
 *	  0x00  0x00  ATA
 *	  0x14  0xeb  ATAPI
 *	  0x69  0x96  PMP (port multiplier)
 *	  0x3c  0xc3  SEMB (may also be an ATA device)
 *	  0xcd  0xab  ZAC (host-managed zoned)
 *
 *	RETURNS:
 *	Device type, one of %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
 *	%ATA_DEV_SEMB, %ATA_DEV_ZAC or %ATA_DEV_UNKNOWN.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");
		return ATA_DEV_PMP;
	}

	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		DPRINTK("found SEMB device by sig (could be ATA device)\n");
		return ATA_DEV_SEMB;
	}

	if ((tf->lbam == 0xcd) && (tf->lbah == 0xab)) {
		DPRINTK("found ZAC device by sig\n");
		return ATA_DEV_ZAC;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121void ata_id_string(const u16 *id, unsigned char *s,
1122 unsigned int ofs, unsigned int len)
1123{
1124 unsigned int c;
1125
1126 BUG_ON(len & 1);
1127
1128 while (len > 0) {
1129 c = id[ofs] >> 8;
1130 *s = c;
1131 s++;
1132
1133 c = id[ofs] & 0xff;
1134 *s = c;
1135 s++;
1136
1137 ofs++;
1138 len -= 2;
1139 }
1140}
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return, must be an odd number
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	ata_id_string(id, s, ofs, len - 1);

	/* trim trailing spaces and NUL-terminate */
	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}
1168
1169static u64 ata_id_n_sectors(const u16 *id)
1170{
1171 if (ata_id_has_lba(id)) {
1172 if (ata_id_has_lba48(id))
1173 return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
1174 else
1175 return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
1176 } else {
1177 if (ata_id_current_chs_valid(id))
1178 return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
1179 id[ATA_ID_CUR_SECTORS];
1180 else
1181 return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
1182 id[ATA_ID_SECTORS];
1183 }
1184}
1185
/* Assemble a 48-bit LBA from the taskfile's LBA and HOB-LBA registers. */
u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}
1199
/* Assemble a 28-bit LBA: low nibble of the device register forms
 * bits 27:24, followed by lbah/lbam/lbal. */
u64 ata_tf_to_lba(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= (tf->device & 0x0f) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1225{
1226 unsigned int err_mask;
1227 struct ata_taskfile tf;
1228 int lba48 = ata_id_has_lba48(dev->id);
1229
1230 ata_tf_init(dev, &tf);
1231
1232
1233 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1234
1235 if (lba48) {
1236 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1237 tf.flags |= ATA_TFLAG_LBA48;
1238 } else
1239 tf.command = ATA_CMD_READ_NATIVE_MAX;
1240
1241 tf.protocol |= ATA_PROT_NODATA;
1242 tf.device |= ATA_LBA;
1243
1244 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1245 if (err_mask) {
1246 ata_dev_warn(dev,
1247 "failed to read native max address (err_mask=0x%x)\n",
1248 err_mask);
1249 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1250 return -EACCES;
1251 return -EIO;
1252 }
1253
1254 if (lba48)
1255 *max_sectors = ata_tf_to_lba48(&tf) + 1;
1256 else
1257 *max_sectors = ata_tf_to_lba(&tf) + 1;
1258 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1259 (*max_sectors)--;
1260 return 0;
1261}
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1276{
1277 unsigned int err_mask;
1278 struct ata_taskfile tf;
1279 int lba48 = ata_id_has_lba48(dev->id);
1280
1281 new_sectors--;
1282
1283 ata_tf_init(dev, &tf);
1284
1285 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1286
1287 if (lba48) {
1288 tf.command = ATA_CMD_SET_MAX_EXT;
1289 tf.flags |= ATA_TFLAG_LBA48;
1290
1291 tf.hob_lbal = (new_sectors >> 24) & 0xff;
1292 tf.hob_lbam = (new_sectors >> 32) & 0xff;
1293 tf.hob_lbah = (new_sectors >> 40) & 0xff;
1294 } else {
1295 tf.command = ATA_CMD_SET_MAX;
1296
1297 tf.device |= (new_sectors >> 24) & 0xf;
1298 }
1299
1300 tf.protocol |= ATA_PROT_NODATA;
1301 tf.device |= ATA_LBA;
1302
1303 tf.lbal = (new_sectors >> 0) & 0xff;
1304 tf.lbam = (new_sectors >> 8) & 0xff;
1305 tf.lbah = (new_sectors >> 16) & 0xff;
1306
1307 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1308 if (err_mask) {
1309 ata_dev_warn(dev,
1310 "failed to set max address (err_mask=0x%x)\n",
1311 err_mask);
1312 if (err_mask == AC_ERR_DEV &&
1313 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1314 return -EACCES;
1315 return -EIO;
1316 }
1317
1318 return 0;
1319}
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
/**
 *	ata_hpa_resize - Resize device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and
 *	resize it if required to the full size of the media.  The caller
 *	must check the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? only ATA/ZAC disks with LBA, HPA enabled
	 * and not marked broken qualify */
	if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or we're not unlocking
		 * HPA anyway, assume HPA support is broken and skip
		 * HPA handling from now on.
		 */
		if (rc == -EACCES || !unlock_hpa) {
			ata_dev_warn(dev,
				     "HPA support seems broken, skipping HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* an abort is not fatal to the probe */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}
	dev->n_native_sectors = native_sectors;

	/* nothing to do, or we're told not to unlock?  just report */
	if (native_sectors <= sectors || !unlock_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_info(dev,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_warn(dev,
				"native sectors (%llu) is smaller than sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_warn(dev,
			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
			     (unsigned long long)sectors,
			     (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data to pick up the new capacity */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_err(dev,
			    "failed to re-read IDENTIFY data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_info(dev,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page (capability, timing and feature words).
 */
static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device.  This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	RETURNS:
 *	Computed xfermask (packed pio/mwdma/udma masks).
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case.  Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;	/* PIO0-2 always supported */
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;	/* unknown mode number: PIO0 only */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/* CompactFlash advertises extra PIO5/6 and MWDMA3/4
		 * modes in the CFA modes word.
		 */
		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	/* Word 53 bit 2 indicates the UDMA modes word is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
1525
1526static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1527{
1528 struct completion *waiting = qc->private_data;
1529
1530 complete(waiting);
1531}
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sgl: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	the command on entry and the result on return.  Timeout and error
 *	conditions are reported via the return value.  No recovery action
 *	is taken after a command times out.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	int auto_timeout = 0;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while the port is frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc: new-EH ports reserve ATA_TAG_INTERNAL,
	 * old-EH ports use tag 0 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* save the current in-flight accounting and pretend the port is
	 * idle so the internal command can be issued; restored below */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);

	/* some SATA bridges need us to indicate data xfer direction */
	if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
	    dma_dir == DMA_FROM_DEVICE)
		qc->tf.feature |= ATAPI_DMADIR;

	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	/* pick a timeout: explicit, module-parameter, or per-command */
	if (!timeout) {
		if (ata_probe_timeout)
			timeout = ata_probe_timeout * 1000;
		else {
			timeout = ata_internal_cmd_timeout(dev, command);
			auto_timeout = 1;
		}
	}

	/* release EH while waiting so EH can make progress if needed */
	if (ap->ops->error_handler)
		ata_eh_release(ap);

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	if (ap->ops->error_handler)
		ata_eh_acquire(ap);

	ata_sff_flush_pio_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
					     command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		/* AC_ERR_OTHER is a catch-all; drop it when a specific
		 * cause is known */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up: copy out result, free qc and restore accounting */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	spin_unlock_irqrestore(ap->lock, flags);

	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
		ata_internal_cmd_timed_out(dev, command);

	return err_mask;
}
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738unsigned ata_exec_internal(struct ata_device *dev,
1739 struct ata_taskfile *tf, const u8 *cdb,
1740 int dma_dir, void *buf, unsigned int buflen,
1741 unsigned long timeout)
1742{
1743 struct scatterlist *psg = NULL, sg;
1744 unsigned int n_elem = 0;
1745
1746 if (dma_dir != DMA_NONE) {
1747 WARN_ON(!buf);
1748 sg_init_one(&sg, buf, buflen);
1749 psg = &sg;
1750 n_elem++;
1751 }
1752
1753 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1754 timeout);
1755}
1756
1757
1758
1759
1760
1761
1762
1763
1764unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1765{
1766
1767
1768
1769
1770 if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1771 return 0;
1772
1773
1774
1775 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1776 return 0;
1777
1778 if (ata_id_is_cfa(adev->id)
1779 && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1780 return 0;
1781
1782 if (adev->pio_mode > XFER_PIO_2)
1783 return 1;
1784
1785 if (ata_id_has_iordy(adev->id))
1786 return 1;
1787 return 0;
1788}
1789
1790
1791
1792
1793
1794
1795
1796
1797static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1798{
1799
1800 if (adev->id[ATA_ID_FIELD_VALID] & 2) {
1801 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1802
1803 if (pio) {
1804
1805 if (pio > 240)
1806 return 3 << ATA_SHIFT_PIO;
1807 return 7 << ATA_SHIFT_PIO;
1808 }
1809 }
1810 return 3 << ATA_SHIFT_PIO;
1811}
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823unsigned int ata_do_dev_read_id(struct ata_device *dev,
1824 struct ata_taskfile *tf, u16 *id)
1825{
1826 return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1827 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1828}
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	bool is_semb = class == ATA_DEV_SEMB;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_dbg(dev, "%s: ENTER\n", __func__);

retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_SEMB:
		/* some hard drives report SEMB sig; treat as ATA */
		class = ATA_DEV_ATA;
		/* fall through */
	case ATA_DEV_ATA:
	case ATA_DEV_ZAC:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	if (ap->ops->read_id)
		err_mask = ap->ops->read_id(dev, &tf, id);
	else
		err_mask = ata_do_dev_read_id(dev, &tf, id);

	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			ata_dev_dbg(dev, "NODEV after polling detection\n");
			return -ENOENT;
		}

		if (is_semb) {
			ata_dev_info(dev,
		"IDENTIFY failed on device w/ SEMB sig, disabled\n");
			/* SEMB is not supported yet */
			*p_class = ATA_DEV_SEMB_UNSUP;
			return 0;
		}

		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			/* Device or controller might have reported
			 * the wrong device class.  Give a shot at the
			 * other IDENTIFY if the current one is
			 * aborted by the device.
			 */
			if (may_fallback) {
				may_fallback = 0;

				if (class == ATA_DEV_ATA)
					class = ATA_DEV_ATAPI;
				else
					class = ATA_DEV_ATA;
				goto retry;
			}

			/* Control reaches here iff the device aborted
			 * both flavors of IDENTIFYs which happens
			 * sometimes with phantom devices.
			 */
			ata_dev_dbg(dev,
				    "both IDENTIFYs aborted, assuming NODEV\n");
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
		ata_dev_dbg(dev, "dumping IDENTIFY data, "
			    "class=%d may_fallback=%d tried_spinup=%d\n",
			    class, may_fallback, tried_spinup);
		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
		if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
							ata_id_is_ata(id)) {
			ata_dev_dbg(dev,
				"host indicates ignore ATA devices, ignored\n");
			return -ENOENT;
		}
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) &&
	    (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed.  reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
			     reason, err_mask);
	return rc;
}
2039
2040static int ata_do_link_spd_horkage(struct ata_device *dev)
2041{
2042 struct ata_link *plink = ata_dev_phys_link(dev);
2043 u32 target, target_limit;
2044
2045 if (!sata_scr_valid(plink))
2046 return 0;
2047
2048 if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2049 target = 1;
2050 else
2051 return 0;
2052
2053 target_limit = (1 << target) - 1;
2054
2055
2056 if (plink->sata_spd_limit <= target_limit)
2057 return 0;
2058
2059 plink->sata_spd_limit = target_limit;
2060
2061
2062
2063
2064
2065 if (plink->sata_spd > target) {
2066 ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2067 sata_spd_string(target));
2068 return -EAGAIN;
2069 }
2070 return 0;
2071}
2072
2073static inline u8 ata_dev_knobble(struct ata_device *dev)
2074{
2075 struct ata_port *ap = dev->link->ap;
2076
2077 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2078 return 0;
2079
2080 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2081}
2082
2083static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
2084{
2085 struct ata_port *ap = dev->link->ap;
2086 unsigned int err_mask;
2087 int log_index = ATA_LOG_NCQ_SEND_RECV * 2;
2088 u16 log_pages;
2089
2090 err_mask = ata_read_log_page(dev, ATA_LOG_DIRECTORY,
2091 0, ap->sector_buf, 1);
2092 if (err_mask) {
2093 ata_dev_dbg(dev,
2094 "failed to get Log Directory Emask 0x%x\n",
2095 err_mask);
2096 return;
2097 }
2098 log_pages = get_unaligned_le16(&ap->sector_buf[log_index]);
2099 if (!log_pages) {
2100 ata_dev_warn(dev,
2101 "NCQ Send/Recv Log not supported\n");
2102 return;
2103 }
2104 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
2105 0, ap->sector_buf, 1);
2106 if (err_mask) {
2107 ata_dev_dbg(dev,
2108 "failed to get NCQ Send/Recv Log Emask 0x%x\n",
2109 err_mask);
2110 } else {
2111 u8 *cmds = dev->ncq_send_recv_cmds;
2112
2113 dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
2114 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
2115
2116 if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
2117 ata_dev_dbg(dev, "disabling queued TRIM support\n");
2118 cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
2119 ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
2120 }
2121 }
2122}
2123
2124static void ata_dev_config_ncq_non_data(struct ata_device *dev)
2125{
2126 struct ata_port *ap = dev->link->ap;
2127 unsigned int err_mask;
2128 int log_index = ATA_LOG_NCQ_NON_DATA * 2;
2129 u16 log_pages;
2130
2131 err_mask = ata_read_log_page(dev, ATA_LOG_DIRECTORY,
2132 0, ap->sector_buf, 1);
2133 if (err_mask) {
2134 ata_dev_dbg(dev,
2135 "failed to get Log Directory Emask 0x%x\n",
2136 err_mask);
2137 return;
2138 }
2139 log_pages = get_unaligned_le16(&ap->sector_buf[log_index]);
2140 if (!log_pages) {
2141 ata_dev_warn(dev,
2142 "NCQ Send/Recv Log not supported\n");
2143 return;
2144 }
2145 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
2146 0, ap->sector_buf, 1);
2147 if (err_mask) {
2148 ata_dev_dbg(dev,
2149 "failed to get NCQ Non-Data Log Emask 0x%x\n",
2150 err_mask);
2151 } else {
2152 u8 *cmds = dev->ncq_non_data_cmds;
2153
2154 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE);
2155 }
2156}
2157
2158static int ata_dev_config_ncq(struct ata_device *dev,
2159 char *desc, size_t desc_sz)
2160{
2161 struct ata_port *ap = dev->link->ap;
2162 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2163 unsigned int err_mask;
2164 char *aa_desc = "";
2165
2166 if (!ata_id_has_ncq(dev->id)) {
2167 desc[0] = '\0';
2168 return 0;
2169 }
2170 if (dev->horkage & ATA_HORKAGE_NONCQ) {
2171 snprintf(desc, desc_sz, "NCQ (not used)");
2172 return 0;
2173 }
2174 if (ap->flags & ATA_FLAG_NCQ) {
2175 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2176 dev->flags |= ATA_DFLAG_NCQ;
2177 }
2178
2179 if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2180 (ap->flags & ATA_FLAG_FPDMA_AA) &&
2181 ata_id_has_fpdma_aa(dev->id)) {
2182 err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2183 SATA_FPDMA_AA);
2184 if (err_mask) {
2185 ata_dev_err(dev,
2186 "failed to enable AA (error_mask=0x%x)\n",
2187 err_mask);
2188 if (err_mask != AC_ERR_DEV) {
2189 dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2190 return -EIO;
2191 }
2192 } else
2193 aa_desc = ", AA";
2194 }
2195
2196 if (hdepth >= ddepth)
2197 snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
2198 else
2199 snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2200 ddepth, aa_desc);
2201
2202 if ((ap->flags & ATA_FLAG_FPDMA_AUX)) {
2203 if (ata_id_has_ncq_send_and_recv(dev->id))
2204 ata_dev_config_ncq_send_recv(dev);
2205 if (ata_id_has_ncq_non_data(dev->id))
2206 ata_dev_config_ncq_non_data(dev);
2207 }
2208
2209 return 0;
2210}
2211
2212static void ata_dev_config_sense_reporting(struct ata_device *dev)
2213{
2214 unsigned int err_mask;
2215
2216 if (!ata_id_has_sense_reporting(dev->id))
2217 return;
2218
2219 if (ata_id_sense_reporting_enabled(dev->id))
2220 return;
2221
2222 err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
2223 if (err_mask) {
2224 ata_dev_dbg(dev,
2225 "failed to enable Sense Data Reporting, Emask 0x%x\n",
2226 err_mask);
2227 }
2228}
2229
/*
 * Detect ZAC (zoned) devices and, when possible, read the Zoned Device
 * Information log page to fill in the zac_zones_* limits.  Missing log
 * support is reported but leaves the limits at U32_MAX ("not reported").
 */
static void ata_dev_config_zac(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int err_mask;
	u8 *identify_buf = ap->sector_buf;
	int log_index = ATA_LOG_SATA_ID_DEV_DATA * 2, i, found = 0;
	u16 log_pages;

	dev->zac_zones_optimal_open = U32_MAX;
	dev->zac_zones_optimal_nonseq = U32_MAX;
	dev->zac_zones_max_open = U32_MAX;

	/*
	 * Always set the 'ZAC' flag for Host-managed devices.
	 */
	if (dev->class == ATA_DEV_ZAC)
		dev->flags |= ATA_DFLAG_ZAC;
	else if (ata_id_zoned_cap(dev->id) == 0x01)
		/*
		 * Check for host-aware devices.
		 */
		dev->flags |= ATA_DFLAG_ZAC;

	if (!(dev->flags & ATA_DFLAG_ZAC))
		return;

	/*
	 * Read Log Directory to figure out if IDENTIFY DEVICE log
	 * is supported.
	 */
	err_mask = ata_read_log_page(dev, ATA_LOG_DIRECTORY,
				     0, ap->sector_buf, 1);
	if (err_mask) {
		ata_dev_info(dev,
			     "failed to get Log Directory Emask 0x%x\n",
			     err_mask);
		return;
	}
	log_pages = get_unaligned_le16(&ap->sector_buf[log_index]);
	if (log_pages == 0) {
		ata_dev_warn(dev,
			     "ATA Identify Device Log not supported\n");
		return;
	}

	/*
	 * Read IDENTIFY DEVICE data log, page 0, to figure out
	 * if the Zoned Information page is supported.
	 */
	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_ID_DEV_DATA, 0,
				     identify_buf, 1);
	if (err_mask) {
		ata_dev_info(dev,
			     "failed to get Device Identify Log Emask 0x%x\n",
			     err_mask);
		return;
	}
	/* byte 8 holds the page count; pages are listed from byte 9 on */
	log_pages = identify_buf[8];
	for (i = 0; i < log_pages; i++) {
		if (identify_buf[9 + i] == ATA_LOG_ZONED_INFORMATION) {
			found++;
			break;
		}
	}
	if (!found) {
		ata_dev_warn(dev,
			     "ATA Zoned Information Log not supported\n");
		return;
	}

	/*
	 * Read IDENTIFY DEVICE data log, page 9 (Zoned-device information)
	 */
	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_ID_DEV_DATA,
				     ATA_LOG_ZONED_INFORMATION,
				     identify_buf, 1);
	if (!err_mask) {
		u64 zoned_cap, opt_open, opt_nonseq, max_open;

		/* each qword's MSB flags whether the field is reported */
		zoned_cap = get_unaligned_le64(&identify_buf[8]);
		if ((zoned_cap >> 63))
			dev->zac_zoned_cap = (zoned_cap & 1);
		opt_open = get_unaligned_le64(&identify_buf[24]);
		if ((opt_open >> 63))
			dev->zac_zones_optimal_open = (u32)opt_open;
		opt_nonseq = get_unaligned_le64(&identify_buf[32]);
		if ((opt_nonseq >> 63))
			dev->zac_zones_optimal_nonseq = (u32)opt_nonseq;
		max_open = get_unaligned_le64(&identify_buf[40]);
		if ((max_open >> 63))
			dev->zac_zones_max_open = (u32)max_open;
	}
}
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned long xfer_mask;
	unsigned int err_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_dbg(dev, "%s: ENTER\n", __func__);

	/* set horkage */
	dev->horkage |= ata_dev_blacklisted(dev);
	ata_force_horkage(dev);

	if (dev->horkage & ATA_HORKAGE_DISABLE) {
		ata_dev_info(dev, "unsupported device, disabling\n");
		ata_dev_disable(dev);
		return 0;
	}

	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
	    dev->class == ATA_DEV_ATAPI) {
		ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
			     atapi_enabled ? "not supported with this driver"
			     : "disabled");
		ata_dev_disable(dev);
		return 0;
	}

	rc = ata_do_link_spd_horkage(dev);
	if (rc)
		return rc;

	/* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
	if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
	    (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
		dev->horkage |= ATA_HORKAGE_NOLPM;

	if (dev->horkage & ATA_HORKAGE_NOLPM) {
		ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
		dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
	}

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_dbg(dev,
			    "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			    "85:%04x 86:%04x 87:%04x 88:%04x\n",
			    __func__,
			    id[49], id[82], id[83], id[84],
			    id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;
	dev->multi_count = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
		if (ata_id_is_cfa(id)) {
			/* CPRM may make this media unusable */
			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
				ata_dev_warn(dev,
	"supports DRM functions and may not be fully accessible\n");
			snprintf(revbuf, 7, "CFA");
		} else {
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
			/* Warn the user if the device has TPM extensions */
			if (ata_id_has_tpm(id))
				ata_dev_warn(dev,
	"supports DRM functions and may not be fully accessible\n");
		}

		dev->n_sectors = ata_id_n_sectors(id);

		/* get current R/W Multiple count setting */
		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
			unsigned int max = dev->id[47] & 0xff;
			unsigned int cnt = dev->id[59] & 0xff;

			/* only recognize/allow powers of two here */
			if (is_power_of_2(max) && is_power_of_2(cnt))
				if (cnt <= max)
					dev->multi_count = cnt;
		}

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[24];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
			if (rc)
				return rc;

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_info(dev, "%s: %s, %s, max %s\n",
					     revbuf, modelbuf, fwrevbuf,
					     ata_mode_string(xfer_mask));
				ata_dev_info(dev,
					     "%llu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders = id[1];
			dev->heads = id[3];
			dev->sectors = id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads = id[55];
				dev->sectors = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_info(dev, "%s: %s, %s, max %s\n",
					     revbuf, modelbuf, fwrevbuf,
					     ata_mode_string(xfer_mask));
				ata_dev_info(dev,
					     "%llu sectors, multi %u, CHS %u/%u/%u\n",
					     (unsigned long long)dev->n_sectors,
					     dev->multi_count, dev->cylinders,
					     dev->heads, dev->sectors);
			}
		}

		/* Check and mark DevSlp capability.  Get DevSlp timing
		 * variables from the SATA Settings page of the Identify
		 * Device Data log.
		 */
		if (ata_id_has_devslp(dev->id)) {
			u8 *sata_setting = ap->sector_buf;
			int i, j;

			dev->flags |= ATA_DFLAG_DEVSLP;
			err_mask = ata_read_log_page(dev,
						     ATA_LOG_SATA_ID_DEV_DATA,
						     ATA_LOG_SATA_SETTINGS,
						     sata_setting,
						     1);
			if (err_mask)
				ata_dev_dbg(dev,
					    "failed to get Identify Device Data, Emask 0x%x\n",
					    err_mask);
			else
				for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
					j = ATA_LOG_DEVSLP_OFFSET + i;
					dev->devslp_timing[i] = sata_setting[j];
				}
		}
		ata_dev_config_sense_reporting(dev);
		ata_dev_config_zac(dev);
		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		const char *dma_dir_string = "";
		u32 sntf;

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_warn(dev, "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support.  If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if (atapi_an &&
		    (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!sata_pmp_attached(ap) ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
			/* issue SET feature command to turn this on */
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_AN);
			if (err_mask)
				ata_dev_err(dev,
					    "failed to enable ATAPI AN (err_mask=0x%x)\n",
					    err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
			dev->flags |= ATA_DFLAG_DMADIR;
			dma_dir_string = ", DMADIR";
		}

		if (ata_id_has_da(dev->id)) {
			dev->flags |= ATA_DFLAG_DA;
			zpodd_init(dev);
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_info(dev,
				     "ATAPI: %s, %s, max %s%s%s%s\n",
				     modelbuf, fwrevbuf,
				     ata_mode_string(xfer_mask),
				     cdb_intr_string, atapi_an_string,
				     dma_dir_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	/* Limit PATA drive on SATA cable bridge transfers to udma5,
	   200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_info(dev, "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if ((dev->class == ATA_DEV_ATAPI) &&
	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
	}

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
					 dev->max_sectors);

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot. Do this after the dev_config call as some controllers
		   with buggy firmware may want to avoid reporting false device
		   bugs */

		if (print_info) {
			ata_dev_warn(dev,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_warn(dev,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
		ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
		ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n");
	}

	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
	return rc;
}
2674
2675
2676
2677
2678
2679
2680
2681
2682
/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 */
int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}
2687
2688
2689
2690
2691
2692
2693
2694
2695
/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 */
int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}
2700
2701
2702
2703
2704
2705
2706
2707
/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 */
int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}
2712
2713
2714
2715
2716
2717
2718
2719
/**
 *	ata_cable_ignore	-	return ignored PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which don't use cable type to limit
 *	transfer mode.
 */
int ata_cable_ignore(struct ata_port *ap)
{
	return ATA_CBL_PATA_IGN;
}
2724
2725
2726
2727
2728
2729
2730
2731
/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables.
 */
int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
2736
2737
2738
2739
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
2750
2751
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */
int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	ata_for_each_dev(dev, &ap->link, ALL)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	ata_for_each_dev(dev, &ap->link, ALL) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;
		dev->dma_mode = 0xff;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	ata_for_each_dev(dev, &ap->link, ALL) {
		if (dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of
	 * the reported cable types and sensed types.  When SATA
	 * drives indicate we have a bridge, we don't know which end
	 * of the link the bridge is which is a problem.
	 */
	ata_for_each_dev(dev, &ap->link, ENABLED)
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	ata_for_each_dev(dev, &ap->link, ENABLED) {
		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	/* success iff at least one device remains enabled */
	ata_for_each_dev(dev, &ap->link, ENABLED)
		return 0;

	return -ENODEV;

 fail:
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fall through */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link, 0);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
2873
2874
2875
2876
2877
2878
2879
2880
2881
2882
2883static void sata_print_link_status(struct ata_link *link)
2884{
2885 u32 sstatus, scontrol, tmp;
2886
2887 if (sata_scr_read(link, SCR_STATUS, &sstatus))
2888 return;
2889 sata_scr_read(link, SCR_CONTROL, &scontrol);
2890
2891 if (ata_phys_link_online(link)) {
2892 tmp = (sstatus >> 4) & 0xf;
2893 ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
2894 sata_spd_string(tmp), sstatus, scontrol);
2895 } else {
2896 ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
2897 sstatus, scontrol);
2898 }
2899}
2900
2901
2902
2903
2904
2905
2906
2907
2908
2909struct ata_device *ata_dev_pair(struct ata_device *adev)
2910{
2911 struct ata_link *link = adev->link;
2912 struct ata_device *pair = &link->device[1 - adev->devno];
2913 if (!ata_dev_enabled(pair))
2914 return NULL;
2915 return pair;
2916}
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926
2927
2928
2929
2930
2931
2932
2933
2934
2935
2936
2937
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *	@spd_limit: Additional limit
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	If @spd_limit is non-zero, the speed is not downgraded to be
 *	less than @spd_limit if such downgrade is possible; otherwise,
 *	it's downgraded to the lowest possible speed.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
{
	u32 sstatus, spd, mask;
	int rc, bit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0 && ata_sstatus_online(sstatus))
		spd = (sstatus >> 4) & 0xf;
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;

	/* unconditionally mask off the highest bit */
	bit = fls(mask) - 1;
	mask &= ~(1 << bit);

	/* Mask off all speeds higher than or equal to the current
	 * one.  Force 1.5Gbps if current SPD is not available.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		mask &= 1;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	if (spd_limit) {
		if (mask & ((1 << spd_limit) - 1))
			mask &= (1 << spd_limit) - 1;
		else {
			/* can't honour the extra limit; keep the
			 * lowest speed that is still available
			 */
			bit = ffs(mask) - 1;
			mask = 1 << bit;
		}
	}

	link->sata_spd_limit = mask;

	ata_link_warn(link, "limiting SATA link speed to %s\n",
		      sata_spd_string(fls(mask)));

	return 0;
}
2991
2992static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2993{
2994 struct ata_link *host_link = &link->ap->link;
2995 u32 limit, target, spd;
2996
2997 limit = link->sata_spd_limit;
2998
2999
3000
3001
3002
3003 if (!ata_is_host_link(link) && host_link->sata_spd)
3004 limit &= (1 << host_link->sata_spd) - 1;
3005
3006 if (limit == UINT_MAX)
3007 target = 0;
3008 else
3009 target = fls(limit);
3010
3011 spd = (*scontrol >> 4) & 0xf;
3012 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
3013
3014 return spd != target;
3015}
3016
3017
3018
3019
3020
3021
3022
3023
3024
3025
3026
3027
3028
3029
3030
3031
3032static int sata_set_spd_needed(struct ata_link *link)
3033{
3034 u32 scontrol;
3035
3036 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
3037 return 1;
3038
3039 return __sata_set_spd_needed(link, &scontrol);
3040}
3041
3042
3043
3044
3045
3046
3047
3048
3049
3050
3051
3052
3053
3054
3055int sata_set_spd(struct ata_link *link)
3056{
3057 u32 scontrol;
3058 int rc;
3059
3060 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3061 return rc;
3062
3063 if (!__sata_set_spd_needed(link, &scontrol))
3064 return 0;
3065
3066 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3067 return rc;
3068
3069 return 1;
3070}
3071
3072
3073
3074
3075
3076
3077
3078
3079
3080
3081
3082
3083
/*
 * Standard ATA transfer-mode timing table.  All values are in
 * nanoseconds except the entry terminator.  Columns:
 * mode, setup, act8b, rec8b, cyc8b, active, recover, dmack_hold,
 * cycle, udma.
 */
static const struct ata_timing ata_timing[] = {
	/* PIO modes: 8-bit (register) and 16-bit (data) cycle timings */
	{ XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 },
	{ XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 },
	{ XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 },
	{ XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 },
	{ XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 },
	{ XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 },
	{ XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 },
	/* single-word DMA */
	{ XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 },
	{ XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 },
	{ XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 },
	/* multi-word DMA */
	{ XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 },
	{ XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 },
	{ XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 },
	{ XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 },
	{ XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 },

	/* Ultra DMA: only the udma cycle time is meaningful */
	{ XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 },
	{ XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 },
	{ XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 },
	{ XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 },
	{ XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 },
	{ XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 },
	{ XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 },

	/* terminator: ata_timing_find_mode() stops at mode 0xFF */
	{ 0xFF }
};
3115
/*
 * ENOUGH() rounds @v up to a whole number of @unit periods (ceiling
 * division); EZ() does the same but maps 0 to 0 so that "no
 * requirement" entries in the timing table stay unconstrained.
 */
#define ENOUGH(v, unit) (((v)-1)/(unit)+1)
#define EZ(v, unit) ((v)?ENOUGH(v, unit):0)
3118
/*
 * Convert the nanosecond timings in @t into counts of controller clock
 * periods in @q.  @T is the clock period (in ps, i.e. ns * 1000) used
 * for the PIO/DMA fields and @UT the one used for the UDMA field.
 * Zero entries mean "no requirement" and stay zero via EZ().
 */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup = EZ(t->setup * 1000, T);
	q->act8b = EZ(t->act8b * 1000, T);
	q->rec8b = EZ(t->rec8b * 1000, T);
	q->cyc8b = EZ(t->cyc8b * 1000, T);
	q->active = EZ(t->active * 1000, T);
	q->recover = EZ(t->recover * 1000, T);
	q->dmack_hold = EZ(t->dmack_hold * 1000, T);
	q->cycle = EZ(t->cycle * 1000, T);
	q->udma = EZ(t->udma * 1000, UT);
}
3131
3132void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
3133 struct ata_timing *m, unsigned int what)
3134{
3135 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
3136 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
3137 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
3138 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
3139 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
3140 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
3141 if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
3142 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
3143 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
3144}
3145
3146const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
3147{
3148 const struct ata_timing *t = ata_timing;
3149
3150 while (xfer_mode > t->mode)
3151 t++;
3152
3153 if (xfer_mode == t->mode)
3154 return t;
3155
3156 WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
3157 __func__, xfer_mode);
3158
3159 return NULL;
3160}
3161
/*
 * ata_timing_compute - compute quantized timings for a transfer mode
 * @adev: device the timing is for (its IDENTIFY data may tighten cycles)
 * @speed: transfer mode (XFER_* value)
 * @t: output timing structure
 * @T: clock period for PIO/MWDMA quantization
 * @UT: clock period for UDMA quantization
 *
 * Returns 0 on success, -EINVAL if @speed has no timing table entry.
 */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const u16 *id = adev->id;
	const struct ata_timing *s;
	struct ata_timing p;

	/* Find the mode in the timing table. */
	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive reports EIDE timing data (IDENTIFY word 53 bit 1),
	 * fold its minimum cycle times into the table defaults so we never
	 * run the bus faster than the drive claims to support.
	 */
	if (id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));

		if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2)
				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
			else if ((speed <= XFER_PIO_4) ||
				 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
			p.cycle = id[ATA_ID_EIDE_DMA_MIN];

		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/* Convert the timing to bus clock counts. */
	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T and some other commands, so make sure the DMA mode's
	 * timings are never faster than the device's current PIO timings
	 * by merging in the PIO mode's computed timing.
	 */
	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that the total cycle time is
	 * honored: distribute any slack half to active, remainder to
	 * recovery.  First the 8-bit (ATAPI) pair, then the 16-bit pair.
	 */
	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/*
	 * Quantization may leave cycle smaller than active + recover;
	 * stretch cycle to cover them in that case.
	 */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
3237
3238
3239
3240
3241
3242
3243
3244
3245
3246
3247
3248
3249
3250
3251
3252
3253
3254u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3255{
3256 u8 base_mode = 0xff, last_mode = 0xff;
3257 const struct ata_xfer_ent *ent;
3258 const struct ata_timing *t;
3259
3260 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3261 if (ent->shift == xfer_shift)
3262 base_mode = ent->base;
3263
3264 for (t = ata_timing_find_mode(base_mode);
3265 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3266 unsigned short this_cycle;
3267
3268 switch (xfer_shift) {
3269 case ATA_SHIFT_PIO:
3270 case ATA_SHIFT_MWDMA:
3271 this_cycle = t->cycle;
3272 break;
3273 case ATA_SHIFT_UDMA:
3274 this_cycle = t->udma;
3275 break;
3276 default:
3277 return 0xff;
3278 }
3279
3280 if (cycle > this_cycle)
3281 break;
3282
3283 last_mode = t->mode;
3284 }
3285
3286 return last_mode;
3287}
3288
3289
3290
3291
3292
3293
3294
3295
3296
3297
3298
3299
3300
3301
3302
3303
/*
 * ata_down_xfermask_limit - adjust a device's transfer masks downward
 * @dev: device whose masks are adjusted
 * @sel: ATA_DNXFER_* selector, optionally or'd with ATA_DNXFER_QUIET
 *
 * Lowers @dev's pio/mwdma/udma masks according to @sel.  This only
 * updates the masks; the caller is responsible for actually applying
 * the new mode afterwards.
 *
 * Returns 0 on success, -ENOENT if no further downgrade is possible.
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned long orig_mask, xfer_mask;
	unsigned long pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* drop the fastest remaining PIO mode */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* drop the fastest DMA mode: UDMA first, then MWDMA */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		/* restrict UDMA to 40-wire-cable-safe speeds */
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fall through - FORCE_PIO0 also disables all DMA */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* at least one PIO mode must survive, and something must change */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_warn(dev, "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
3376
/*
 * ata_dev_set_mode - issue SET FEATURES - XFER MODE and revalidate
 * @dev: device to program
 *
 * Sends SET FEATURES - XFER MODE (unless the NOSETXFER quirk applies on
 * SATA) and re-reads IDENTIFY data to confirm the device still responds.
 * Device errors from SET FEATURES are ignored in several known-bogus
 * cases; see the ign_dev_err logic below.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
	const char *dev_err_whine = "";
	int ign_dev_err = 0;
	unsigned int err_mask = 0;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	/* NOSETXFER is only honored for genuine SATA devices */
	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
		dev_err_whine = " (SET_XFERMODE skipped)";
	else {
		if (nosetxfer)
			ata_dev_warn(dev,
				     "NOSETXFER but PATA detected - can't "
				     "skip SETXFER, might malfunction\n");
		err_mask = ata_dev_set_xfermode(dev);
	}

	if (err_mask & ~AC_ERR_DEV)
		goto fail;

	/* revalidate */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	if (dev->xfer_shift == ATA_SHIFT_PIO) {
		/* Old CFA may refuse this command, which is just fine */
		if (ata_id_is_cfa(dev->id))
			ign_dev_err = 1;

		/* Catch several broken garbage emulations plus some
		   pre-ATA devices (major version 0) */
		if (ata_id_major_version(dev->id) == 0 &&
					dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;

		/* Some very old devices and some bad newer ones fail
		   any kind of SET_XFERMODE request but support PIO0-2
		   timings and no IORDY */
		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;
	}

	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
	   Don't error on that. */
	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
	    dev->dma_mode == XFER_MW_DMA_0 &&
	    (dev->id[63] >> 8) & 1)
		ign_dev_err = 1;

	/* if the device is actually configured correctly, ignore dev err */
	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
		ign_dev_err = 1;

	if (err_mask & AC_ERR_DEV) {
		if (!ign_dev_err)
			goto fail;
		else
			dev_err_whine = " (device error ignored)";
	}

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_info(dev, "configured for %s%s\n",
		     ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
		     dev_err_whine);

	return 0;

 fail:
	ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
	return -EIO;
}
3457
3458
3459
3460
3461
3462
3463
3464
3465
3466
3467
3468
3469
3470
3471
3472
3473
3474
/*
 * ata_do_set_mode - program timings and issue SET FEATURES - XFER
 * @link: link on which timings will be programmed
 * @r_failed_dev: out parameter for the device that failed
 *
 * Standard implementation of the function used to tune and set ATA
 * device transfer modes.  If ata_dev_set_mode() fails, a pointer to
 * the failing device is returned in @r_failed_dev.
 *
 * Returns 0 on success, negative errno otherwise.
 */
int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;
	int rc = 0, used_dma = 0, found = 0;

	/* step 1: calculate xfer_mask for each enabled device */
	ata_for_each_dev(dev, link, ENABLED) {
		unsigned long pio_mask, dma_mask;
		unsigned int mode_mask;

		mode_mask = ATA_DMA_MASK_ATA;
		if (dev->class == ATA_DEV_ATAPI)
			mode_mask = ATA_DMA_MASK_ATAPI;
		else if (ata_id_is_cfa(dev->id))
			mode_mask = ATA_DMA_MASK_CFA;

		ata_dev_xfermask(dev);
		ata_force_xfermask(dev);

		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);

		/* honor the module-level DMA enable mask for this class */
		if (libata_dma_mask & mode_mask)
			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
						     dev->udma_mask);
		else
			dma_mask = 0;

		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (ata_dma_enabled(dev))
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	ata_for_each_dev(dev, link, ENABLED) {
		if (dev->pio_mode == 0xff) {
			ata_dev_warn(dev, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	ata_for_each_dev(dev, link, ENABLED) {
		if (!ata_dma_enabled(dev))
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	ata_for_each_dev(dev, link, ENABLED) {
		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status.  If we selected DMA then the other
	 * host channels are not permitted to do so on a simplex host.
	 */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = ap;

 out:
	if (rc)
		*r_failed_dev = dev;
	return rc;
}
3556
3557
3558
3559
3560
3561
3562
3563
3564
3565
3566
3567
3568
3569
3570
3571
3572
3573
3574
3575
3576
/*
 * ata_wait_ready - wait for a link to become ready
 * @link: link to wait on
 * @deadline: deadline (jiffies) for the operation
 * @check_ready: callback; returns >0 if ready, 0 if not, -ENODEV if the
 *	link doesn't seem to be occupied, other -errno on error
 *
 * Polls @check_ready every 50ms until it reports ready, an error, or
 * @deadline passes.  Transient -ENODEV conditions are tolerated for a
 * limited window (longer under parallel scan, where PMP probing causes
 * longer 0xff spells).
 *
 * Returns 0 if the link became ready, -EBUSY on deadline, or the error
 * from @check_ready.
 */
int ata_wait_ready(struct ata_link *link, unsigned long deadline,
		   int (*check_ready)(struct ata_link *link))
{
	unsigned long start = jiffies;
	unsigned long nodev_deadline;
	int warned = 0;

	/* choose which nodev delay to honor (longer for parallel scan) */
	if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
	else
		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);

	/* Slave readiness can't be tested separately from the master;
	 * this function should never be called on a slave link.
	 */
	WARN_ON(link == link->ap->slave_link);

	if (time_after(nodev_deadline, deadline))
		nodev_deadline = deadline;

	while (1) {
		unsigned long now = jiffies;
		int ready, tmp;

		ready = tmp = check_ready(link);
		if (ready > 0)
			return 0;

		/* -ENODEV could be transient.  Faithful devices can take
		 * a while to respond after a reset, and SATA PHY status
		 * may fluctuate.  Treat -ENODEV as "not ready yet" while
		 * the link is online, or (on SATA) while it is not
		 * positively offline and the nodev window hasn't expired.
		 */
		if (ready == -ENODEV) {
			if (ata_link_online(link))
				ready = 0;
			else if ((link->ap->flags & ATA_FLAG_SATA) &&
				 !ata_link_offline(link) &&
				 time_before(now, nodev_deadline))
				ready = 0;
		}

		if (ready)
			return ready;
		if (time_after(now, deadline))
			return -EBUSY;

		/* warn once if we've been waiting 5s with 3s+ to go */
		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_link_warn(link,
				"link is slow to respond, please be patient "
				"(ready=%d)\n", tmp);
			warned = 1;
		}

		ata_msleep(link->ap, 50);
	}
}
3643
3644
3645
3646
3647
3648
3649
3650
3651
3652
3653
3654
3655
3656
3657
/*
 * ata_wait_after_reset - wait for a link to become ready after reset
 * @link: link to wait on
 * @deadline: deadline (jiffies) for the operation
 * @check_ready: readiness callback, as for ata_wait_ready()
 *
 * Sleeps ATA_WAIT_AFTER_RESET first to give the device time to come
 * out of reset before polling, then defers to ata_wait_ready().
 *
 * Returns 0 if @link is ready before @deadline, -errno otherwise.
 */
int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
			 int (*check_ready)(struct ata_link *link))
{
	ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);

	return ata_wait_ready(link, deadline, check_ready);
}
3665
3666
3667
3668
3669
3670
3671
3672
3673
3674
3675
3676
3677
3678
3679
3680
3681
3682
3683
3684
3685
3686
3687
/*
 * sata_link_debounce - debounce SATA PHY status
 * @link: link to debounce
 * @params: {interval, duration, timeout} in ms
 * @deadline: deadline (jiffies) for the operation
 *
 * Polls the DET field of SStatus every @params[0] ms until it stays
 * unchanged for @params[1] ms.  The effective deadline is the earlier
 * of @deadline and now + @params[2].
 *
 * Returns 0 once DET is stable, -EPIPE on timeout, or -errno from SCR
 * access failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval = params[0];
	unsigned long duration = params[1];
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	t = ata_deadline(jiffies, params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* only the DET field matters */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		ata_msleep(link->ap, interval);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable?  (DET==1, device but no phy comm, never
		 * counts as stable until the deadline forces it) */
		if (cur == last) {
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies,
				       ata_deadline(last_jiffies, duration)))
				return 0;
			continue;
		}

		/* unstable, restart the stability window */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE to tell upper layer to lower link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}
3735
3736
3737
3738
3739
3740
3741
3742
3743
3744
3745
3746
3747
3748
3749
/*
 * sata_link_resume - resume a SATA link
 * @link: link to resume
 * @params: debounce timing parameters (see sata_link_debounce())
 * @deadline: deadline (jiffies) for the operation
 *
 * Clears the DET field of SControl (retrying because some controllers
 * occasionally drop SControl writes), debounces the PHY, then clears
 * SError.
 *
 * Returns 0 on success, -errno on failure.
 */
int sata_link_resume(struct ata_link *link, const unsigned long *params,
		     unsigned long deadline)
{
	int tries = ATA_LINK_RESUME_TRIES;
	u32 scontrol, serror;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/*
	 * Writes to SControl sometimes get ignored under certain
	 * controllers (ata_piix SIDPR).  Make sure DET actually is
	 * cleared.
	 */
	do {
		scontrol = (scontrol & 0x0f0) | 0x300;
		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			return rc;

		/*
		 * Some PHYs react badly if SStatus is pounded
		 * immediately after resuming.  Delay 200ms before
		 * debouncing.
		 */
		if (!(link->flags & ATA_LFLAG_NO_DB_DELAY))
			ata_msleep(link->ap, 200);

		/* is SControl restored correctly? */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			return rc;
	} while ((scontrol & 0xf0f) != 0x300 && --tries);

	if ((scontrol & 0xf0f) != 0x300) {
		ata_link_warn(link, "failed to resume link (SControl %X)\n",
			      scontrol);
		return 0;
	}

	if (tries < ATA_LINK_RESUME_TRIES)
		ata_link_warn(link, "link resume succeeded after %d retries\n",
			      ATA_LINK_RESUME_TRIES - tries);

	if ((rc = sata_link_debounce(link, params, deadline)))
		return rc;

	/* clear SError, some PHYs require this even for SRST to work */
	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
		rc = sata_scr_write(link, SCR_ERROR, serror);

	return rc != -EINVAL ? rc : 0;
}
3801
3802
3803
3804
3805
3806
3807
3808
3809
3810
3811
3812
3813
3814
3815
3816
3817
3818
3819
/*
 * sata_link_scr_lpm - manipulate SControl IPM and SPM fields
 * @link: link to configure link power management on
 * @policy: LPM policy to configure
 * @spm_wakeup: whether to wake the link up from slumber/partial
 *
 * Adjusts the IPM bits (SControl 11:8) according to @policy and clears
 * the resulting expected PHYRDY-change event from SError so it isn't
 * treated as a hotplug event.
 *
 * Returns 0 on success, -errno on SCR access failure.
 */
int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
		      bool spm_wakeup)
{
	struct ata_eh_context *ehc = &link->eh_context;
	bool woken_up = false;
	u32 scontrol;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	switch (policy) {
	case ATA_LPM_MAX_POWER:
		/* disable all LPM transitions */
		scontrol |= (0x7 << 8);
		/* initiate transition to active state */
		if (spm_wakeup) {
			scontrol |= (0x4 << 12);
			woken_up = true;
		}
		break;
	case ATA_LPM_MED_POWER:
		/* allow LPM to PARTIAL */
		scontrol &= ~(0x1 << 8);
		scontrol |= (0x6 << 8);
		break;
	case ATA_LPM_MIN_POWER:
		if (ata_link_nr_enabled(link) > 0)
			/* no restrictions on LPM transitions */
			scontrol &= ~(0x7 << 8);
		else {
			/* empty port, power off */
			scontrol &= ~0xf;
			scontrol |= (0x1 << 2);
		}
		break;
	default:
		WARN_ON(1);
	}

	rc = sata_scr_write(link, SCR_CONTROL, scontrol);
	if (rc)
		return rc;

	/* give the link time to transit out of LPM state */
	if (woken_up)
		msleep(10);

	/* clear PHYRDY_CHG from SError */
	ehc->i.serror &= ~SERR_PHYRDY_CHG;
	return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
}
3873
3874
3875
3876
3877
3878
3879
3880
3881
3882
3883
3884
3885
3886
3887
3888
3889
3890
/*
 * ata_std_prereset - standard prereset method
 * @link: ATA link to be reset
 * @deadline: deadline (jiffies) for the operation
 *
 * On SATA ports, resumes the link before reset; if the link then
 * appears offline, the softreset request is dropped from the EH action
 * mask (hardreset will be used instead, if requested).
 *
 * Always returns 0 — resume failures are only warned about.
 */
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume link */
	if (ap->flags & ATA_FLAG_SATA) {
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_warn(link,
				"failed to resume link for reset (errno=%d)\n",
				rc);
	}

	/* no point in trying softreset on offline link */
	if (ata_phys_link_offline(link))
		ehc->i.action &= ~ATA_EH_SOFTRESET;

	return 0;
}
3918
3919
3920
3921
3922
3923
3924
3925
3926
3927
3928
3929
3930
3931
3932
3933
3934
3935
3936
3937
3938
3939
3940
3941
3942
/*
 * sata_link_hardreset - issue COMRESET on a SATA link
 * @link: link to reset
 * @timing: debounce timing parameters
 * @deadline: deadline (jiffies) for the operation
 * @online: out parameter, set true if the link came up (may be NULL)
 * @check_ready: optional callback to wait for link readiness
 *
 * Drives DET=1 in SControl to issue COMRESET, resumes and debounces the
 * link, and optionally waits for readiness.  If the port supports PMP
 * and this is the host link, -EAGAIN is returned so EH follows up with
 * PMP-aware processing.
 *
 * Returns 0 on success, -errno otherwise.
 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
			unsigned long deadline,
			bool *online, int (*check_ready)(struct ata_link *))
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (online)
		*online = false;

	if (sata_set_spd_needed(link)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			goto out;

		scontrol = (scontrol & 0x0f0) | 0x304;	/* DET=4: phy offline */

		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(link);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		goto out;

	scontrol = (scontrol & 0x0f0) | 0x301;	/* DET=1: COMRESET */

	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	ata_msleep(link->ap, 1);

	/* bring link back */
	rc = sata_link_resume(link, timing, deadline);
	if (rc)
		goto out;
	/* if link is offline nothing more to do */
	if (ata_phys_link_offline(link))
		goto out;

	/* Link is online.  From this point, -ENODEV too is an error. */
	if (online)
		*online = true;

	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
		/* If PMP is supported, we have to do follow-up SRST.
		 * Some PMPs don't send D2H Reg FIS after hardreset if
		 * the first port is empty.  Wait only for a limited
		 * time here, then hand over to EH.
		 */
		if (check_ready) {
			unsigned long pmp_deadline;

			pmp_deadline = ata_deadline(jiffies,
						    ATA_TMOUT_PMP_SRST_WAIT);
			if (time_after(pmp_deadline, deadline))
				pmp_deadline = deadline;
			ata_wait_ready(link, pmp_deadline, check_ready);
		}
		rc = -EAGAIN;
		goto out;
	}

	rc = 0;
	if (check_ready)
		rc = ata_wait_ready(link, deadline, check_ready);
 out:
	if (rc && rc != -EAGAIN) {
		/* online is set iff link is online && reset succeeded */
		if (online)
			*online = false;
		ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
	}
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
4030
4031
4032
4033
4034
4035
4036
4037
4038
4039
4040
4041
4042
4043
4044
/*
 * sata_std_hardreset - COMRESET without waiting for device readiness
 * @link: link to reset
 * @class: unused here; resulting class is determined by a follow-up
 *	softreset (hence the -EAGAIN below)
 * @deadline: deadline (jiffies) for the operation
 *
 * Returns -EAGAIN if the link came online (telling EH to follow up),
 * otherwise the hardreset result.
 */
int sata_std_hardreset(struct ata_link *link, unsigned int *class,
		       unsigned long deadline)
{
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	bool online;
	int rc;

	/* do hardreset; no readiness callback */
	rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
	return online ? -EAGAIN : rc;
}
4056
4057
4058
4059
4060
4061
4062
4063
4064
4065
4066
4067
4068
/*
 * ata_std_postreset - standard postreset method
 * @link: the target link
 * @classes: classes of attached devices (unused here)
 *
 * Clears SError accumulated during the reset and prints the link
 * status.
 */
void ata_std_postreset(struct ata_link *link, unsigned int *classes)
{
	u32 serror;

	DPRINTK("ENTER\n");

	/* reset complete, clear SError */
	if (!sata_scr_read(link, SCR_ERROR, &serror))
		sata_scr_write(link, SCR_ERROR, serror);

	/* print link status */
	sata_print_link_status(link);

	DPRINTK("EXIT\n");
}
4084
4085
4086
4087
4088
4089
4090
4091
4092
4093
4094
4095
4096
4097
4098
4099
4100
4101static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
4102 const u16 *new_id)
4103{
4104 const u16 *old_id = dev->id;
4105 unsigned char model[2][ATA_ID_PROD_LEN + 1];
4106 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
4107
4108 if (dev->class != new_class) {
4109 ata_dev_info(dev, "class mismatch %d != %d\n",
4110 dev->class, new_class);
4111 return 0;
4112 }
4113
4114 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
4115 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
4116 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
4117 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
4118
4119 if (strcmp(model[0], model[1])) {
4120 ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
4121 model[0], model[1]);
4122 return 0;
4123 }
4124
4125 if (strcmp(serial[0], serial[1])) {
4126 ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
4127 serial[0], serial[1]);
4128 return 0;
4129 }
4130
4131 return 1;
4132}
4133
4134
4135
4136
4137
4138
4139
4140
4141
4142
4143
4144
4145
4146
4147
4148int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
4149{
4150 unsigned int class = dev->class;
4151 u16 *id = (void *)dev->link->ap->sector_buf;
4152 int rc;
4153
4154
4155 rc = ata_dev_read_id(dev, &class, readid_flags, id);
4156 if (rc)
4157 return rc;
4158
4159
4160 if (!ata_dev_same_device(dev, class, id))
4161 return -ENODEV;
4162
4163 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
4164 return 0;
4165}
4166
4167
4168
4169
4170
4171
4172
4173
4174
4175
4176
4177
4178
4179
4180
4181
/*
 * ata_dev_revalidate - re-read IDENTIFY data and reconfigure @dev
 * @dev: device to revalidate
 * @new_class: new class code (ATA_DEV_UNKNOWN to skip the class check)
 * @readid_flags: ATA_READID_* flags
 *
 * Re-reads the IDENTIFY page, reconfigures the device, and sanity-checks
 * that the capacity didn't change unexpectedly.  A capacity change that
 * matches the native size is accepted (late HPA unlock); one that shrank
 * to a previously-native size triggers an HPA-unlock retry.
 *
 * Returns 0 on success, negative errno otherwise.
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
		       unsigned int readid_flags)
{
	u64 n_sectors = dev->n_sectors;
	u64 n_native_sectors = dev->n_native_sectors;
	int rc;

	if (!ata_dev_enabled(dev))
		return -ENODEV;

	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
	if (ata_class_enabled(new_class) &&
	    new_class != ATA_DEV_ATA &&
	    new_class != ATA_DEV_ATAPI &&
	    new_class != ATA_DEV_ZAC &&
	    new_class != ATA_DEV_SEMB) {
		ata_dev_info(dev, "class mismatch %u != %u\n",
			     dev->class, new_class);
		rc = -ENODEV;
		goto fail;
	}

	/* re-read ID */
	rc = ata_dev_reread_id(dev, readid_flags);
	if (rc)
		goto fail;

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc)
		goto fail;

	/* verify n_sectors hasn't changed */
	if (dev->class != ATA_DEV_ATA || !n_sectors ||
	    dev->n_sectors == n_sectors)
		return 0;

	/* n_sectors has changed */
	ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
		     (unsigned long long)n_sectors,
		     (unsigned long long)dev->n_sectors);

	/*
	 * Something could have caused HPA to be unlocked
	 * involuntarily.  If n_native_sectors hasn't changed and the
	 * new size matches it, keep the device.
	 */
	if (dev->n_native_sectors == n_native_sectors &&
	    dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
		ata_dev_warn(dev,
			     "new n_sectors matches native, probably "
			     "late HPA unlock, n_sectors updated\n");
		/* use the larger n_sectors */
		return 0;
	}

	/*
	 * Some BIOSes boot w/o HPA but resume w/ HPA locked.  Try
	 * unlocking HPA in those cases.
	 *
	 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
	 */
	if (dev->n_native_sectors == n_native_sectors &&
	    dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
	    !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
		ata_dev_warn(dev,
			     "old n_sectors matches native, probably "
			     "late HPA lock, will try to unlock HPA\n");
		/* try unlocking HPA */
		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
		rc = -EIO;
	} else
		rc = -ENODEV;

	/* restore original n_[native_]sectors and fail */
	dev->n_native_sectors = n_native_sectors;
	dev->n_sectors = n_sectors;
 fail:
	ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
4263
/*
 * Entry in the device quirk table below.  @model_num and @model_rev are
 * glob patterns matched against the strings from IDENTIFY data; a NULL
 * @model_rev matches any firmware revision.  @horkage is the set of
 * ATA_HORKAGE_* quirk flags to apply on match.
 */
struct ata_blacklist_entry {
	const char *model_num;
	const char *model_rev;
	unsigned long horkage;
};
4269
static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
	{ "CRD-848[02]B",	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8[34]35",NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
	{ " 2GB ATA Flash Disk", "ADMA428M",	ATA_HORKAGE_NODMA },
	{ "VRFDFC22048UCHC-TE*", NULL,		ATA_HORKAGE_NODMA },
	/* Odd clown on sil3726/4726 PMPs */
	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA },
	{ "Slimtype DVD A  DS8A8SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
	{ "Slimtype DVD A  DS8A9SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },

	/*
	 * Causes silent data corruption with higher max sects.
	 */
	{ "ST380013AS",		"3.20",		ATA_HORKAGE_MAX_SEC_1024 },

	/*
	 * Device times out with higher max sects.
	 */
	{ "LITEON CX1-JB256-HP", NULL,		ATA_HORKAGE_MAX_SEC_1024 },

	/* Devices we expect to fail diagnostics */

	/* Devices where NCQ should be avoided */
	/* NCQ is slow */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
	/* http://thread.gmane.org/gmane.linux.ide/14907 */
	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
	/* NCQ is broken */
	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
	{ "OCZ CORE_SSD",	"02.10104",	ATA_HORKAGE_NONCQ },

	/* Seagate NCQ + FLUSH CACHE firmware bug */
	{ "ST31500341AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST31000333AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST3640[36]23AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST3320[68]13AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	/* drives which fail FPDMA_AA activation (some may freeze afterwards) */
	{ "ST1000LM024 HN-M101MBB", "2AR10001",	ATA_HORKAGE_BROKEN_FPDMA_AA },
	{ "ST1000LM024 HN-M101MBB", "2BA30001",	ATA_HORKAGE_BROKEN_FPDMA_AA },
	{ "VB0250EAVER",	"HPG7",		ATA_HORKAGE_BROKEN_FPDMA_AA },

	/* Blacklist entries taken from Silicon Image 3124/3132
	   Windows driver .inf file - also several Linux problem reports */
	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },

	/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
	{ "C300-CTFDDAC128MAG",	"0001",		ATA_HORKAGE_NONCQ, },

	/* devices which puke on READ_NATIVE_MAX */
	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },

	/* this one allows HPA unlocking but fails IOs on the area */
	{ "OCZ-VERTEX",		    "1.30",	ATA_HORKAGE_BROKEN_HPA },

	/* Devices which report 1 sector over size HPA */
	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },

	/* Devices which get the IVB wrong */
	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
	/* Maybe we should just blacklist TSSTcorp... */
	{ "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]",  ATA_HORKAGE_IVB, },

	/* Devices that do not need bridging limits applied */
	{ "MTRON MSP-SATA*",		NULL,	ATA_HORKAGE_BRIDGE_OK, },
	{ "BUFFALO HD-QSU2/R5",		NULL,	ATA_HORKAGE_BRIDGE_OK, },

	/* Devices which aren't very happy with higher link speeds */
	{ "WD My Book",			NULL,	ATA_HORKAGE_1_5_GBPS, },
	{ "Seagate FreeAgent GoFlex",	NULL,	ATA_HORKAGE_1_5_GBPS, },

	/*
	 * Devices which choke on SETXFER.  Applies only if both the
	 * device and controller are SATA.
	 */
	{ "PIONEER DVD-RW  DVRTD08",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVRTD08A",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVR-215",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },

	/* devices that don't properly handle queued TRIM commands */
	{ "Micron_M500_*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Crucial_CT*M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Micron_M5[15]0_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Crucial_CT*M550*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Crucial_CT*MX100*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Samsung SSD 8*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "FCCT*M500*",			NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },

	/* devices that don't properly handle TRIM commands */
	{ "SuperSSpeed S238*",		NULL,	ATA_HORKAGE_NOTRIM, },

	/*
	 * Some drives deterministically return zeroes after TRIM;
	 * for the rest we can't rely on it, so only whitelist drives
	 * known to do so.  The intel match is kept as a placeholder
	 * with no horkage flags.
	 */
	{ "INTEL*SSDSC2MH*",		NULL,	0, },

	{ "Micron*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Crucial*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "INTEL*SSD*", 		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "SSD*INTEL*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Samsung*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "SAMSUNG*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "ST[1248][0248]0[FH]*",	NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },

	/*
	 * Some WD SATA-I drives spin up and down erratically when the link
	 * is put into the slumber mode.  Disable LPM for them.
	 */
	{ "WDC WD800JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD1200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD1600JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD2000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD2500JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD3000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD3200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },

	/* End Marker */
	{ }
};
4467
4468static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4469{
4470 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4471 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4472 const struct ata_blacklist_entry *ad = ata_device_blacklist;
4473
4474 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4475 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4476
4477 while (ad->model_num) {
4478 if (glob_match(ad->model_num, model_num)) {
4479 if (ad->model_rev == NULL)
4480 return ad->horkage;
4481 if (glob_match(ad->model_rev, model_rev))
4482 return ad->horkage;
4483 }
4484 ad++;
4485 }
4486 return 0;
4487}
4488
4489static int ata_dma_blacklisted(const struct ata_device *dev)
4490{
4491
4492
4493
4494
4495 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4496 (dev->flags & ATA_DFLAG_CDB_INTR))
4497 return 1;
4498 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4499}
4500
4501
4502
4503
4504
4505
4506
4507
4508
4509static int ata_is_40wire(struct ata_device *dev)
4510{
4511 if (dev->horkage & ATA_HORKAGE_IVB)
4512 return ata_drive_40wire_relaxed(dev->id);
4513 return ata_drive_40wire(dev->id);
4514}
4515
4516
4517
4518
4519
4520
4521
4522
4523
4524
4525
4526
4527
4528
4529static int cable_is_40wire(struct ata_port *ap)
4530{
4531 struct ata_link *link;
4532 struct ata_device *dev;
4533
4534
4535 if (ap->cbl == ATA_CBL_PATA40)
4536 return 1;
4537
4538
4539 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4540 return 0;
4541
4542
4543
4544
4545
4546 if (ap->cbl == ATA_CBL_PATA40_SHORT)
4547 return 0;
4548
4549
4550
4551
4552
4553
4554
4555
4556
4557
4558 ata_for_each_link(link, ap, EDGE) {
4559 ata_for_each_dev(dev, link, ENABLED) {
4560 if (!ata_is_40wire(dev))
4561 return 0;
4562 }
4563 }
4564 return 1;
4565}
4566
4567
4568
4569
4570
4571
4572
4573
4574
4575
4576
4577
4578
/*
 * ata_dev_xfermask - compute supported xfermask for @dev
 * @dev: device to compute xfermask for
 *
 * Intersects controller, device-state and IDENTIFY-reported transfer
 * masks, then applies pairing, blacklist, simplex, IORDY, host filter
 * and cable restrictions.  The result is written back into
 * dev->{pio,mwdma,udma}_mask.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 *	CFA Advanced TrueIDE timings are not allowed on a shared
	 *	cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_warn(dev,
			     "device is on DMA blacklist, disabling DMA\n");
	}

	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_warn(dev,
			     "simplex DMA is claimed by other device, disabling DMA\n");
	}

	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/*
	 *	Apply cable rule here.  Don't apply it for packet devices
	 *	which are behind a bridge; the bridge's own cable detect
	 *	governs there.  NOTE(review): the packet/bridge exemption
	 *	is handled by the UDMA>2 gate below plus BRIDGE_OK quirks —
	 *	confirm against upstream history if touching this.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if (cable_is_40wire(ap)) {
			ata_dev_warn(dev,
				     "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
4644
4645
4646
4647
4648
4649
4650
4651
4652
4653
4654
4655
4656
4657
4658
/*
 * ata_dev_set_xfermode - issue SET FEATURES - XFER MODE
 * @dev: device to program (dev->xfer_mode must be set by the caller)
 *
 * Returns 0 on success (including the "ancient device, skip it" case),
 * or the AC_ERR_* mask of the failed internal command.
 */
static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	/* Some controllers and ATAPI devices show flaky interrupt
	 * behavior after setting xfer mode.  Use polling instead.
	 */
	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
	tf.protocol = ATA_PROT_NODATA;

	/* If we are using IORDY we must send the mode setting command */
	if (ata_pio_need_iordy(dev))
		tf.nsect = dev->xfer_mode;
	/* If the device has IORDY and the controller does not - turn it off */
	else if (ata_id_has_iordy(dev->id))
		tf.nsect = 0x01;
	else /* In the ancient relic department - skip all of this */
		return 0;

	/* On some disks the timeout can become excessive; allow 15s */
	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4690
4691
4692
4693
4694
4695
4696
4697
4698
4699
4700
4701
4702
4703
4704
4705
/*
 * ata_dev_set_feature - issue SET FEATURES subcommand @enable with
 * argument @feature (placed in the sector-count register) to @dev.
 *
 * Returns 0 on success or the AC_ERR_* mask on failure.
 */
unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
{
	struct ata_taskfile tf;
	unsigned int err_mask;
	unsigned long timeout = 0;	/* 0 = default internal-cmd timeout */

	/* set up set-features taskfile */
	DPRINTK("set features - SATA features\n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = enable;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = feature;

	/* spin-up can take much longer than a normal command */
	if (enable == SETFEATURES_SPINUP)
		timeout = ata_probe_timeout ?
			  ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT;
	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
EXPORT_SYMBOL_GPL(ata_dev_set_feature);
4731
4732
4733
4734
4735
4736
4737
4738
4739
4740
4741
4742
4743
/*
 * ata_dev_init_params - issue INITIALIZE DEVICE PARAMETERS to @dev,
 * programming the CHS translation geometry (@heads, @sectors).
 *
 * Returns 0 on success or if the device aborted the (optional)
 * command; otherwise the AC_ERR_* mask.
 */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* reject obviously bogus geometry (taskfile can't encode it) */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	/*
	 * The command is optional; a device is free to abort it with
	 * ATA_ABORTED, which is not an error for our purposes.
	 */
	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
		err_mask = 0;

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4774
4775
4776
4777
4778
4779
4780
4781
4782
4783
/*
 * ata_sg_clean - unmap the DMA scatter/gather table of @qc and clear
 * its DMAMAP state.  Counterpart of ata_sg_setup().
 *
 * Note: unmapping uses orig_n_elem (the count passed to dma_map_sg),
 * not n_elem (the possibly-coalesced mapped count).
 */
void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->sg;
	int dir = qc->dma_dir;

	WARN_ON_ONCE(sg == NULL);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	if (qc->n_elem)
		dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->sg = NULL;
}
4800
4801
4802
4803
4804
4805
4806
4807
4808
4809
4810
4811
4812
4813
4814
4815int atapi_check_dma(struct ata_queued_cmd *qc)
4816{
4817 struct ata_port *ap = qc->ap;
4818
4819
4820
4821
4822 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4823 unlikely(qc->nbytes & 15))
4824 return 1;
4825
4826 if (ap->ops->check_atapi_dma)
4827 return ap->ops->check_atapi_dma(qc);
4828
4829 return 0;
4830}
4831
4832
4833
4834
4835
4836
4837
4838
4839
4840
4841
4842
4843
4844
4845
4846
4847int ata_std_qc_defer(struct ata_queued_cmd *qc)
4848{
4849 struct ata_link *link = qc->dev->link;
4850
4851 if (qc->tf.protocol == ATA_PROT_NCQ) {
4852 if (!ata_tag_valid(link->active_tag))
4853 return 0;
4854 } else {
4855 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4856 return 0;
4857 }
4858
4859 return ATA_DEFER_LINK;
4860}
4861
4862void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4863
4864
4865
4866
4867
4868
4869
4870
4871
4872
4873
4874
4875
4876
/*
 * ata_sg_init - attach scatter/gather table @sg of @n_elem entries to
 * @qc and reset the PIO cursor to the first element.
 */
void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
		 unsigned int n_elem)
{
	qc->sg = sg;
	qc->n_elem = n_elem;
	qc->cursg = qc->sg;
}
4884
4885
4886
4887
4888
4889
4890
4891
4892
4893
4894
4895
4896
4897
/*
 * ata_sg_setup - DMA-map the scatter/gather table of @qc.
 *
 * On success, records the original element count (needed later by
 * ata_sg_clean() for unmapping), stores the mapped count, and marks
 * the qc DMAMAP.  Returns 0 on success, -1 if mapping failed.
 */
static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int n_elem;

	VPRINTK("ENTER, ata%u\n", ap->print_id);

	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
	if (n_elem < 1)	/* dma_map_sg() returns 0 on failure */
		return -1;

	DPRINTK("%d sg elements mapped\n", n_elem);
	/* mapping may coalesce entries; keep the original count around */
	qc->orig_n_elem = qc->n_elem;
	qc->n_elem = n_elem;
	qc->flags |= ATA_QCFLAG_DMAMAP;

	return 0;
}
4916
4917
4918
4919
4920
4921
4922
4923
4924
4925
4926
4927
4928
/*
 * swap_buf_le16 - convert @buf_words 16-bit words from little-endian
 * (on-wire ATA byte order) to CPU order, in place.  A no-op on
 * little-endian machines, hence the preprocessor guard.
 */
void swap_buf_le16(u16 *buf, unsigned int buf_words)
{
#ifdef __BIG_ENDIAN
	unsigned int i;

	for (i = 0; i < buf_words; i++)
		buf[i] = le16_to_cpu(buf[i]);
#endif /* __BIG_ENDIAN */
}
4938
4939
4940
4941
4942
4943
4944
4945
4946
4947
/*
 * ata_qc_new_init - allocate and initialize a queued command for @dev
 * with @tag.  On SAS hosts the tag is allocated here instead of being
 * supplied by the caller.  Returns NULL if the port is frozen or no
 * tag is available.
 */
struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_queued_cmd *qc;

	/* no command while frozen */
	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
		return NULL;

	/* libsas case: the SCSI midlayer's tag is unusable, get our own */
	if (ap->flags & ATA_FLAG_SAS_HOST) {
		tag = ata_sas_allocate_tag(ap);
		if (tag < 0)
			return NULL;
	}

	qc = __ata_qc_from_tag(ap, tag);
	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;

	ata_qc_reinit(qc);

	return qc;
}
4974
4975
4976
4977
4978
4979
4980
4981
4982
4983
4984
/*
 * ata_qc_free - release a queued command back to the port.
 *
 * Clears the qc's flags, poisons its tag, and on SAS hosts returns
 * the tag to the port's allocator.
 */
void ata_qc_free(struct ata_queued_cmd *qc)
{
	struct ata_port *ap;
	unsigned int tag;

	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	ap = qc->ap;

	qc->flags = 0;
	tag = qc->tag;
	if (likely(ata_tag_valid(tag))) {
		qc->tag = ATA_TAG_POISON;	/* catch reuse after free */
		if (ap->flags & ATA_FLAG_SAS_HOST)
			ata_sas_free_tag(tag, ap);
	}
}
5001
/*
 * __ata_qc_complete - low-level command completion.
 *
 * Unmaps DMA, releases the link's tag bookkeeping (sactive for NCQ,
 * active_tag otherwise), drops any exclusive-link claim, clears the
 * qc from ap->qc_active, and invokes the completion callback.
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap;
	struct ata_link *link;

	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
	ap = qc->ap;
	link = qc->dev->link;

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		link->sactive &= ~(1 << qc->tag);
		if (!link->sactive)	/* last outstanding NCQ cmd done */
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/*
	 * atapi_request_sense() reuses the qc to finish the command, so
	 * make sure the qc is released before the callback runs.
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
5040
/*
 * fill_result_tf - capture the device's result taskfile into
 * qc->result_tf via the controller's qc_fill_rtf hook, preserving the
 * flags of the issued taskfile.
 */
static void fill_result_tf(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	qc->result_tf.flags = qc->tf.flags;
	ap->ops->qc_fill_rtf(qc);
}
5048
/*
 * ata_verify_xfer - clear DUBIOUS_XFER after a successful transfer.
 *
 * A data command completing without error is proof the current
 * transfer mode works.  PIO completions don't vouch for DMA modes,
 * so the flag is kept when a DMA-capable device completed a PIO cmd.
 */
static void ata_verify_xfer(struct ata_queued_cmd *qc)
{
	struct ata_device *dev = qc->dev;

	if (ata_is_nodata(qc->tf.protocol))
		return;

	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
		return;

	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
}
5061
5062
5063
5064
5065
5066
5067
5068
5069
5070
5071
5072
5073
5074
5075
5076
/*
 * ata_qc_complete - complete an active qc.
 *
 * With new-style EH: failed commands and the internal command take
 * dedicated paths (EH scheduling / direct completion); successful
 * commands that change device configuration (SET FEATURES write
 * cache, INIT DEV PARAMS, SET MULTI) schedule revalidation, and
 * SLEEP marks the device sleeping.  Old-style EH just fills the
 * result taskfile when needed and completes.
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/*
	 * In the new EH world, a failed qc is never completed directly
	 * here - EH owns it.  This function only finishes successful
	 * commands or hands failures over to the error handler.
	 */
	if (ap->ops->error_handler) {
		struct ata_device *dev = qc->dev;
		struct ata_eh_info *ehi = &dev->link->eh_info;

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		/*
		 * The internal command is always handled directly,
		 * even when the port is frozen.
		 */
		if (unlikely(ata_tag_internal(qc->tag))) {
			fill_result_tf(qc);
			trace_ata_qc_complete_internal(qc);
			__ata_qc_complete(qc);
			return;
		}

		/*
		 * Non-internal qc has failed.  Fill the result TF and
		 * summon EH.
		 */
		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			fill_result_tf(qc);
			trace_ata_qc_complete_failed(qc);
			ata_qc_schedule_eh(qc);
			return;
		}

		/* successful completion on a frozen port is a bug */
		WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		trace_ata_qc_complete_done(qc);
		/*
		 * Some commands need post-processing after successful
		 * completion.
		 */
		switch (qc->tf.command) {
		case ATA_CMD_SET_FEATURES:
			if (qc->tf.feature != SETFEATURES_WC_ON &&
			    qc->tf.feature != SETFEATURES_WC_OFF)
				break;
			/* fall through - WC change requires revalidation */
		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
		case ATA_CMD_SET_MULTI: /* multi_count changed */
			/* revalidate device */
			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
			ata_port_schedule_eh(ap);
			break;

		case ATA_CMD_SLEEP:
			dev->flags |= ATA_DFLAG_SLEEPING;
			break;
		}

		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
			ata_verify_xfer(qc);

		__ata_qc_complete(qc);
	} else {
		/* old EH: if EH is already scheduled, leave the qc alone */
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
5166
5167
5168
5169
5170
5171
5172
5173
5174
5175
5176
5177
5178
5179
5180
5181
5182
5183
5184
5185
5186
/*
 * ata_qc_complete_multiple - complete all qcs whose bits differ
 * between ap->qc_active and the controller-reported @qc_active.
 *
 * A bit set in @qc_active that we don't consider active indicates a
 * driver/hardware inconsistency and is rejected with -EINVAL.
 * Returns the number of commands completed.
 */
int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
{
	int nr_done = 0;
	u32 done_mask;

	/* bits that changed state are the completed commands */
	done_mask = ap->qc_active ^ qc_active;

	if (unlikely(done_mask & qc_active)) {
		ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n",
			     ap->qc_active, qc_active);
		return -EINVAL;
	}

	while (done_mask) {
		struct ata_queued_cmd *qc;
		unsigned int tag = __ffs(done_mask);	/* lowest set bit */

		qc = ata_qc_from_tag(ap, tag);
		if (qc) {
			ata_qc_complete(qc);
			nr_done++;
		}
		done_mask &= ~(1 << tag);
	}

	return nr_done;
}
5214
5215
5216
5217
5218
5219
5220
5221
5222
5223
5224
5225
5226
/*
 * ata_qc_issue - issue a prepared qc to the controller.
 *
 * Updates the link's tag bookkeeping, DMA-maps the sg table when the
 * protocol requires it, aborts the link instead if the device is
 * sleeping (it needs a reset to wake), then prepares and issues the
 * taskfile.  Any failure completes the qc with an error mask set.
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;
	u8 prot = qc->tf.protocol;

	/*
	 * Make sure only one non-NCQ command is outstanding.  The check
	 * is skipped for old-EH drivers to avoid triggering on commands
	 * issued during EH itself.
	 */
	WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	if (ata_is_ncq(prot)) {
		WARN_ON_ONCE(link->sactive & (1 << qc->tag));

		if (!link->sactive)	/* first NCQ command on this link */
			ap->nr_active_links++;
		link->sactive |= 1 << qc->tag;
	} else {
		WARN_ON_ONCE(link->sactive);

		ap->nr_active_links++;
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	/*
	 * We guarantee to LLDs that they will have at least one
	 * non-zero sg if the command is a data command.
	 */
	if (WARN_ON_ONCE(ata_is_data(prot) &&
			 (!qc->sg || !qc->n_elem || !qc->nbytes)))
		goto sys_err;

	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
				 (ap->flags & ATA_FLAG_PIO_DMA)))
		if (ata_sg_setup(qc))
			goto sys_err;

	/* if device is sleeping, schedule reset and abort the link */
	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
		link->eh_info.action |= ATA_EH_RESET;
		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
		ata_link_abort(link);
		return;
	}

	ap->ops->qc_prep(qc);
	trace_ata_qc_issue(qc);
	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sys_err:
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	ata_qc_complete(qc);
}
5288
5289
5290
5291
5292
5293
5294
5295
5296
5297
5298
5299
5300
/*
 * sata_scr_valid - whether SCR (SATA status/control) registers can be
 * accessed on @link: requires a SATA port with an scr_read hook.
 */
int sata_scr_valid(struct ata_link *link)
{
	struct ata_port *ap = link->ap;

	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
}
5307
5308
5309
5310
5311
5312
5313
5314
5315
5316
5317
5318
5319
5320
5321
5322
5323
/*
 * sata_scr_read - read SCR register @reg of @link into *@val.
 *
 * Host links are read through the controller; PMP links go through
 * the port multiplier.  Returns 0 on success, -EOPNOTSUPP when the
 * host link has no SCR access, or a negative errno.
 */
int sata_scr_read(struct ata_link *link, int reg, u32 *val)
{
	if (ata_is_host_link(link)) {
		if (sata_scr_valid(link))
			return link->ap->ops->scr_read(link, reg, val);
		return -EOPNOTSUPP;
	}

	return sata_pmp_scr_read(link, reg, val);
}
5334
5335
5336
5337
5338
5339
5340
5341
5342
5343
5344
5345
5346
5347
5348
5349
5350
/*
 * sata_scr_write - write @val to SCR register @reg of @link.
 * Mirror of sata_scr_read(); same return conventions.
 */
int sata_scr_write(struct ata_link *link, int reg, u32 val)
{
	if (ata_is_host_link(link)) {
		if (sata_scr_valid(link))
			return link->ap->ops->scr_write(link, reg, val);
		return -EOPNOTSUPP;
	}

	return sata_pmp_scr_write(link, reg, val);
}
5361
5362
5363
5364
5365
5366
5367
5368
5369
5370
5371
5372
5373
5374
5375
5376
/*
 * sata_scr_write_flush - write SCR register @reg and flush by reading
 * it back.  For host links only; PMP writes are posted as usual.
 */
int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
{
	if (ata_is_host_link(link)) {
		int rc;

		if (sata_scr_valid(link)) {
			rc = link->ap->ops->scr_write(link, reg, val);
			if (rc == 0)	/* read back to flush the write */
				rc = link->ap->ops->scr_read(link, reg, &val);
			return rc;
		}
		return -EOPNOTSUPP;
	}

	return sata_pmp_scr_write(link, reg, val);
}
5393
5394
5395
5396
5397
5398
5399
5400
5401
5402
5403
5404
5405
5406
5407
5408bool ata_phys_link_online(struct ata_link *link)
5409{
5410 u32 sstatus;
5411
5412 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5413 ata_sstatus_online(sstatus))
5414 return true;
5415 return false;
5416}
5417
5418
5419
5420
5421
5422
5423
5424
5425
5426
5427
5428
5429
5430
5431
5432bool ata_phys_link_offline(struct ata_link *link)
5433{
5434 u32 sstatus;
5435
5436 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5437 !ata_sstatus_online(sstatus))
5438 return true;
5439 return false;
5440}
5441
5442
5443
5444
5445
5446
5447
5448
5449
5450
5451
5452
5453
5454
5455
5456
5457
/*
 * ata_link_online - whether @link (or its M/S slave companion, if the
 * port has one) is online.  Must not be called on a slave link itself.
 */
bool ata_link_online(struct ata_link *link)
{
	struct ata_link *slave = link->ap->slave_link;

	WARN_ON(link == slave);	/* shouldn't be called on slave link */

	return ata_phys_link_online(link) ||
		(slave && ata_phys_link_online(slave));
}
5467
5468
5469
5470
5471
5472
5473
5474
5475
5476
5477
5478
5479
5480
5481
5482
5483
/*
 * ata_link_offline - whether @link AND its slave companion (if any)
 * are both offline.  Must not be called on a slave link itself.
 */
bool ata_link_offline(struct ata_link *link)
{
	struct ata_link *slave = link->ap->slave_link;

	WARN_ON(link == slave);	/* shouldn't be called on slave link */

	return ata_phys_link_offline(link) &&
		(!slave || ata_phys_link_offline(slave));
}
5493
5494#ifdef CONFIG_PM
/*
 * ata_port_request_pm - schedule a PM (suspend/resume) operation on
 * @ap via the error handler.
 *
 * @mesg is stored for EH to act on, @action/@ehi_flags are applied to
 * every link, and EH is scheduled.  When @async is false the function
 * waits for EH to finish the operation before returning.
 */
static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
				unsigned int action, unsigned int ehi_flags,
				bool async)
{
	struct ata_link *link;
	unsigned long flags;

	/*
	 * A previous PM operation might still be in flight; wait for it
	 * to finish so ap->pm_mesg isn't overwritten underneath EH.
	 */
	if (ap->pflags & ATA_PFLAG_PM_PENDING) {
		ata_port_wait_eh(ap);
		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
	}

	/* request PM operation to EH */
	spin_lock_irqsave(ap->lock, flags);

	ap->pm_mesg = mesg;
	ap->pflags |= ATA_PFLAG_PM_PENDING;
	ata_for_each_link(link, ap, HOST_FIRST) {
		link->eh_info.action |= action;
		link->eh_info.flags |= ehi_flags;
	}

	ata_port_schedule_eh(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!async) {
		ata_port_wait_eh(ap);
		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
	}
}
5529
5530
5531
5532
5533
5534
5535
5536
/* EH flags used for all suspend requests: quiet, no autopsy/recovery */
static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
						 | ATA_EHI_NO_AUTOPSY
						 | ATA_EHI_NO_RECOVERY;
5540
/* ata_port_suspend - synchronously suspend @ap with PM message @mesg */
static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
}
5545
/* ata_port_suspend_async - schedule suspend of @ap without waiting */
static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
}
5550
/* dev_pm_ops .suspend - skip if already runtime-suspended */
static int ata_port_pm_suspend(struct device *dev)
{
	struct ata_port *ap = to_ata_port(dev);

	if (pm_runtime_suspended(dev))
		return 0;

	ata_port_suspend(ap, PMSG_SUSPEND);
	return 0;
}
5561
/* dev_pm_ops .freeze (hibernation) - skip if runtime-suspended */
static int ata_port_pm_freeze(struct device *dev)
{
	struct ata_port *ap = to_ata_port(dev);

	if (pm_runtime_suspended(dev))
		return 0;

	ata_port_suspend(ap, PMSG_FREEZE);
	return 0;
}
5572
/* dev_pm_ops .poweroff - suspend the port for hibernation power-off */
static int ata_port_pm_poweroff(struct device *dev)
{
	ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE);
	return 0;
}
5578
/* EH flags used for all resume requests: quiet, no autopsy */
static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY
						| ATA_EHI_QUIET;
5581
/* ata_port_resume - synchronously resume @ap (forces a reset via EH) */
static void ata_port_resume(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false);
}
5586
/* ata_port_resume_async - schedule resume of @ap without waiting */
static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true);
}
5591
/*
 * dev_pm_ops .resume - kick off an async resume and re-arm runtime PM
 * (disable/set_active/enable resets the runtime-PM state machine).
 */
static int ata_port_pm_resume(struct device *dev)
{
	ata_port_resume_async(to_ata_port(dev), PMSG_RESUME);
	pm_runtime_disable(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	return 0;
}
5600
5601
5602
5603
5604
5605
5606
5607
5608
/*
 * dev_pm_ops .runtime_idle - veto runtime suspend (-EBUSY) while any
 * enabled ATAPI device without ZPODD support is attached; such
 * devices can't be safely powered down at idle.
 */
static int ata_port_runtime_idle(struct device *dev)
{
	struct ata_port *ap = to_ata_port(dev);
	struct ata_link *link;
	struct ata_device *adev;

	ata_for_each_link(link, ap, HOST_FIRST) {
		ata_for_each_dev(adev, link, ENABLED)
			if (adev->class == ATA_DEV_ATAPI &&
			    !zpodd_dev_enabled(adev))
				return -EBUSY;
	}

	return 0;
}
5624
/* dev_pm_ops .runtime_suspend - synchronous auto-suspend of the port */
static int ata_port_runtime_suspend(struct device *dev)
{
	ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND);
	return 0;
}
5630
/* dev_pm_ops .runtime_resume - synchronous auto-resume of the port */
static int ata_port_runtime_resume(struct device *dev)
{
	ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME);
	return 0;
}
5636
/* PM operations for the ata_port device type (system + runtime PM) */
static const struct dev_pm_ops ata_port_pm_ops = {
	.suspend = ata_port_pm_suspend,
	.resume = ata_port_pm_resume,
	.freeze = ata_port_pm_freeze,
	.thaw = ata_port_pm_resume,	/* thaw/restore share the resume path */
	.poweroff = ata_port_pm_poweroff,
	.restore = ata_port_pm_resume,

	.runtime_suspend = ata_port_runtime_suspend,
	.runtime_resume = ata_port_runtime_resume,
	.runtime_idle = ata_port_runtime_idle,
};
5649
5650
5651
5652
5653
5654
/*
 * ata_sas_port_suspend - async port suspend for libsas, which manages
 * its own synchronization and must not block here.
 */
void ata_sas_port_suspend(struct ata_port *ap)
{
	ata_port_suspend_async(ap, PMSG_SUSPEND);
}
EXPORT_SYMBOL_GPL(ata_sas_port_suspend);
5660
/* ata_sas_port_resume - async port resume counterpart for libsas */
void ata_sas_port_resume(struct ata_port *ap)
{
	ata_port_resume_async(ap, PMSG_RESUME);
}
EXPORT_SYMBOL_GPL(ata_sas_port_resume);
5666
5667
5668
5669
5670
5671
5672
5673
/*
 * ata_host_suspend - record the host's PM state.  Per-port suspension
 * is handled through the port devices' PM callbacks; only the power
 * state bookkeeping happens here.  Always succeeds.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	host->dev->power.power_state = mesg;
	return 0;
}
5679
5680
5681
5682
5683
5684
5685
/* ata_host_resume - mark the host's power state as fully on */
void ata_host_resume(struct ata_host *host)
{
	host->dev->power.power_state = PMSG_ON;
}
5690#endif
5691
/* device_type for ata_port devices; carries the PM ops when enabled */
struct device_type ata_port_type = {
	.name = "ata_port",
#ifdef CONFIG_PM
	.pm = &ata_port_pm_ops,
#endif
};
5698
5699
5700
5701
5702
5703
5704
5705
5706
5707
/*
 * ata_dev_init - (re)initialize @dev for probing.
 *
 * Resets the link's SPD limit, clears per-probe device flags and
 * horkage under the port lock, zeroes the clearable portion of the
 * device struct, and opens up all transfer-mode masks.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = ata_dev_phys_link(dev);
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the attached device, reset together */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/*
	 * High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	/* only the CLEAR_BEGIN..CLEAR_END window is wiped; fields outside
	 * it (link, devno, ...) survive re-probing */
	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
5733
5734
5735
5736
5737
5738
5739
5740
5741
5742
5743
5744
/*
 * ata_link_init - initialize @link of @ap as PMP link number @pmp
 * and initialize all of its devices.
 */
void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
{
	int i;

	/* clear everything except for devices */
	memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
	       ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);

	link->ap = ap;
	link->pmp = pmp;
	link->active_tag = ATA_TAG_POISON;
	link->hw_sata_spd_limit = UINT_MAX;

	/* can't use iterator, link not yet registered */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &link->device[i];

		dev->link = link;
		dev->devno = dev - link->device;
#ifdef CONFIG_ATA_ACPI
		dev->gtf_filter = ata_acpi_gtf_filter;
#endif
		ata_dev_init(dev);
	}
}
5770
5771
5772
5773
5774
5775
5776
5777
5778
5779
5780
5781
5782
5783
/*
 * sata_link_init_spd - initialize @link's hardware SATA speed limit
 * from the current SControl SPD field, apply any forced limits, and
 * save SControl for later restoration.  Returns 0 or -errno from the
 * SCR read.
 */
int sata_link_init_spd(struct ata_link *link)
{
	u8 spd;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
	if (rc)
		return rc;

	/* SPD field (bits 7:4): nonzero means a configured speed cap */
	spd = (link->saved_scontrol >> 4) & 0xf;
	if (spd)
		link->hw_sata_spd_limit &= (1 << spd) - 1;

	ata_force_link_limits(link);

	link->sata_spd_limit = link->hw_sata_spd_limit;

	return 0;
}
5803
5804
5805
5806
5807
5808
5809
5810
5811
5812
5813
5814
5815
/*
 * ata_port_alloc - allocate and minimally initialize an ata_port for
 * @host.  The port starts frozen and in INITIALIZING state; most
 * fields are filled in later by the LLD and registration code.
 * Returns the port or NULL on allocation failure.
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
	ap->lock = &host->lock;
	ap->print_id = -1;	/* assigned at registration time */
	ap->local_port_no = -1;
	ap->host = host;
	ap->dev = host->dev;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	mutex_init(&ap->scsi_scan_mutex);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);
	init_completion(&ap->park_req_pending);
	init_timer_deferrable(&ap->fastdrain_timer);
	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
	ap->fastdrain_timer.data = (unsigned long)ap;

	ap->cbl = ATA_CBL_NONE;

	ata_link_init(ap, &ap->link, 0);

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	ata_sff_port_init(ap);

	return ap;
}
5864
/*
 * ata_host_release - devres release callback for an ata_host: drops
 * each port's SCSI host reference, frees PMP/slave links and the
 * ports themselves, then clears the driver data pointer.
 */
static void ata_host_release(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if (ap->scsi_host)
			scsi_host_put(ap->scsi_host);

		kfree(ap->pmp_link);
		kfree(ap->slave_link);
		kfree(ap);
		host->ports[i] = NULL;
	}

	dev_set_drvdata(gendev, NULL);
}
5887
5888
5889
5890
5891
5892
5893
5894
5895
5896
5897
5898
5899
5900
5901
5902
5903
5904
5905
5906
5907
/*
 * ata_host_alloc - allocate a devres-managed ata_host with @max_ports
 * ports attached to @dev.
 *
 * All allocations are made inside a devres group so a mid-way failure
 * releases everything; on success the group is dissolved into the
 * caller's.  Returns the host or NULL on failure.
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;

	DPRINTK("ENTER\n");

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return NULL;

	/* alloc a container for our list of ATA ports (buses);
	 * the +1 leaves a NULL sentinel after the port array */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);

	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
	if (!host)
		goto err_out;

	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	mutex_init(&host->eh_mutex);
	host->dev = dev;
	host->n_ports = max_ports;

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

 err_out:
	devres_release_group(dev, NULL);
	return NULL;
}
5953
5954
5955
5956
5957
5958
5959
5960
5961
5962
5963
5964
5965
5966
5967
5968
5969
/*
 * ata_host_alloc_pinfo - allocate a host and seed each port from the
 * NULL-terminated port-info array @ppi.
 *
 * When @ppi has fewer entries than @n_ports, the last entry is reused
 * for the remaining ports.  The first non-dummy port ops become the
 * host's default ops.  Returns the host or NULL on failure.
 */
struct ata_host *ata_host_alloc_pinfo(struct device *dev,
				      const struct ata_port_info * const * ppi,
				      int n_ports)
{
	const struct ata_port_info *pi;
	struct ata_host *host;
	int i, j;

	host = ata_host_alloc(dev, n_ports);
	if (!host)
		return NULL;

	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* advance through @ppi until it runs out, then stick
		 * with the last pi seen */
		if (ppi[j])
			pi = ppi[j++];

		ap->pio_mask = pi->pio_mask;
		ap->mwdma_mask = pi->mwdma_mask;
		ap->udma_mask = pi->udma_mask;
		ap->flags |= pi->flags;
		ap->link.flags |= pi->link_flags;
		ap->ops = pi->port_ops;

		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
			host->ops = pi->port_ops;
	}

	return host;
}
6001
6002
6003
6004
6005
6006
6007
6008
6009
6010
6011
6012
6013
6014
6015
6016
6017
6018
6019
6020
6021
6022
6023
6024
6025
6026
6027
6028
6029
6030
6031
6032
6033
6034
6035
6036
6037
6038
6039
6040
6041
6042
6043
6044
6045
6046
6047
/*
 * ata_slave_link_init - allocate and attach a slave link to @ap for
 * controllers where master and slave devices have separate PHYs.
 * Incompatible with PMP.  Returns 0 or -ENOMEM.
 */
int ata_slave_link_init(struct ata_port *ap)
{
	struct ata_link *link;

	WARN_ON(ap->slave_link);
	WARN_ON(ap->flags & ATA_FLAG_PMP);

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	ata_link_init(ap, link, 1);	/* slave link is pmp number 1 */
	ap->slave_link = link;
	return 0;
}
6063
/*
 * ata_host_stop - devres action that stops every port and then the
 * host itself, mirroring the start sequence of ata_host_start().
 */
static void ata_host_stop(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	WARN_ON(!(host->flags & ATA_HOST_STARTED));

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}

	if (host->ops->host_stop)
		host->ops->host_stop(host);
}
6081
6082
6083
6084
6085
6086
6087
6088
6089
6090
6091
6092
6093
6094
6095
6096
6097
6098
6099
6100
6101
/*
 * ata_finalize_port_ops - flatten the ->inherits chain of @ops.
 *
 * ata_port_operations is laid out as an array of function pointers up
 * to the ->inherits member; each NULL slot is filled from the nearest
 * ancestor in the chain, ERR_PTR markers (used to explicitly mask an
 * inherited op) become NULL, and ->inherits is cleared so the work is
 * done only once.  Serialized by a local spinlock since port ops
 * structures can be shared between hosts.
 */
static void ata_finalize_port_ops(struct ata_port_operations *ops)
{
	static DEFINE_SPINLOCK(lock);
	const struct ata_port_operations *cur;
	void **begin = (void **)ops;
	void **end = (void **)&ops->inherits;
	void **pp;

	if (!ops || !ops->inherits)
		return;

	spin_lock(&lock);

	/* walk ancestors, filling in unset slots from each in turn */
	for (cur = ops->inherits; cur; cur = cur->inherits) {
		void **inherit = (void **)cur;

		for (pp = begin; pp < end; pp++, inherit++)
			if (!*pp)
				*pp = *inherit;
	}

	/* ERR_PTR slots mean "explicitly not implemented" */
	for (pp = begin; pp < end; pp++)
		if (IS_ERR(*pp))
			*pp = NULL;

	ops->inherits = NULL;

	spin_unlock(&lock);
}
6131
6132
6133
6134
6135
6136
6137
6138
6139
6140
6141
6142
6143
6144
6145
6146
6147
/*
 * ata_host_start - start all ports of @host.
 *
 * Finalizes port ops, picks default host ops from the first non-dummy
 * port, registers an ata_host_stop devres action when any stop hook
 * exists, then calls each port's port_start and freezes it.  On
 * failure already-started ports are stopped again.  Returns 0 or
 * -errno; idempotent once ATA_HOST_STARTED is set.
 */
int ata_host_start(struct ata_host *host)
{
	int have_stop = 0;
	void *start_dr = NULL;
	int i, rc;

	if (host->flags & ATA_HOST_STARTED)
		return 0;

	ata_finalize_port_ops(host->ops);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_finalize_port_ops(ap->ops);

		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_stop)
			have_stop = 1;
	}

	if (host->ops->host_stop)
		have_stop = 1;

	/* allocate the stop action up front so starting can't succeed
	 * and then fail to register the cleanup */
	if (have_stop) {
		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
		if (!start_dr)
			return -ENOMEM;
	}

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				if (rc != -ENODEV)
					dev_err(host->dev,
						"failed to start port %d (errno=%d)\n",
						i, rc);
				goto err_out;
			}
		}
		/* ports stay frozen until probing begins */
		ata_eh_freeze_port(ap);
	}

	if (start_dr)
		devres_add(host->dev, start_dr);
	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	/* unwind: stop the ports that started successfully */
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	devres_free(start_dr);
	return rc;
}
6211
6212
6213
6214
6215
6216
6217
6218
/*
 * ata_host_init - basic initialization of an LLD-allocated (non-devres)
 * ata_host: locks, default queue depth, device and ops pointers.
 */
void ata_host_init(struct ata_host *host, struct device *dev,
		   struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	mutex_init(&host->eh_mutex);
	host->n_tags = ATA_MAX_QUEUE - 1;
	host->dev = dev;
	host->ops = ops;
}
6228
/*
 * __ata_port_probe - kick off probing of all devices on @ap by
 * scheduling a quiet, no-autopsy reset through EH.  Transitions the
 * port from INITIALIZING to LOADING.
 */
void __ata_port_probe(struct ata_port *ap)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	unsigned long flags;

	/* kick EH for boot probing */
	spin_lock_irqsave(ap->lock, flags);

	ehi->probe_mask |= ATA_ALL_DEVICES;
	ehi->action |= ATA_EH_RESET;
	ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

	ap->pflags &= ~ATA_PFLAG_INITIALIZING;
	ap->pflags |= ATA_PFLAG_LOADING;
	ata_port_schedule_eh(ap);

	spin_unlock_irqrestore(ap->lock, flags);
}
6247
/*
 * ata_port_probe - probe @ap and wait for completion.  New-style EH
 * drivers probe via the error handler; old-style drivers use the
 * legacy synchronous ata_bus_probe().  Returns 0 or -errno (old-style
 * only; the EH path always returns 0).
 */
int ata_port_probe(struct ata_port *ap)
{
	int rc = 0;

	if (ap->ops->error_handler) {
		__ata_port_probe(ap);
		ata_port_wait_eh(ap);
	} else {
		DPRINTK("ata%u: bus probe begin\n", ap->print_id);
		rc = ata_bus_probe(ap);
		DPRINTK("ata%u: bus probe end\n", ap->print_id);
	}
	return rc;
}
6262
6263
/*
 * async_port_probe - async_schedule() worker that probes one port and
 * then scans its SCSI host.
 */
static void async_port_probe(void *data, async_cookie_t cookie)
{
	struct ata_port *ap = data;

	/*
	 * If we're not allowed to scan this host in parallel,
	 * we need to wait until all previous scans have completed
	 * before going further.
	 * Jeff Garzik says this is only within a controller, so we
	 * don't need to wait for port 0, only for later ports.
	 */
	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
		async_synchronize_cookie(cookie);

	(void)ata_port_probe(ap);

	/* in order to keep device order, we need to synchronize at this
	 * point before scanning the SCSI host */
	async_synchronize_cookie(cookie);

	ata_scsi_scan_host(ap, 1);
}
6285
6286
6287
6288
6289
6290
6291
6292
6293
6294
6295
6296
6297
6298
6299
6300
6301
/*
 * ata_host_register - register a started host with the ATA/SCSI
 * layers and begin async probing.
 *
 * Assigns print/local IDs, adds the transport port objects, attaches
 * SCSI hosts, finishes per-port setup (cable type, link speed, xfer
 * mask banner), and schedules async_port_probe() for each port.
 * Returns 0 or -errno, unwinding the transport ports on failure.
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_err(host->dev, "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/*
	 * Blow away unused ports.  This happens when LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++) {
		host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
		host->ports[i]->local_port_no = i + 1;
	}

	/* Create associated sysfs transport objects */
	for (i = 0; i < host->n_ports; i++) {
		rc = ata_tport_add(host->dev,host->ports[i]);
		if (rc) {
			goto err_tadd;
		}
	}

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		goto err_tadd;

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned long xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);
		if (ap->slave_link)
			sata_link_init_spd(ap->slave_link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap)) {
			ata_port_info(ap, "%cATA max %s %s\n",
				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
				      ata_mode_string(xfer_mask),
				      ap->link.eh_info.desc);
			ata_ehi_clear_desc(&ap->link.eh_info);
		} else
			ata_port_info(ap, "DUMMY\n");
	}

	/* perform each probe asynchronously */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		async_schedule(async_port_probe, ap);
	}

	return 0;

 err_tadd:
	while (--i >= 0) {
		ata_tport_delete(host->ports[i]);
	}
	return rc;

}
6383
6384
6385
6386
6387
6388
6389
6390
6391
6392
6393
6394
6395
6396
6397
6398
6399
6400
6401
6402
6403
6404
6405
6406
/*
 * ata_host_activate - one-stop start + IRQ request + register helper.
 *
 * Starts the host, requests @irq (skipped when @irq is 0, e.g. for
 * polling-only hosts), records the IRQ in each port's description,
 * and registers the host.  The IRQ is released again if registration
 * fails.  Returns 0 or -errno.
 */
int ata_host_activate(struct ata_host *host, int irq,
		      irq_handler_t irq_handler, unsigned long irq_flags,
		      struct scsi_host_template *sht)
{
	int i, rc;
	char *irq_desc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	/* Special case for polling mode */
	if (!irq) {
		WARN_ON(irq_handler);
		return ata_host_register(host, sht);
	}

	irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]",
				  dev_driver_string(host->dev),
				  dev_name(host->dev));
	if (!irq_desc)
		return -ENOMEM;

	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
			      irq_desc, host);
	if (rc)
		return rc;

	for (i = 0; i < host->n_ports; i++)
		ata_port_desc(host->ports[i], "irq %d", irq);

	rc = ata_host_register(host, sht);
	/* if failed, just free the IRQ and leave ports alone */
	if (rc)
		devm_free_irq(host->dev, irq, host);

	return rc;
}
6445
6446
6447
6448
6449
6450
6451
6452
6453
6454
6455
6456
/*
 * ata_port_detach - detach all devices on @ap and tear down its SCSI
 * and transport objects.
 *
 * With new-style EH, sets UNLOADING and lets the error handler detach
 * every device, then waits for it; old-style drivers skip straight to
 * the teardown.  Must be called before the host is freed.
 */
static void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	struct ata_link *link;
	struct ata_device *dev;

	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	ata_port_schedule_eh(ap);
	spin_unlock_irqrestore(ap->lock, flags);

	/* wait till EH commits suicide */
	ata_port_wait_eh(ap);

	/* it better be dead now */
	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));

	cancel_delayed_work_sync(&ap->hotplug_task);

 skip_eh:
	/* clean up zpodd on port removal */
	ata_for_each_link(link, ap, HOST_FIRST) {
		ata_for_each_dev(dev, link, ALL) {
			if (zpodd_dev_enabled(dev))
				zpodd_exit(dev);
		}
	}
	if (ap->pmp_link) {
		int i;
		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
			ata_tlink_delete(&ap->pmp_link[i]);
	}

	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
	ata_tport_delete(ap);
}
6497
6498
6499
6500
6501
6502
6503
6504
6505
6506
6507void ata_host_detach(struct ata_host *host)
6508{
6509 int i;
6510
6511 for (i = 0; i < host->n_ports; i++)
6512 ata_port_detach(host->ports[i]);
6513
6514
6515 ata_acpi_dissociate(host);
6516}
6517
6518#ifdef CONFIG_PCI
6519
6520
6521
6522
6523
6524
6525
6526
6527
6528
6529
6530
/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that hot-unplug or
 *	module unload event has occurred.  Detach the ata_host that was
 *	stashed as the PCI driver data.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	ata_host_detach(pci_get_drvdata(pdev));
}
6537
6538
6539int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6540{
6541 unsigned long tmp = 0;
6542
6543 switch (bits->width) {
6544 case 1: {
6545 u8 tmp8 = 0;
6546 pci_read_config_byte(pdev, bits->reg, &tmp8);
6547 tmp = tmp8;
6548 break;
6549 }
6550 case 2: {
6551 u16 tmp16 = 0;
6552 pci_read_config_word(pdev, bits->reg, &tmp16);
6553 tmp = tmp16;
6554 break;
6555 }
6556 case 4: {
6557 u32 tmp32 = 0;
6558 pci_read_config_dword(pdev, bits->reg, &tmp32);
6559 tmp = tmp32;
6560 break;
6561 }
6562
6563 default:
6564 return -EINVAL;
6565 }
6566
6567 tmp &= bits->mask;
6568
6569 return (tmp == bits->val) ? 1 : 0;
6570}
6571
6572#ifdef CONFIG_PM
/*
 * Save PCI config space and disable the device; drop to D3hot only for
 * events that actually put the system to sleep.  Config space must be
 * saved before the device is disabled.
 */
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event & PM_EVENT_SLEEP)
		pci_set_power_state(pdev, PCI_D3hot);
}
6581
/*
 * PCI-level resume: bring the device back to D0 and restore its saved
 * config space before re-enabling it and re-arming bus mastering.
 * Returns 0 on success, -errno if the device could not be re-enabled.
 */
int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	/* D0 first -- config space can only be restored on a powered device */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev,
			"failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}
6599
6600int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6601{
6602 struct ata_host *host = pci_get_drvdata(pdev);
6603 int rc = 0;
6604
6605 rc = ata_host_suspend(host, mesg);
6606 if (rc)
6607 return rc;
6608
6609 ata_pci_device_do_suspend(pdev, mesg);
6610
6611 return 0;
6612}
6613
/*
 * Combined resume helper: power the PCI device back up, then resume
 * the ATA host only if the PCI-level resume succeeded.
 */
int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	int rc = ata_pci_device_do_resume(pdev);

	if (!rc)
		ata_host_resume(host);

	return rc;
}
6624#endif
6625
6626#endif
6627
6628
6629
6630
6631
6632
6633
6634
6635
6636
6637
6638
/**
 *	ata_platform_remove_one - Platform layer callback for device removal
 *	@pdev: Platform device that was removed
 *
 *	Platform layer indicates to libata via this hook that hot-unplug or
 *	module unload event has occurred.  Detach the host.
 *
 *	LOCKING:
 *	Inherited from platform layer (may sleep).
 */
int ata_platform_remove_one(struct platform_device *pdev)
{
	ata_host_detach(platform_get_drvdata(pdev));

	return 0;
}
6647
6648static int __init ata_parse_force_one(char **cur,
6649 struct ata_force_ent *force_ent,
6650 const char **reason)
6651{
6652 static const struct ata_force_param force_tbl[] __initconst = {
6653 { "40c", .cbl = ATA_CBL_PATA40 },
6654 { "80c", .cbl = ATA_CBL_PATA80 },
6655 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
6656 { "unk", .cbl = ATA_CBL_PATA_UNK },
6657 { "ign", .cbl = ATA_CBL_PATA_IGN },
6658 { "sata", .cbl = ATA_CBL_SATA },
6659 { "1.5Gbps", .spd_limit = 1 },
6660 { "3.0Gbps", .spd_limit = 2 },
6661 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
6662 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
6663 { "noncqtrim", .horkage_on = ATA_HORKAGE_NO_NCQ_TRIM },
6664 { "ncqtrim", .horkage_off = ATA_HORKAGE_NO_NCQ_TRIM },
6665 { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID },
6666 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
6667 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
6668 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
6669 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
6670 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
6671 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
6672 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
6673 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
6674 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
6675 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
6676 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
6677 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
6678 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6679 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6680 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6681 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6682 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6683 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6684 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6685 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6686 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6687 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6688 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6689 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6690 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6691 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6692 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6693 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6694 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6695 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6696 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6697 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6698 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6699 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
6700 { "nohrst", .lflags = ATA_LFLAG_NO_HRST },
6701 { "nosrst", .lflags = ATA_LFLAG_NO_SRST },
6702 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6703 { "rstonce", .lflags = ATA_LFLAG_RST_ONCE },
6704 { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR },
6705 { "disable", .horkage_on = ATA_HORKAGE_DISABLE },
6706 };
6707 char *start = *cur, *p = *cur;
6708 char *id, *val, *endp;
6709 const struct ata_force_param *match_fp = NULL;
6710 int nr_matches = 0, i;
6711
6712
6713 while (*p != '\0' && *p != ',')
6714 p++;
6715
6716 if (*p == '\0')
6717 *cur = p;
6718 else
6719 *cur = p + 1;
6720
6721 *p = '\0';
6722
6723
6724 p = strchr(start, ':');
6725 if (!p) {
6726 val = strstrip(start);
6727 goto parse_val;
6728 }
6729 *p = '\0';
6730
6731 id = strstrip(start);
6732 val = strstrip(p + 1);
6733
6734
6735 p = strchr(id, '.');
6736 if (p) {
6737 *p++ = '\0';
6738 force_ent->device = simple_strtoul(p, &endp, 10);
6739 if (p == endp || *endp != '\0') {
6740 *reason = "invalid device";
6741 return -EINVAL;
6742 }
6743 }
6744
6745 force_ent->port = simple_strtoul(id, &endp, 10);
6746 if (p == endp || *endp != '\0') {
6747 *reason = "invalid port/link";
6748 return -EINVAL;
6749 }
6750
6751 parse_val:
6752
6753 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6754 const struct ata_force_param *fp = &force_tbl[i];
6755
6756 if (strncasecmp(val, fp->name, strlen(val)))
6757 continue;
6758
6759 nr_matches++;
6760 match_fp = fp;
6761
6762 if (strcasecmp(val, fp->name) == 0) {
6763 nr_matches = 1;
6764 break;
6765 }
6766 }
6767
6768 if (!nr_matches) {
6769 *reason = "unknown value";
6770 return -EINVAL;
6771 }
6772 if (nr_matches > 1) {
6773 *reason = "ambigious value";
6774 return -EINVAL;
6775 }
6776
6777 force_ent->param = *match_fp;
6778
6779 return 0;
6780}
6781
6782static void __init ata_parse_force_param(void)
6783{
6784 int idx = 0, size = 1;
6785 int last_port = -1, last_device = -1;
6786 char *p, *cur, *next;
6787
6788
6789 for (p = ata_force_param_buf; *p; p++)
6790 if (*p == ',')
6791 size++;
6792
6793 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6794 if (!ata_force_tbl) {
6795 printk(KERN_WARNING "ata: failed to extend force table, "
6796 "libata.force ignored\n");
6797 return;
6798 }
6799
6800
6801 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6802 const char *reason = "";
6803 struct ata_force_ent te = { .port = -1, .device = -1 };
6804
6805 next = cur;
6806 if (ata_parse_force_one(&next, &te, &reason)) {
6807 printk(KERN_WARNING "ata: failed to parse force "
6808 "parameter \"%s\" (%s)\n",
6809 cur, reason);
6810 continue;
6811 }
6812
6813 if (te.port == -1) {
6814 te.port = last_port;
6815 te.device = last_device;
6816 }
6817
6818 ata_force_tbl[idx++] = te;
6819
6820 last_port = te.port;
6821 last_device = te.device;
6822 }
6823
6824 ata_force_tbl_size = idx;
6825}
6826
6827static int __init ata_init(void)
6828{
6829 int rc;
6830
6831 ata_parse_force_param();
6832
6833 rc = ata_sff_init();
6834 if (rc) {
6835 kfree(ata_force_tbl);
6836 return rc;
6837 }
6838
6839 libata_transport_init();
6840 ata_scsi_transport_template = ata_attach_transport();
6841 if (!ata_scsi_transport_template) {
6842 ata_sff_exit();
6843 rc = -ENOMEM;
6844 goto err_out;
6845 }
6846
6847 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6848 return 0;
6849
6850err_out:
6851 return rc;
6852}
6853
/* Module exit: tear down in the reverse order of ata_init(). */
static void __exit ata_exit(void)
{
	ata_release_transport(ata_scsi_transport_template);
	libata_transport_exit();
	ata_sff_exit();
	kfree(ata_force_tbl);
}
6861
6862subsys_initcall(ata_init);
6863module_exit(ata_exit);
6864
/* shared ratelimit state: at most 1 message per HZ/5 interval */
static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);

/*
 * Rate limiter for libata messages.  Returns non-zero when the caller
 * is allowed to print, 0 when the message should be suppressed.
 */
int ata_ratelimit(void)
{
	return __ratelimit(&ratelimit);
}
6871
6872
6873
6874
6875
6876
6877
6878
6879
6880
6881
6882
6883
6884
6885
/**
 *	ata_msleep - ATA EH owner aware msleep
 *	@ap: ATA port to attribute the sleep to, may be NULL
 *	@msecs: duration to sleep in milliseconds
 *
 *	Sleeps @msecs.  If the current task owns @ap's EH, the EH
 *	ownership is released before going to sleep and reacquired
 *	after the sleep is complete, so that other EH actions can
 *	proceed while this task sleeps.
 */
void ata_msleep(struct ata_port *ap, unsigned int msecs)
{
	bool owns_eh = ap && ap->host->eh_owner == current;

	if (owns_eh)
		ata_eh_release(ap);

	if (msecs < 20) {
		/* use usleep_range() for short sleeps -- msleep() would
		 * round up to whole jiffies */
		unsigned long usecs = msecs * USEC_PER_MSEC;
		usleep_range(usecs, usecs + 50);
	} else {
		msleep(msecs);
	}

	if (owns_eh)
		ata_eh_acquire(ap);
}
6903
6904
6905
6906
6907
6908
6909
6910
6911
6912
6913
6914
6915
6916
6917
6918
6919
6920
6921
6922
6923
6924
6925
6926
6927
6928u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
6929 unsigned long interval, unsigned long timeout)
6930{
6931 unsigned long deadline;
6932 u32 tmp;
6933
6934 tmp = ioread32(reg);
6935
6936
6937
6938
6939
6940 deadline = ata_deadline(jiffies, timeout);
6941
6942 while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6943 ata_msleep(ap, interval);
6944 tmp = ioread32(reg);
6945 }
6946
6947 return tmp;
6948}
6949
6950
6951
6952
6953
6954
6955
6956
6957
6958
6959
6960
6961
6962bool sata_lpm_ignore_phy_events(struct ata_link *link)
6963{
6964 unsigned long lpm_timeout = link->last_lpm_change +
6965 msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
6966
6967
6968 if (link->lpm_policy > ATA_LPM_MAX_POWER)
6969 return true;
6970
6971
6972
6973
6974 if ((link->flags & ATA_LFLAG_CHANGED) &&
6975 time_before(jiffies, lpm_timeout))
6976 return true;
6977
6978 return false;
6979}
6980EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
6981
6982
6983
6984
/* Dummy port ops: reject every command so nothing runs on a dummy port. */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}
6989
static void ata_dummy_error_handler(struct ata_port *ap)
{
	/* truly dummy -- nothing to recover on a dummy port */
}
6994
/*
 * Operations for dummy (unimplemented/disabled) ports: commands are
 * rejected at issue time, error handling is a no-op, and the standard
 * EH scheduling hooks keep the EH bookkeeping consistent.
 */
struct ata_port_operations ata_dummy_port_ops = {
	.qc_prep = ata_noop_qc_prep,
	.qc_issue = ata_dummy_qc_issue,
	.error_handler = ata_dummy_error_handler,
	.sched_eh = ata_std_sched_eh,
	.end_eh = ata_std_end_eh,
};

/* port_info template that LLDs use to declare a dummy port */
const struct ata_port_info ata_dummy_port_info = {
	.port_ops = &ata_dummy_port_ops,
};
7006
7007
7008
7009
/*
 * printf-style helper that prefixes the message with the port
 * identifier ("ataN: ").
 */
void ata_port_printk(const struct ata_port *ap, const char *level,
		     const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	printk("%sata%u: %pV", level, ap->print_id, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(ata_port_printk);
7026
/*
 * printf-style helper that prefixes the message with the link
 * identifier: "ataN.LL: " when a PMP or slave link makes the link
 * number meaningful, plain "ataN: " otherwise.
 */
void ata_link_printk(const struct ata_link *link, const char *level,
		     const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	if (sata_pmp_attached(link->ap) || link->ap->slave_link)
		printk("%sata%u.%02u: %pV",
		       level, link->ap->print_id, link->pmp, &vaf);
	else
		printk("%sata%u: %pV",
		       level, link->ap->print_id, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(ata_link_printk);
7048
/*
 * printf-style helper that prefixes the message with the device
 * identifier ("ataN.DD: ", DD combining the link's PMP number and the
 * device number).
 */
void ata_dev_printk(const struct ata_device *dev, const char *level,
		    const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	printk("%sata%u.%02u: %pV",
	       level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
	       &vaf);

	va_end(args);
}
EXPORT_SYMBOL(ata_dev_printk);
7067
/* emit a driver version line at KERN_DEBUG for @dev */
void ata_print_version(const struct device *dev, const char *version)
{
	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
}
EXPORT_SYMBOL(ata_print_version);
7073
7074
7075
7076
7077
7078
7079
/* libata library interface exported to low-level drivers */
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_base_port_ops);
EXPORT_SYMBOL_GPL(sata_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
EXPORT_SYMBOL_GPL(ata_link_next);
EXPORT_SYMBOL_GPL(ata_dev_next);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_host_alloc);
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
EXPORT_SYMBOL_GPL(ata_slave_link_init);
EXPORT_SYMBOL_GPL(ata_host_start);
EXPORT_SYMBOL_GPL(ata_host_register);
EXPORT_SYMBOL_GPL(ata_host_activate);
EXPORT_SYMBOL_GPL(ata_host_detach);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(atapi_cmd_type);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_pack_xfermask);
EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
EXPORT_SYMBOL_GPL(ata_mode_string);
EXPORT_SYMBOL_GPL(ata_id_xfermask);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_std_qc_defer);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(ata_wait_after_reset);
EXPORT_SYMBOL_GPL(sata_link_debounce);
EXPORT_SYMBOL_GPL(sata_link_resume);
EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(sata_link_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_msleep);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_link_online);
EXPORT_SYMBOL_GPL(ata_link_offline);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

/* PATA timing helpers */
EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_find_mode);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);
EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);

/* PCI helpers */
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */
#endif /* CONFIG_PCI */

EXPORT_SYMBOL_GPL(ata_platform_remove_one);

/* error-handling interface */
EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
EXPORT_SYMBOL_GPL(ata_port_desc);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
#endif /* CONFIG_PCI */
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_link_abort);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(sata_async_notification);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
EXPORT_SYMBOL_GPL(ata_do_eh);
EXPORT_SYMBOL_GPL(ata_std_error_handler);

/* cable detection helpers */
EXPORT_SYMBOL_GPL(ata_cable_40wire);
EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_ignore);
EXPORT_SYMBOL_GPL(ata_cable_sata);
7194