1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43#include <linux/kernel.h>
44#include <linux/module.h>
45#include <linux/pci.h>
46#include <linux/init.h>
47#include <linux/list.h>
48#include <linux/mm.h>
49#include <linux/spinlock.h>
50#include <linux/blkdev.h>
51#include <linux/delay.h>
52#include <linux/timer.h>
53#include <linux/time.h>
54#include <linux/interrupt.h>
55#include <linux/completion.h>
56#include <linux/suspend.h>
57#include <linux/workqueue.h>
58#include <linux/scatterlist.h>
59#include <linux/io.h>
60#include <linux/async.h>
61#include <linux/log2.h>
62#include <linux/slab.h>
63#include <linux/glob.h>
64#include <scsi/scsi.h>
65#include <scsi/scsi_cmnd.h>
66#include <scsi/scsi_host.h>
67#include <linux/libata.h>
68#include <asm/byteorder.h>
69#include <asm/unaligned.h>
70#include <linux/cdrom.h>
71#include <linux/ratelimit.h>
72#include <linux/leds.h>
73#include <linux/pm_runtime.h>
74#include <linux/platform_device.h>
75
76#define CREATE_TRACE_POINTS
77#include <trace/events/libata.h>
78
79#include "libata.h"
80#include "libata-transport.h"
81
82
/*
 * SATA link debounce timing parameter sets: { interval, duration, timeout }.
 * NOTE(review): units look like milliseconds, consistent with the other
 * timeout values in this file — confirm against sata_link_debounce().
 */
const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };

/*
 * Base port operations inherited by all libata drivers: the standard
 * prereset/postreset hooks and EH scheduling entry points.
 */
const struct ata_port_operations ata_base_port_ops = {
	.prereset		= ata_std_prereset,
	.postreset		= ata_std_postreset,
	.error_handler		= ata_std_error_handler,
	.sched_eh		= ata_std_sched_eh,
	.end_eh			= ata_std_end_eh,
};

/* SATA ports additionally get NCQ-aware qc deferral and a SATA hardreset. */
const struct ata_port_operations sata_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_defer		= ata_std_qc_defer,
	.hardreset		= sata_std_hardreset,
};
101
/* Device-setup helpers defined later in this file. */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

/* Monotonically increasing id handed out to ports for printing. */
atomic_t ata_print_id = ATOMIC_INIT(0);

/* One parsed "force" override: which settings to apply to a matching target. */
struct ata_force_param {
	const char	*name;		/* human-readable name used in notices */
	unsigned int	cbl;		/* cable type, ATA_CBL_NONE if not forced */
	int		spd_limit;	/* SATA speed limit, 0 if not forced */
	unsigned long	xfer_mask;	/* packed PIO/MWDMA/UDMA mask, 0 if unset */
	unsigned int	horkage_on;	/* horkage bits to set */
	unsigned int	horkage_off;	/* horkage bits to clear */
	unsigned int	lflags;		/* link flags to set */
};

/* Force-table entry: the port/device it applies to plus the parameters. */
struct ata_force_ent {
	int			port;	/* print_id to match, -1 for any */
	int			device;	/* devno/linkno to match, -1 for any */
	struct ata_force_param	param;
};

static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

/* Raw "force" module parameter text, parsed during init (__initdata). */
static char ata_force_param_buf[PAGE_SIZE] __initdata;

/* param_buf is too large for stack, can't use string ops on it directly */
module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/admin-guide/kernel-parameters.rst for details)");
133
134static int atapi_enabled = 1;
135module_param(atapi_enabled, int, 0444);
136MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
137
138static int atapi_dmadir = 0;
139module_param(atapi_dmadir, int, 0444);
140MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
141
142int atapi_passthru16 = 1;
143module_param(atapi_passthru16, int, 0444);
144MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
145
146int libata_fua = 0;
147module_param_named(fua, libata_fua, int, 0444);
148MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
149
150static int ata_ignore_hpa;
151module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
152MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
153
154static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
155module_param_named(dma, libata_dma_mask, int, 0444);
156MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
157
158static int ata_probe_timeout;
159module_param(ata_probe_timeout, int, 0444);
160MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
161
162int libata_noacpi = 0;
163module_param_named(noacpi, libata_noacpi, int, 0444);
164MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
165
166int libata_allow_tpm = 0;
167module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
168MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
169
170static int atapi_an;
171module_param(atapi_an, int, 0444);
172MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=0ff [default], 1=on)");
173
174MODULE_AUTHOR("Jeff Garzik");
175MODULE_DESCRIPTION("Library module for ATA devices");
176MODULE_LICENSE("GPL");
177MODULE_VERSION(DRV_VERSION);
178
179
180static bool ata_sstatus_online(u32 sstatus)
181{
182 return (sstatus & 0xf) == 0x3;
183}
184
185
186
187
188
189
190
191
192
193
194
195
196
/**
 * ata_link_next - link iteration helper
 * @link: the previous link, NULL to start
 * @ap: ATA port containing links to iterate
 * @mode: iteration mode, one of ATA_LITER_*
 *
 * RETURNS:
 * Pointer to the next link, or NULL when iteration is complete.
 */
struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
			       enum ata_link_iter_mode mode)
{
	BUG_ON(mode != ATA_LITER_EDGE &&
	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);

	/* NULL link indicates start of iteration */
	if (!link)
		switch (mode) {
		case ATA_LITER_EDGE:
		case ATA_LITER_PMP_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through - no PMP, start at the host link */
		case ATA_LITER_HOST_FIRST:
			return &ap->link;
		}

	/* we just iterated over the host link, what's next? */
	if (link == &ap->link)
		switch (mode) {
		case ATA_LITER_HOST_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_PMP_FIRST:
			if (unlikely(ap->slave_link))
				return ap->slave_link;
			/* fall through */
		case ATA_LITER_EDGE:
			return NULL;
		}

	/* slave_link is always the last link; nothing follows it */
	if (unlikely(link == ap->slave_link))
		return NULL;

	/* we were over a PMP link; advance to the next PMP link if any */
	if (++link < ap->pmp_link + ap->nr_pmp_links)
		return link;

	/* PMP-first mode visits the host link after all PMP links */
	if (mode == ATA_LITER_PMP_FIRST)
		return &ap->link;

	return NULL;
}
243
244
245
246
247
248
249
250
251
252
253
254
255
/**
 * ata_dev_next - device iteration helper
 * @dev: the previous device, NULL to start
 * @link: ATA link containing devices to iterate
 * @mode: iteration mode, one of ATA_DITER_*
 *
 * RETURNS:
 * Pointer to the next device, or NULL when iteration is complete.
 */
struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
				enum ata_dev_iter_mode mode)
{
	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);

	/* NULL dev indicates start of iteration */
	if (!dev)
		switch (mode) {
		case ATA_DITER_ENABLED:
		case ATA_DITER_ALL:
			/* forward modes start at the first device */
			dev = link->device;
			goto check;
		case ATA_DITER_ENABLED_REVERSE:
		case ATA_DITER_ALL_REVERSE:
			/* reverse modes start at the last device */
			dev = link->device + ata_link_max_devices(link) - 1;
			goto check;
		}

 next:
	/* move to the next one */
	switch (mode) {
	case ATA_DITER_ENABLED:
	case ATA_DITER_ALL:
		if (++dev < link->device + ata_link_max_devices(link))
			goto check;
		return NULL;
	case ATA_DITER_ENABLED_REVERSE:
	case ATA_DITER_ALL_REVERSE:
		if (--dev >= link->device)
			goto check;
		return NULL;
	}

 check:
	/* ENABLED modes skip devices that are not currently enabled */
	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
	    !ata_dev_enabled(dev))
		goto next;
	return dev;
}
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311struct ata_link *ata_dev_phys_link(struct ata_device *dev)
312{
313 struct ata_port *ap = dev->link->ap;
314
315 if (!ap->slave_link)
316 return dev->link;
317 if (!dev->devno)
318 return &ap->link;
319 return ap->slave_link;
320}
321
322
323
324
325
326
327
328
329
330
331
332
333
334
/**
 * ata_force_cbl - force cable type according to libata.force
 * @ap: ATA port of interest
 *
 * Scans the force table from the end so that the last matching entry
 * takes precedence, and applies its cable type to @ap if one is set.
 */
void ata_force_cbl(struct ata_port *ap)
{
	int i;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		/* entry must match this port (or be a wildcard, port == -1) */
		if (fe->port != -1 && fe->port != ap->print_id)
			continue;

		/* skip entries that don't force a cable type */
		if (fe->param.cbl == ATA_CBL_NONE)
			continue;

		ap->cbl = fe->param.cbl;
		ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
		return;
	}
}
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
/**
 * ata_force_link_limits - force link limits according to libata.force
 * @link: ATA link of interest
 *
 * Applies forced SATA speed limit and link flags from the force table.
 * Scanned from the end so the last matching entry wins for the speed
 * limit; link flags from all matching entries accumulate.
 */
static void ata_force_link_limits(struct ata_link *link)
{
	bool did_spd = false;
	int linkno = link->pmp;
	int i;

	/* the host link is also addressable as device number 15+ */
	if (ata_is_host_link(link))
		linkno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != linkno)
			continue;

		/* only the first (i.e. last in the table) spd limit applies */
		if (!did_spd && fe->param.spd_limit) {
			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
					fe->param.name);
			did_spd = true;
		}

		/* let lflags stack: every matching entry contributes */
		if (fe->param.lflags) {
			link->flags |= fe->param.lflags;
			ata_link_notice(link,
					"FORCE: link flag 0x%x forced -> 0x%x\n",
					fe->param.lflags, link->flags);
		}
	}
}
405
406
407
408
409
410
411
412
413
414
415
416
/**
 * ata_force_xfermask - force transfer mask according to libata.force
 * @dev: ATA device of interest
 *
 * Scans the force table from the end (last matching entry wins) and
 * applies the forced transfer mask, keeping only the fastest class
 * (UDMA > MWDMA > PIO) that the entry specifies.
 */
static void ata_force_xfermask(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* devices on the host link are also addressable as 15+devno */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];
		unsigned long pio_mask, mwdma_mask, udma_mask;

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!fe->param.xfer_mask)
			continue;

		ata_unpack_xfermask(fe->param.xfer_mask,
				    &pio_mask, &mwdma_mask, &udma_mask);
		if (udma_mask)
			/* forcing UDMA: leave slower classes untouched */
			dev->udma_mask = udma_mask;
		else if (mwdma_mask) {
			/* forcing MWDMA: disable UDMA */
			dev->udma_mask = 0;
			dev->mwdma_mask = mwdma_mask;
		} else {
			/* forcing PIO: disable all DMA */
			dev->udma_mask = 0;
			dev->mwdma_mask = 0;
			dev->pio_mask = pio_mask;
		}

		ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
			       fe->param.name);
		return;
	}
}
459
460
461
462
463
464
465
466
467
468
469
470
/**
 * ata_force_horkage - force horkage according to libata.force
 * @dev: ATA device of interest
 *
 * Unlike the other force helpers this walks the table front to back and
 * applies every matching entry, so later entries override earlier ones.
 */
static void ata_force_horkage(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* devices on the host link are also addressable as 15+devno */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = 0; i < ata_force_tbl_size; i++) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		/* skip entries that wouldn't change anything */
		if (!(~dev->horkage & fe->param.horkage_on) &&
		    !(dev->horkage & fe->param.horkage_off))
			continue;

		dev->horkage |= fe->param.horkage_on;
		dev->horkage &= ~fe->param.horkage_off;

		ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
			       fe->param.name);
	}
}
502
503
504
505
506
507
508
509
510
511
512
513
514
/**
 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
 * @opcode: SCSI opcode
 *
 * RETURNS:
 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
 */
int atapi_cmd_type(u8 opcode)
{
	switch (opcode) {
	case GPCMD_READ_10:
	case GPCMD_READ_12:
		return ATAPI_READ;

	case GPCMD_WRITE_10:
	case GPCMD_WRITE_12:
	case GPCMD_WRITE_AND_VERIFY_10:
		return ATAPI_WRITE;

	case GPCMD_READ_CD:
	case GPCMD_READ_CD_MSF:
		return ATAPI_READ_CD;

	case ATA_16:
	case ATA_12:
		if (atapi_passthru16)
			return ATAPI_PASS_THRU;
		/* fall through - passthru disabled, treat as MISC */
	default:
		return ATAPI_MISC;
	}
}
540
541
542
543
544
545
546
547
548
549
550
551
552
553
/**
 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 * @tf: Taskfile to convert
 * @pmp: Port multiplier port
 * @is_cmd: This FIS is for command
 * @fis: Buffer into which data will output
 *
 * Converts a standard ATA taskfile to a Serial ATA
 * FIS structure (Register - Host to Device).
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
	fis[0] = 0x27;			/* FIS type: Register - Host to Device */
	fis[1] = pmp & 0xf;		/* PM port in low nibble */
	if (is_cmd)
		fis[1] |= (1 << 7);	/* C bit: update command register */

	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	/* high-order bytes for 48-bit commands */
	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	/* auxiliary field, little-endian */
	fis[16] = tf->auxiliary & 0xff;
	fis[17] = (tf->auxiliary >> 8) & 0xff;
	fis[18] = (tf->auxiliary >> 16) & 0xff;
	fis[19] = (tf->auxiliary >> 24) & 0xff;
}
584
585
586
587
588
589
590
591
592
593
594
595
/**
 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 * @fis: Buffer from which data will be input
 * @tf: Taskfile to output
 *
 * Converts a serial ATA FIS structure to a standard ATA taskfile.
 */
void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command = fis[2];	/* status on a D2H FIS */
	tf->feature = fis[3];	/* error on a D2H FIS */

	tf->lbal = fis[4];
	tf->lbam = fis[5];
	tf->lbah = fis[6];
	tf->device = fis[7];

	tf->hob_lbal = fis[8];
	tf->hob_lbam = fis[9];
	tf->hob_lbah = fis[10];

	tf->nsect = fis[12];
	tf->hob_nsect = fis[13];
}
613
/*
 * Read/write opcode lookup used by ata_rwcmd_protocol().  Indexed by
 * base (0 = multi, 8 = pio, 16 = dma) + fua (4) + lba48 (2) + write (1).
 * A zero entry marks an unsupported combination.
 */
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
643
644
645
646
647
648
649
650
651
652
653
654
/**
 * ata_rwcmd_protocol - set taskfile r/w command and protocol
 * @tf: command to examine and configure
 * @dev: device tf belongs to
 *
 * Examine the device configuration and tf->flags to select
 * the proper read/write command and protocol to use.
 *
 * RETURNS:
 * 0 on success, -1 if the flag/device combination has no valid opcode.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	/* build the ata_rw_cmds[] index from the taskfile flags */
	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;	/* multi vs plain PIO */
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* controller can only do LBA48 via PIO */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
/**
 * ata_tf_read_block - Read block address from ATA taskfile
 * @tf: ATA taskfile of interest
 * @dev: ATA device @tf belongs to
 *
 * Decode the block address reported in @tf, handling LBA48, LBA28
 * and CHS addressing.
 *
 * RETURNS:
 * Block address read from @tf, or U64_MAX if the CHS address is invalid.
 */
u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= (u64)tf->hob_lbal << 24;
		} else
			/* LBA28: top nibble lives in the device register */
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		/* CHS sectors are 1-based; 0 is never valid */
		if (!sect) {
			ata_dev_warn(dev,
				     "device reported invalid CHS sector 0\n");
			return U64_MAX;
		}

		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
	}

	return block;
}
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
/**
 * ata_build_rw_tf - Build ATA taskfile for given read/write request
 * @tf: Target ATA taskfile
 * @dev: ATA device @tf belongs to
 * @block: Block address
 * @n_block: Number of blocks
 * @tf_flags: RW/FUA etc...
 * @tag: tag (NCQ tag, or ATA_TAG_INTERNAL)
 * @class: IO priority class
 *
 * Build ATA taskfile @tf for read/write request described by
 * @block, @n_block, @tf_flags and @tag on @dev.  Chooses NCQ,
 * LBA48, LBA28 or CHS addressing as the device allows.
 *
 * RETURNS:
 * 0 on success, -ERANGE if the request is too large for @dev,
 * -EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag, int class)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	/* internal commands never go out as NCQ */
	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		/* for FPDMA the tag goes in nsect and the count in feature */
		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = ATA_LBA;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;

		/* request high priority for RT class if supported */
		if (dev->flags & ATA_DFLAG_NCQ_PRIO) {
			if (class == IOPRIO_CLASS_RT)
				tf->hob_nsect |= ATA_PRIO_HIGH <<
						 ATA_SHIFT_PRIO;
		}
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28; top nibble goes in the device register */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl = track / dev->heads;
		head = track % dev->heads;
		sect = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/*
		 * Check whether the converted CHS can fit.
		 * Cylinder: 0-65535, Head: 0-15, Sector: 1-255
		 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff;	/* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879unsigned long ata_pack_xfermask(unsigned long pio_mask,
880 unsigned long mwdma_mask,
881 unsigned long udma_mask)
882{
883 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
884 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
885 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
886}
887
888
889
890
891
892
893
894
895
896
897
898void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
899 unsigned long *mwdma_mask, unsigned long *udma_mask)
900{
901 if (pio_mask)
902 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
903 if (mwdma_mask)
904 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
905 if (udma_mask)
906 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
907}
908
/*
 * Per transfer-class descriptor: bit position of the class inside a
 * packed xfer_mask, number of modes in the class, and the XFER_* id of
 * mode 0.  The table is terminated by a negative shift.
 */
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932u8 ata_xfer_mask2mode(unsigned long xfer_mask)
933{
934 int highbit = fls(xfer_mask) - 1;
935 const struct ata_xfer_ent *ent;
936
937 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
938 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
939 return ent->base + highbit - ent->shift;
940 return 0xff;
941}
942
943
944
945
946
947
948
949
950
951
952
953
954
955unsigned long ata_xfer_mode2mask(u8 xfer_mode)
956{
957 const struct ata_xfer_ent *ent;
958
959 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
960 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
961 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
962 & ~((1 << ent->shift) - 1);
963 return 0;
964}
965
966
967
968
969
970
971
972
973
974
975
976
977
978int ata_xfer_mode2shift(unsigned long xfer_mode)
979{
980 const struct ata_xfer_ent *ent;
981
982 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
983 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
984 return ent->shift;
985 return -1;
986}
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
/*
 * Return a human-readable name for the highest mode set in @xfer_mask,
 * or "<n/a>" if no known mode bit is set.
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	static const char * const names[] = {
		"PIO0", "PIO1", "PIO2", "PIO3", "PIO4", "PIO5", "PIO6",
		"MWDMA0", "MWDMA1", "MWDMA2", "MWDMA3", "MWDMA4",
		"UDMA/16", "UDMA/25", "UDMA/33", "UDMA/44", "UDMA/66",
		"UDMA/100", "UDMA/133", "UDMA7",
	};
	int top = fls(xfer_mask) - 1;

	if (top < 0 || top >= ARRAY_SIZE(names))
		return "<n/a>";

	return names[top];
}
1033
/*
 * Map a SATA SPD value (1-based generation number) to its link speed
 * name; anything outside 1..3 yields "<unknown>".
 */
const char *sata_spd_string(unsigned int spd)
{
	switch (spd) {
	case 1:
		return "1.5 Gbps";
	case 2:
		return "3.0 Gbps";
	case 3:
		return "6.0 Gbps";
	default:
		return "<unknown>";
	}
}
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
/**
 * ata_dev_classify - determine device type based on ATA-spec signature
 * @tf: ATA taskfile register set containing the device signature
 *
 * Determine from the lbam/lbah signature bytes whether the device is
 * ATA, ATAPI, a port multiplier, a SEMB bridge or a ZAC device.
 *
 * RETURNS:
 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
 * %ATA_DEV_SEMB, %ATA_DEV_ZAC or %ATA_DEV_UNKNOWN.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* signature 0x0000 in lbam:lbah -> plain ATA device */
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	/* signature 0xeb14 -> ATAPI device */
	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	/* signature 0x9669 -> port multiplier */
	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");
		return ATA_DEV_PMP;
	}

	/* signature 0xc33c -> SEMB (may actually be an ATA device) */
	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		DPRINTK("found SEMB device by sig (could be ATA device)\n");
		return ATA_DEV_SEMB;
	}

	/* signature 0xabcd -> ZAC (zoned) device */
	if ((tf->lbam == 0xcd) && (tf->lbah == 0xab)) {
		DPRINTK("found ZAC device by sig\n");
		return ATA_DEV_ZAC;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129void ata_id_string(const u16 *id, unsigned char *s,
1130 unsigned int ofs, unsigned int len)
1131{
1132 unsigned int c;
1133
1134 BUG_ON(len & 1);
1135
1136 while (len > 0) {
1137 c = id[ofs] >> 8;
1138 *s = c;
1139 s++;
1140
1141 c = id[ofs] & 0xff;
1142 *s = c;
1143 s++;
1144
1145 ofs++;
1146 len -= 2;
1147 }
1148}
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164void ata_id_c_string(const u16 *id, unsigned char *s,
1165 unsigned int ofs, unsigned int len)
1166{
1167 unsigned char *p;
1168
1169 ata_id_string(id, s, ofs, len - 1);
1170
1171 p = s + strnlen(s, len - 1);
1172 while (p > s && p[-1] == ' ')
1173 p--;
1174 *p = '\0';
1175}
1176
/*
 * Return the device capacity in sectors from IDENTIFY data @id,
 * preferring LBA48 over LBA28 over (current, then default) CHS geometry.
 */
static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
		else
			return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
	} else {
		if (ata_id_current_chs_valid(id))
			return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
			       id[ATA_ID_CUR_SECTORS];
		else
			return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
			       id[ATA_ID_SECTORS];
	}
}
1193
/* Assemble the 48-bit LBA encoded across the taskfile LBA/HOB registers. */
u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}
1207
/*
 * Assemble the 28-bit LBA from the taskfile: the top nibble lives in
 * the device register, the rest in lbah/lbam/lbal.
 */
u64 ata_tf_to_lba(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= (tf->device & 0x0f) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
/**
 * ata_read_native_max_address - Read native max address
 * @dev: target device
 * @max_sectors: out parameter for the result
 *
 * Perform an LBA48 or LBA28 native size query upon the device in
 * question.
 *
 * RETURNS:
 * 0 on success, -EACCES if command is aborted by the drive.
 * -EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol = ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to read native max address (err_mask=0x%x)\n",
			     err_mask);
		/* device abort means HPA commands are unsupported */
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	/* the command returns the last addressable LBA, hence +1 */
	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf) + 1;
	else
		*max_sectors = ata_tf_to_lba(&tf) + 1;
	/* some devices report one sector more than they have */
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
/**
 * ata_set_max_sectors - Set max sectors
 * @dev: target device
 * @new_sectors: new max sectors value to set for the device
 *
 * Set max sectors of @dev to @new_sectors using the LBA48 or LBA28
 * SET MAX command.
 *
 * RETURNS:
 * 0 on success, -EACCES if command is aborted or denied (due to
 * previous non-volatile SET_MAX) by the drive.  -EIO on other errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	/* the command takes the last addressable LBA, not a count */
	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		/* LBA28: top nibble goes in the device register */
		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol = ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to set max address (err_mask=0x%x)\n",
			     err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
/**
 * ata_hpa_resize - Resize a device with an HPA set
 * @dev: Device to resize
 *
 * Read the size of an LBA28 or LBA48 disk with HPA features and, if
 * unlocking is requested, resize it to the full size of its native
 * media.  Also records dev->n_native_sectors.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? only LBA ATA/ZAC disks with HPA enabled */
	if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/*
		 * If the device aborted the command or we are not
		 * unlocking anyway, mark HPA handling broken and move on.
		 */
		if (rc == -EACCES || !unlock_hpa) {
			ata_dev_warn(dev,
				     "HPA support seems broken, skipping HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}
	dev->n_native_sectors = native_sectors;

	/* nothing to do, or only report the discrepancy? */
	if (native_sectors <= sectors || !unlock_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_info(dev,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_warn(dev,
				"native sectors (%llu) is smaller than sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_warn(dev,
			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
			     (unsigned long long)sectors,
			     (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data to pick up the new capacity */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_err(dev,
			    "failed to re-read IDENTIFY data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_info(dev,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
/**
 * ata_dump_id - IDENTIFY DEVICE info debugging output
 * @id: IDENTIFY DEVICE page to dump
 *
 * Dump selected 16-bit words from the given IDENTIFY DEVICE
 * page via DPRINTK (debug builds only).
 */
static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x "
		"53==0x%04x "
		"63==0x%04x "
		"64==0x%04x "
		"75==0x%04x \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x "
		"81==0x%04x "
		"82==0x%04x "
		"83==0x%04x "
		"84==0x%04x \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
/**
 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 * @id: IDENTIFY data to compute xfer mask from
 *
 * Compute the xfermask for this device.  Not all fields are
 * honored; the relevant ATA standards and whether the fields are
 * marked valid in @id decide which words are used.
 *
 * RETURNS:
 * Computed xfermask (PIO, MWDMA and UDMA packed together).
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;	/* PIO0-2 are always supported */
	} else {
		/*
		 * Old CF card / pre-ATA2 style: word 51 high byte holds
		 * the maximum supported PIO mode number.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;	/* out of range, assume PIO0 only */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * CompactFlash advertises its advanced modes (PIO5/6,
		 * MWDMA3/4) in a dedicated word.
		 */
		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	/* word 53 bit 2 indicates the UDMA word is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
1533
1534static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1535{
1536 struct completion *waiting = qc->private_data;
1537
1538 complete(waiting);
1539}
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
/**
 * ata_exec_internal_sg - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @sgl: sg list for the data buffer of the command
 * @n_elem: Number of sg entries
 * @timeout: Timeout in msecs (0 for default)
 *
 * Executes libata internal command with timeout.  @tf contains
 * the command on entry and the result on return.  Timeout and error
 * conditions are reported via the return value.  No recovery action
 * is taken after a command times out.
 *
 * LOCKING:
 * None.  Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure.
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	int auto_timeout = 0;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while the port is frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/*
	 * Initialize internal qc.  With EH present the dedicated
	 * internal tag is used; otherwise tag 0.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/*
	 * Save the current active-command state and poison it so the
	 * internal command runs with the port looking idle; restored
	 * once the command finishes.
	 */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);

	/* some SATA bridges need us to indicate data xfer direction */
	if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
	    dma_dir == DMA_FROM_DEVICE)
		qc->tf.feature |= ATAPI_DMADIR;

	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		/* total transfer length is the sum of all sg entries */
		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	/* pick a timeout: module param overrides per-command default */
	if (!timeout) {
		if (ata_probe_timeout)
			timeout = ata_probe_timeout * 1000;
		else {
			timeout = ata_internal_cmd_timeout(dev, command);
			auto_timeout = 1;
		}
	}

	/* release EH exclusion while we sleep on completion */
	if (ap->ops->error_handler)
		ata_eh_release(ap);

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	if (ap->ops->error_handler)
		ata_eh_acquire(ap);

	ata_sff_flush_pio_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/*
		 * We're racing with irq here.  If the command completed
		 * after we checked, ATA_QCFLAG_ACTIVE is already clear
		 * and we treat it as a normal completion.
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			/* with EH, freeze the port and let EH clean up */
			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
					     command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		/* drop the catch-all bit if a specific cause is known */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	} else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
		qc->result_tf.command |= ATA_SENSE;
	}

	/* finish up: grab result, free qc, restore preempted state */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	spin_unlock_irqrestore(ap->lock, flags);

	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
		ata_internal_cmd_timed_out(dev, command);

	return err_mask;
}
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748unsigned ata_exec_internal(struct ata_device *dev,
1749 struct ata_taskfile *tf, const u8 *cdb,
1750 int dma_dir, void *buf, unsigned int buflen,
1751 unsigned long timeout)
1752{
1753 struct scatterlist *psg = NULL, sg;
1754 unsigned int n_elem = 0;
1755
1756 if (dma_dir != DMA_NONE) {
1757 WARN_ON(!buf);
1758 sg_init_one(&sg, buf, buflen);
1759 psg = &sg;
1760 n_elem++;
1761 }
1762
1763 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1764 timeout);
1765}
1766
1767
1768
1769
1770
1771
1772
1773
1774unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1775{
1776
1777
1778
1779
1780 if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1781 return 0;
1782
1783
1784
1785 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1786 return 0;
1787
1788 if (ata_id_is_cfa(adev->id)
1789 && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1790 return 0;
1791
1792 if (adev->pio_mode > XFER_PIO_2)
1793 return 1;
1794
1795 if (ata_id_has_iordy(adev->id))
1796 return 1;
1797 return 0;
1798}
1799
1800
1801
1802
1803
1804
1805
1806
1807static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1808{
1809
1810 if (adev->id[ATA_ID_FIELD_VALID] & 2) {
1811 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1812
1813 if (pio) {
1814
1815 if (pio > 240)
1816 return 3 << ATA_SHIFT_PIO;
1817 return 7 << ATA_SHIFT_PIO;
1818 }
1819 }
1820 return 3 << ATA_SHIFT_PIO;
1821}
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833unsigned int ata_do_dev_read_id(struct ata_device *dev,
1834 struct ata_taskfile *tf, u16 *id)
1835{
1836 return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1837 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1838}
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	bool is_semb = class == ATA_DEV_SEMB;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_dbg(dev, "%s: ENTER\n", __func__);

retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_SEMB:
		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
		/* fall through */
	case ATA_DEV_ATA:
	case ATA_DEV_ZAC:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	if (ap->ops->read_id)
		err_mask = ap->ops->read_id(dev, &tf, id);
	else
		err_mask = ata_do_dev_read_id(dev, &tf, id);

	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			ata_dev_dbg(dev, "NODEV after polling detection\n");
			return -ENOENT;
		}

		if (is_semb) {
			ata_dev_info(dev,
		     "IDENTIFY failed on device w/ SEMB sig, disabled\n");
			/* SEMB is not supported yet */
			*p_class = ATA_DEV_SEMB_UNSUP;
			return 0;
		}

		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			/* Device or controller might have reported
			 * the wrong device class.  Give a shot at the
			 * other IDENTIFY if the current one is
			 * aborted by the device.
			 */
			if (may_fallback) {
				may_fallback = 0;

				if (class == ATA_DEV_ATA)
					class = ATA_DEV_ATAPI;
				else
					class = ATA_DEV_ATA;
				goto retry;
			}

			/* Control reaches here iff the device aborted
			 * both flavors of IDENTIFYs which happens
			 * sometimes with phantom devices.
			 */
			ata_dev_dbg(dev,
				    "both IDENTIFYs aborted, assuming NODEV\n");
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
		ata_dev_dbg(dev, "dumping IDENTIFY data, "
			    "class=%d may_fallback=%d tried_spinup=%d\n",
			    class, may_fallback, tried_spinup);
		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
		if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
							ata_id_is_ata(id)) {
			ata_dev_dbg(dev,
				"host indicates ignore ATA devices, ignored\n");
			return -ENOENT;
		}
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) &&
	    (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
			     reason, err_mask);
	return rc;
}
2049
2050static int ata_do_link_spd_horkage(struct ata_device *dev)
2051{
2052 struct ata_link *plink = ata_dev_phys_link(dev);
2053 u32 target, target_limit;
2054
2055 if (!sata_scr_valid(plink))
2056 return 0;
2057
2058 if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2059 target = 1;
2060 else
2061 return 0;
2062
2063 target_limit = (1 << target) - 1;
2064
2065
2066 if (plink->sata_spd_limit <= target_limit)
2067 return 0;
2068
2069 plink->sata_spd_limit = target_limit;
2070
2071
2072
2073
2074
2075 if (plink->sata_spd > target) {
2076 ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2077 sata_spd_string(target));
2078 return -EAGAIN;
2079 }
2080 return 0;
2081}
2082
2083static inline u8 ata_dev_knobble(struct ata_device *dev)
2084{
2085 struct ata_port *ap = dev->link->ap;
2086
2087 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2088 return 0;
2089
2090 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2091}
2092
/*
 * Cache the NCQ Send/Recv log page so queued commands such as queued
 * TRIM can be validated later.  Sets ATA_DFLAG_NCQ_SEND_RECV on
 * success; silently degrades (debug message only) on read failure.
 */
static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int err_mask;
	int log_index = ATA_LOG_NCQ_SEND_RECV * 2;
	u16 log_pages;

	/* Check the log directory first to see whether the page exists. */
	err_mask = ata_read_log_page(dev, ATA_LOG_DIRECTORY,
				     0, ap->sector_buf, 1);
	if (err_mask) {
		ata_dev_dbg(dev,
			    "failed to get Log Directory Emask 0x%x\n",
			    err_mask);
		return;
	}
	log_pages = get_unaligned_le16(&ap->sector_buf[log_index]);
	if (!log_pages) {
		ata_dev_warn(dev,
			     "NCQ Send/Recv Log not supported\n");
		return;
	}
	err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
				     0, ap->sector_buf, 1);
	if (err_mask) {
		ata_dev_dbg(dev,
			    "failed to get NCQ Send/Recv Log Emask 0x%x\n",
			    err_mask);
	} else {
		u8 *cmds = dev->ncq_send_recv_cmds;

		dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
		memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);

		/* Quirked devices must not advertise queued TRIM. */
		if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
			ata_dev_dbg(dev, "disabling queued TRIM support\n");
			cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
				~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
		}
	}
}
2133
2134static void ata_dev_config_ncq_non_data(struct ata_device *dev)
2135{
2136 struct ata_port *ap = dev->link->ap;
2137 unsigned int err_mask;
2138 int log_index = ATA_LOG_NCQ_NON_DATA * 2;
2139 u16 log_pages;
2140
2141 err_mask = ata_read_log_page(dev, ATA_LOG_DIRECTORY,
2142 0, ap->sector_buf, 1);
2143 if (err_mask) {
2144 ata_dev_dbg(dev,
2145 "failed to get Log Directory Emask 0x%x\n",
2146 err_mask);
2147 return;
2148 }
2149 log_pages = get_unaligned_le16(&ap->sector_buf[log_index]);
2150 if (!log_pages) {
2151 ata_dev_warn(dev,
2152 "NCQ Send/Recv Log not supported\n");
2153 return;
2154 }
2155 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
2156 0, ap->sector_buf, 1);
2157 if (err_mask) {
2158 ata_dev_dbg(dev,
2159 "failed to get NCQ Non-Data Log Emask 0x%x\n",
2160 err_mask);
2161 } else {
2162 u8 *cmds = dev->ncq_non_data_cmds;
2163
2164 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE);
2165 }
2166}
2167
2168static void ata_dev_config_ncq_prio(struct ata_device *dev)
2169{
2170 struct ata_port *ap = dev->link->ap;
2171 unsigned int err_mask;
2172
2173 if (!(dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE)) {
2174 dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
2175 return;
2176 }
2177
2178 err_mask = ata_read_log_page(dev,
2179 ATA_LOG_SATA_ID_DEV_DATA,
2180 ATA_LOG_SATA_SETTINGS,
2181 ap->sector_buf,
2182 1);
2183 if (err_mask) {
2184 ata_dev_dbg(dev,
2185 "failed to get Identify Device data, Emask 0x%x\n",
2186 err_mask);
2187 return;
2188 }
2189
2190 if (ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)) {
2191 dev->flags |= ATA_DFLAG_NCQ_PRIO;
2192 } else {
2193 dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
2194 ata_dev_dbg(dev, "SATA page does not support priority\n");
2195 }
2196
2197}
2198
/*
 * Configure NCQ for @dev and write a human-readable summary into
 * @desc (size @desc_sz) for the probe message.  Also enables FPDMA
 * auto-activation when supported and probes the auxiliary NCQ log
 * pages on hosts with ATA_FLAG_FPDMA_AUX.
 *
 * Returns 0 on success, -EIO if enabling AA failed with a
 * non-device error (the device is then quirked for retry).
 */
static int ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->link->ap;
	/* hdepth: host queue depth, ddepth: device queue depth */
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
	unsigned int err_mask;
	char *aa_desc = "";

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return 0;
	}
	if (dev->horkage & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return 0;
	}
	if (ap->flags & ATA_FLAG_NCQ) {
		/* One tag is reserved for internal commands. */
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	/* Enable FPDMA auto-activation when host and device support it. */
	if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
		(ap->flags & ATA_FLAG_FPDMA_AA) &&
		ata_id_has_fpdma_aa(dev->id)) {
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
			SATA_FPDMA_AA);
		if (err_mask) {
			ata_dev_err(dev,
				    "failed to enable AA (error_mask=0x%x)\n",
				    err_mask);
			if (err_mask != AC_ERR_DEV) {
				dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
				return -EIO;
			}
		} else
			aa_desc = ", AA";
	}

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
			ddepth, aa_desc);

	/* Probe the auxiliary NCQ log pages advertised by the device. */
	if ((ap->flags & ATA_FLAG_FPDMA_AUX)) {
		if (ata_id_has_ncq_send_and_recv(dev->id))
			ata_dev_config_ncq_send_recv(dev);
		if (ata_id_has_ncq_non_data(dev->id))
			ata_dev_config_ncq_non_data(dev);
		if (ata_id_has_ncq_prio(dev->id))
			ata_dev_config_ncq_prio(dev);
	}

	return 0;
}
2254
2255static void ata_dev_config_sense_reporting(struct ata_device *dev)
2256{
2257 unsigned int err_mask;
2258
2259 if (!ata_id_has_sense_reporting(dev->id))
2260 return;
2261
2262 if (ata_id_sense_reporting_enabled(dev->id))
2263 return;
2264
2265 err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
2266 if (err_mask) {
2267 ata_dev_dbg(dev,
2268 "failed to enable Sense Data Reporting, Emask 0x%x\n",
2269 err_mask);
2270 }
2271}
2272
/*
 * Configure zoned (ZAC) capabilities: mark the device as zoned when it
 * is a native ZAC device or reports host-aware zoning in IDENTIFY,
 * then pull the zone limits from the Zoned Device Information page of
 * the Identify Device Data log.  Limits default to U32_MAX ("no limit")
 * and are only overwritten when the log reports them valid.
 */
static void ata_dev_config_zac(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int err_mask;
	u8 *identify_buf = ap->sector_buf;
	int log_index = ATA_LOG_SATA_ID_DEV_DATA * 2, i, found = 0;
	u16 log_pages;

	dev->zac_zones_optimal_open = U32_MAX;
	dev->zac_zones_optimal_nonseq = U32_MAX;
	dev->zac_zones_max_open = U32_MAX;

	/*
	 * Always set the ATA_DFLAG_ZAC flag for ZAC devices.
	 */
	if (dev->class == ATA_DEV_ZAC)
		dev->flags |= ATA_DFLAG_ZAC;
	else if (ata_id_zoned_cap(dev->id) == 0x01)
		/*
		 * Check host-aware device.
		 */
		dev->flags |= ATA_DFLAG_ZAC;

	if (!(dev->flags & ATA_DFLAG_ZAC))
		return;

	/*
	 * Check the log directory first to see whether the Identify
	 * Device Data log is supported at all.
	 */
	err_mask = ata_read_log_page(dev, ATA_LOG_DIRECTORY,
				     0, ap->sector_buf, 1);
	if (err_mask) {
		ata_dev_info(dev,
			     "failed to get Log Directory Emask 0x%x\n",
			     err_mask);
		return;
	}
	log_pages = get_unaligned_le16(&ap->sector_buf[log_index]);
	if (log_pages == 0) {
		ata_dev_warn(dev,
			     "ATA Identify Device Log not supported\n");
		return;
	}

	/*
	 * Read the supported-pages list of the Identify Device data log
	 * and look for the Zoned Information page.
	 */
	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_ID_DEV_DATA, 0,
				     identify_buf, 1);
	if (err_mask) {
		ata_dev_info(dev,
			     "failed to get Device Identify Log Emask 0x%x\n",
			     err_mask);
		return;
	}
	/* byte 8: page count; bytes 9..: supported page numbers */
	log_pages = identify_buf[8];
	for (i = 0; i < log_pages; i++) {
		if (identify_buf[9 + i] == ATA_LOG_ZONED_INFORMATION) {
			found++;
			break;
		}
	}
	if (!found) {
		ata_dev_warn(dev,
			     "ATA Zoned Information Log not supported\n");
		return;
	}

	/*
	 * Get the zone limits from the Zoned Information page.  Each
	 * 64-bit field is valid only when its top bit is set.
	 */
	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_ID_DEV_DATA,
				     ATA_LOG_ZONED_INFORMATION,
				     identify_buf, 1);
	if (!err_mask) {
		u64 zoned_cap, opt_open, opt_nonseq, max_open;

		zoned_cap = get_unaligned_le64(&identify_buf[8]);
		if ((zoned_cap >> 63))
			dev->zac_zoned_cap = (zoned_cap & 1);
		opt_open = get_unaligned_le64(&identify_buf[24]);
		if ((opt_open >> 63))
			dev->zac_zones_optimal_open = (u32)opt_open;
		opt_nonseq = get_unaligned_le64(&identify_buf[32]);
		if ((opt_nonseq >> 63))
			dev->zac_zones_optimal_nonseq = (u32)opt_nonseq;
		max_open = get_unaligned_le64(&identify_buf[40]);
		if ((max_open >> 63))
			dev->zac_zones_max_open = (u32)max_open;
	}
}
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned long xfer_mask;
	unsigned int err_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_dbg(dev, "%s: ENTER\n", __func__);

	/* set horkage */
	dev->horkage |= ata_dev_blacklisted(dev);
	ata_force_horkage(dev);

	if (dev->horkage & ATA_HORKAGE_DISABLE) {
		ata_dev_info(dev, "unsupported device, disabling\n");
		ata_dev_disable(dev);
		return 0;
	}

	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
	    dev->class == ATA_DEV_ATAPI) {
		ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
			     atapi_enabled ? "not supported with this driver"
			     : "disabled");
		ata_dev_disable(dev);
		return 0;
	}

	rc = ata_do_link_spd_horkage(dev);
	if (rc)
		return rc;

	/* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
	if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
	    (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
		dev->horkage |= ATA_HORKAGE_NOLPM;

	if (dev->horkage & ATA_HORKAGE_NOLPM) {
		ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
		dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
	}

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_dbg(dev,
			    "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			    "85:%04x 86:%04x 87:%04x 88:%04x\n",
			    __func__,
			    id[49], id[82], id[83], id[84],
			    id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;
	dev->multi_count = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
		if (ata_id_is_cfa(id)) {
			/* CPRM may make this media unusable */
			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
				ata_dev_warn(dev,
	"supports DRM functions and may not be fully accessible\n");
			snprintf(revbuf, 7, "CFA");
		} else {
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
			/* Warn the user if the device has TPM extensions */
			if (ata_id_has_tpm(id))
				ata_dev_warn(dev,
	"supports DRM functions and may not be fully accessible\n");
		}

		dev->n_sectors = ata_id_n_sectors(id);

		/* get current R/W Multiple count setting */
		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
			unsigned int max = dev->id[47] & 0xff;
			unsigned int cnt = dev->id[59] & 0xff;
			/* only recognize/allow powers of two here */
			if (is_power_of_2(max) && is_power_of_2(cnt))
				if (cnt <= max)
					dev->multi_count = cnt;
		}

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[24];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
			if (rc)
				return rc;

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_info(dev, "%s: %s, %s, max %s\n",
					     revbuf, modelbuf, fwrevbuf,
					     ata_mode_string(xfer_mask));
				ata_dev_info(dev,
					     "%llu sectors, multi %u: %s %s\n",
					     (unsigned long long)dev->n_sectors,
					     dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders = id[1];
			dev->heads = id[3];
			dev->sectors = id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads = id[55];
				dev->sectors = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_info(dev, "%s: %s, %s, max %s\n",
					     revbuf, modelbuf, fwrevbuf,
					     ata_mode_string(xfer_mask));
				ata_dev_info(dev,
					     "%llu sectors, multi %u, CHS %u/%u/%u\n",
					     (unsigned long long)dev->n_sectors,
					     dev->multi_count, dev->cylinders,
					     dev->heads, dev->sectors);
			}
		}

		/* Check and mark DevSlp capability. Get DevSlp timing
		 * variables from the SATA Settings page of the Identify
		 * Device Data log.
		 */
		if (ata_id_has_devslp(dev->id)) {
			u8 *sata_setting = ap->sector_buf;
			int i, j;

			dev->flags |= ATA_DFLAG_DEVSLP;
			err_mask = ata_read_log_page(dev,
						     ATA_LOG_SATA_ID_DEV_DATA,
						     ATA_LOG_SATA_SETTINGS,
						     sata_setting,
						     1);
			if (err_mask)
				ata_dev_dbg(dev,
					    "failed to get Identify Device Data, Emask 0x%x\n",
					    err_mask);
			else
				for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
					j = ATA_LOG_DEVSLP_OFFSET + i;
					dev->devslp_timing[i] = sata_setting[j];
				}
		}
		ata_dev_config_sense_reporting(dev);
		ata_dev_config_zac(dev);
		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		const char *dma_dir_string = "";
		u32 sntf;

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_warn(dev, "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support.  If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if (atapi_an &&
		    (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!sata_pmp_attached(ap) ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
			/* issue SET feature command to turn this on */
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_AN);
			if (err_mask)
				ata_dev_err(dev,
					    "failed to enable ATAPI AN (err_mask=0x%x)\n",
					    err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
			dev->flags |= ATA_DFLAG_DMADIR;
			dma_dir_string = ", DMADIR";
		}

		if (ata_id_has_da(dev->id)) {
			dev->flags |= ATA_DFLAG_DA;
			zpodd_init(dev);
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_info(dev,
				     "ATAPI: %s, %s, max %s%s%s%s\n",
				     modelbuf, fwrevbuf,
				     ata_mode_string(xfer_mask),
				     cdb_intr_string, atapi_an_string,
				     dma_dir_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	/* Limit PATA drive on SATA cable bridge transfers to udma5,
	   200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_info(dev, "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if ((dev->class == ATA_DEV_ATAPI) &&
	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
	}

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
					 dev->max_sectors);

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot. Do this after the dev_config call as some controllers
		   with buggy firmware may want to avoid reporting false device
		   bugs */
		if (print_info) {
			ata_dev_warn(dev,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_warn(dev,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
		ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
		ata_dev_warn(dev, "         contact the vendor or visit http://ata.wiki.kernel.org\n");
	}

	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
	return rc;
}
2717
2718
2719
2720
2721
2722
2723
2724
2725
/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 */
int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}
2730
2731
2732
2733
2734
2735
2736
2737
2738
/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 */
int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}
2743
2744
2745
2746
2747
2748
2749
2750
/**
 *	ata_cable_unknown	-	return unknown PATA cable
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 */
int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}
2755
2756
2757
2758
2759
2760
2761
2762
/**
 *	ata_cable_ignore	-	return ignored PATA cable
 *	@ap: port
 *
 *	Helper method for drivers which don't use cable type to limit
 *	transfer mode.
 */
int ata_cable_ignore(struct ata_port *ap)
{
	return ATA_CBL_PATA_IGN;
}
2767
2768
2769
2770
2771
2772
2773
2774
/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables.
 */
int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790
2791
2792
2793
2794
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */
int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	ata_for_each_dev(dev, &ap->link, ALL)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	ata_for_each_dev(dev, &ap->link, ALL) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;
		dev->dma_mode = 0xff;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	ata_for_each_dev(dev, &ap->link, ALL) {
		if (dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */
	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of
	 * the reported cable types and sensed types.  When SATA
	 * drives indicate we have a bridge, we don't know which end
	 * of the link the bridge is which is a problem.
	 */
	ata_for_each_dev(dev, &ap->link, ENABLED)
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */
	ata_for_each_dev(dev, &ap->link, ENABLED) {
		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	ata_for_each_dev(dev, &ap->link, ENABLED)
		return 0;

	return -ENODEV;

 fail:
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fall through */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link, 0);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926static void sata_print_link_status(struct ata_link *link)
2927{
2928 u32 sstatus, scontrol, tmp;
2929
2930 if (sata_scr_read(link, SCR_STATUS, &sstatus))
2931 return;
2932 sata_scr_read(link, SCR_CONTROL, &scontrol);
2933
2934 if (ata_phys_link_online(link)) {
2935 tmp = (sstatus >> 4) & 0xf;
2936 ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
2937 sata_spd_string(tmp), sstatus, scontrol);
2938 } else {
2939 ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
2940 sstatus, scontrol);
2941 }
2942}
2943
2944
2945
2946
2947
2948
2949
2950
2951
2952struct ata_device *ata_dev_pair(struct ata_device *adev)
2953{
2954 struct ata_link *link = adev->link;
2955 struct ata_device *pair = &link->device[1 - adev->devno];
2956 if (!ata_dev_enabled(pair))
2957 return NULL;
2958 return pair;
2959}
2960
2961
2962
2963
2964
2965
2966
2967
2968
2969
2970
2971
2972
2973
2974
2975
2976
2977
2978
2979
2980
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *	@spd_limit: Additional limit
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	If @spd_limit is non-zero, the speed is limited to equal to or
 *	lower than @spd_limit if such speed is supported.  If
 *	@spd_limit is slower than any supported speed, only the lowest
 *	supported speed is allowed.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
{
	u32 sstatus, spd, mask;
	int rc, bit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0 && ata_sstatus_online(sstatus))
		spd = (sstatus >> 4) & 0xf;
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;

	/* unconditionally mask off the highest bit */
	bit = fls(mask) - 1;
	mask &= ~(1 << bit);

	/* Mask off all speeds higher than or equal to the current one.  At
	 * this point, if current SPD is not available and we previously
	 * recorded the link speed from SStatus, the driver has already
	 * masked off the highest bit so mask should already be 1 or 0.
	 * Otherwise, we should not force 1.5Gbps on a link where we have
	 * not previously recorded speed from SStatus.  Just mask off the
	 * lowest speed in that case.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		mask &= 1;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	if (spd_limit) {
		if (mask & ((1 << spd_limit) - 1))
			mask &= (1 << spd_limit) - 1;
		else {
			bit = ffs(mask) - 1;
			mask = 1 << bit;
		}
	}

	link->sata_spd_limit = mask;

	ata_link_warn(link, "limiting SATA link speed to %s\n",
		      sata_spd_string(fls(mask)));

	return 0;
}
3034
3035static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
3036{
3037 struct ata_link *host_link = &link->ap->link;
3038 u32 limit, target, spd;
3039
3040 limit = link->sata_spd_limit;
3041
3042
3043
3044
3045
3046 if (!ata_is_host_link(link) && host_link->sata_spd)
3047 limit &= (1 << host_link->sata_spd) - 1;
3048
3049 if (limit == UINT_MAX)
3050 target = 0;
3051 else
3052 target = fls(limit);
3053
3054 spd = (*scontrol >> 4) & 0xf;
3055 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
3056
3057 return spd != target;
3058}
3059
3060
3061
3062
3063
3064
3065
3066
3067
3068
3069
3070
3071
3072
3073
3074
3075static int sata_set_spd_needed(struct ata_link *link)
3076{
3077 u32 scontrol;
3078
3079 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
3080 return 1;
3081
3082 return __sata_set_spd_needed(link, &scontrol);
3083}
3084
3085
3086
3087
3088
3089
3090
3091
3092
3093
3094
3095
3096
3097
3098int sata_set_spd(struct ata_link *link)
3099{
3100 u32 scontrol;
3101 int rc;
3102
3103 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3104 return rc;
3105
3106 if (!__sata_set_spd_needed(link, &scontrol))
3107 return 0;
3108
3109 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3110 return rc;
3111
3112 return 1;
3113}
3114
3115
3116
3117
3118
3119
3120
3121
3122
3123
3124
3125
3126
/*
 * PIO/MWDMA/UDMA timing parameters, in nanoseconds.
 * Columns: mode, setup, act8b, rec8b, cyc8b, active, recover,
 * dmack_hold, cycle, udma.
 * The table must stay sorted by mode number and is terminated by the
 * 0xFF sentinel entry relied upon by ata_timing_find_mode().
 */
static const struct ata_timing ata_timing[] = {
/*	{ XFER_..., setup, act8b, rec8b, cyc8b, active, recover, dmack_hold, cycle, udma } */
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 0,  600,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 0,  383,   0 },
	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 0,  240,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 0,  180,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 0,  120,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 0,  100,   0 },
	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20, 0,   80,   0 },

	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 50, 960,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 30, 480,   0 },
	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 20, 240,   0 },

	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 20, 480,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 5,  150,   0 },
	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 5,  120,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 5,  100,   0 },
	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20, 5,   80,   0 },

	/* UDMA modes only specify the cycle time. */
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0, 0,    0, 120 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0, 0,    0,  80 },
	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0, 0,    0,  60 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0, 0,    0,  45 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0, 0,    0,  30 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0, 0,    0,  20 },
	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0, 0,    0,  15 },

	{ 0xFF }	/* sentinel - keep last */
};
3158
/*
 * ENOUGH() rounds v up to a whole number of units (ceiling division);
 * EZ() additionally maps 0 to 0 so unspecified timing fields stay
 * unspecified after quantization.
 */
#define ENOUGH(v, unit) (((v)-1)/(unit)+1)
#define EZ(v, unit) ((v)?ENOUGH(v, unit):0)

/*
 * Quantize timing data from nanoseconds into clock periods.  T and UT
 * appear to be the command and UDMA clock periods respectively, in
 * units 1000x smaller than the table values (hence the * 1000) --
 * NOTE(review): confirm units against callers of this helper.
 */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup = EZ(t->setup * 1000, T);
	q->act8b = EZ(t->act8b * 1000, T);
	q->rec8b = EZ(t->rec8b * 1000, T);
	q->cyc8b = EZ(t->cyc8b * 1000, T);
	q->active = EZ(t->active * 1000, T);
	q->recover = EZ(t->recover * 1000, T);
	q->dmack_hold = EZ(t->dmack_hold * 1000, T);
	q->cycle = EZ(t->cycle * 1000, T);
	q->udma = EZ(t->udma * 1000, UT);
}
3174
/*
 * ata_timing_merge - merge two timings, keeping the slower value
 * @a: first input timing
 * @b: second input timing
 * @m: result (may alias @a or @b; each field is written independently)
 * @what: bitmask of ATA_TIMING_* flags selecting which fields to merge
 *
 * For every field selected in @what, store max(a->field, b->field)
 * into @m.  Fields not selected are left untouched in @m.
 */
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup, b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b, b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b, b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b, b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active, b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle, b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma, b->udma);
}
3188
3189const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
3190{
3191 const struct ata_timing *t = ata_timing;
3192
3193 while (xfer_mode > t->mode)
3194 t++;
3195
3196 if (xfer_mode == t->mode)
3197 return t;
3198
3199 WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
3200 __func__, xfer_mode);
3201
3202 return NULL;
3203}
3204
/**
 * ata_timing_compute - compute quantized timing for a transfer mode
 * @adev: target device (IDENTIFY data may tighten the timing)
 * @speed: XFER_* transfer mode to compute timing for
 * @t: result buffer
 * @T: bus clock period in ps used to quantize PIO/MWDMA fields
 * @UT: bus clock period in ps used to quantize the UDMA field
 *
 * Look up the standard timing for @speed, merge in any EIDE timing
 * data the drive reports, quantize into @T/@UT clock counts and fix
 * up active/recovery so they are consistent with the cycle time.
 *
 * RETURNS:
 * 0 on success, -EINVAL if @speed has no timing table entry.
 */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const u16 *id = adev->id;
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode in the standard timing table and start from
	 * a copy of that entry.
	 */
	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive reports valid EIDE timing data (IDENTIFY
	 * field-validity word bit 1), merge its minimum cycle times so
	 * the device is never driven faster than it advertises.
	 */
	if (id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE words valid */
		memset(&p, 0, sizeof(p));

		if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2)
				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
			else if ((speed <= XFER_PIO_4) ||
				 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
			p.cycle = id[ATA_ID_EIDE_DMA_MIN];

		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing from nanoseconds to clock counts.
	 */
	ata_timing_quantize(t, t, T, UT);

	/*
	 * For DMA/UDMA modes (speed above the fastest PIO mode), also
	 * compute the current PIO timing and merge it in so that the
	 * DMA timing is never faster than the PIO timing still used
	 * for register accesses.
	 */
	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that their sum reaches
	 * the cycle time (split the shortfall evenly, remainder to
	 * recovery).
	 */
	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/*
	 * Quantisation rounding may leave t->cycle below the sum of
	 * active and recovery; correct it if so.
	 */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
3280
3281
3282
3283
3284
3285
3286
3287
3288
3289
3290
3291
3292
3293
3294
3295
3296
/**
 * ata_timing_cycle2mode - find xfer mode for a cycle duration
 * @xfer_shift: ATA_SHIFT_* value selecting the transfer type
 * @cycle: cycle duration in ns
 *
 * Return the fastest mode of type @xfer_shift whose required cycle
 * time is not shorter than @cycle.
 *
 * RETURNS:
 * Matching xfer mode, or 0xff if @xfer_shift is unknown or no mode
 * of that type can accommodate @cycle.
 */
u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
{
	u8 base_mode = 0xff, last_mode = 0xff;
	const struct ata_xfer_ent *ent;
	const struct ata_timing *t;

	/* find the base (slowest) mode of the requested transfer type */
	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (ent->shift == xfer_shift)
			base_mode = ent->base;

	/* walk the timing table upwards while entries still belong to
	 * the same transfer type */
	for (t = ata_timing_find_mode(base_mode);
	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
		unsigned short this_cycle;

		switch (xfer_shift) {
		case ATA_SHIFT_PIO:
		case ATA_SHIFT_MWDMA:
			this_cycle = t->cycle;
			break;
		case ATA_SHIFT_UDMA:
			this_cycle = t->udma;
			break;
		default:
			return 0xff;
		}

		/* this mode needs a shorter cycle than requested - stop */
		if (cycle > this_cycle)
			break;

		last_mode = t->mode;
	}

	return last_mode;
}
3331
3332
3333
3334
3335
3336
3337
3338
3339
3340
3341
3342
3343
3344
3345
3346
/**
 * ata_down_xfermask_limit - adjust dev xfer masks downward
 * @dev: device whose xfer masks should be lowered
 * @sel: ATA_DNXFER_* selector, optionally ORed with ATA_DNXFER_QUIET
 *
 * Lower @dev's pio/mwdma/udma masks according to @sel.  This only
 * updates the masks; the new limit takes effect the next time the
 * transfer mode is programmed.
 *
 * RETURNS:
 * 0 on success, -ENOENT if no further lowering is possible.
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned long orig_mask, xfer_mask;
	unsigned long pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* drop the highest remaining PIO mode */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* drop the highest remaining DMA mode, UDMA first */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		/* keep only UDMA modes legal on a 40-wire cable */
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fall through - FORCE_PIO0 also disables all DMA */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* some PIO mode must remain and something must have changed */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_warn(dev, "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
3419
/*
 * ata_dev_set_mode - program the selected transfer mode on @dev
 *
 * Issue SET FEATURES - XFER MODE (unless the NOSETXFER quirk applies
 * on SATA), revalidate the device, and decide whether a device error
 * from the command may be ignored based on several known-broken
 * device classes.
 *
 * RETURNS: 0 on success, negative errno otherwise.
 */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
	const char *dev_err_whine = "";
	int ign_dev_err = 0;
	unsigned int err_mask = 0;
	int rc;

	/* keep ATA_DFLAG_PIO in sync with the selected xfer shift */
	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	/* NOSETXFER devices only get the skip on real SATA links */
	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
		dev_err_whine = " (SET_XFERMODE skipped)";
	else {
		if (nosetxfer)
			ata_dev_warn(dev,
				     "NOSETXFER but PATA detected - can't "
				     "skip SETXFER, might malfunction\n");
		err_mask = ata_dev_set_xfermode(dev);
	}

	/* anything other than a pure device error is fatal */
	if (err_mask & ~AC_ERR_DEV)
		goto fail;

	/* revalidate */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	if (dev->xfer_shift == ATA_SHIFT_PIO) {
		/* Old CFA may refuse this command, which is just fine */
		if (ata_id_is_cfa(dev->id))
			ign_dev_err = 1;

		/* pre-ATA4 devices running low PIO may not grok SETXFER */
		if (ata_id_major_version(dev->id) == 0 &&
		    dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;

		/* devices without IORDY at PIO0-2 may reject the
		 * command yet still work */
		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;
	}

	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
	 * Ignore the error for MWDMA0 if the device says it's already
	 * in MWDMA0 (IDENTIFY word 63, selected-mode bit). */
	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
	    dev->dma_mode == XFER_MW_DMA_0 &&
	    (dev->id[63] >> 8) & 1)
		ign_dev_err = 1;

	/* if the device is actually configured correctly, ignore dev err */
	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
		ign_dev_err = 1;

	if (err_mask & AC_ERR_DEV) {
		if (!ign_dev_err)
			goto fail;
		else
			dev_err_whine = " (device error ignored)";
	}

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_info(dev, "configured for %s%s\n",
		     ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
		     dev_err_whine);

	return 0;

 fail:
	ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
	return -EIO;
}
3500
3501
3502
3503
3504
3505
3506
3507
3508
3509
3510
3511
3512
3513
3514
3515
3516
3517
/**
 * ata_do_set_mode - program timings and issue SET FEATURES - XFER
 * @link: link on which timings will be programmed
 * @r_failed_dev: out parameter for failed device
 *
 * Compute and set transfer modes for all enabled devices on @link.
 * If a device fails, *@r_failed_dev points at it.
 *
 * RETURNS:
 * 0 on success, negative errno otherwise.
 */
int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;
	int rc = 0, used_dma = 0, found = 0;

	/* step 1: calculate xfer_mask */
	ata_for_each_dev(dev, link, ENABLED) {
		unsigned long pio_mask, dma_mask;
		unsigned int mode_mask;

		/* pick the DMA policy class for this device type */
		mode_mask = ATA_DMA_MASK_ATA;
		if (dev->class == ATA_DEV_ATAPI)
			mode_mask = ATA_DMA_MASK_ATAPI;
		else if (ata_id_is_cfa(dev->id))
			mode_mask = ATA_DMA_MASK_CFA;

		ata_dev_xfermask(dev);
		ata_force_xfermask(dev);

		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);

		/* honour the module-level libata_dma_mask policy */
		if (libata_dma_mask & mode_mask)
			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
						     dev->udma_mask);
		else
			dma_mask = 0;

		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (ata_dma_enabled(dev))
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	ata_for_each_dev(dev, link, ENABLED) {
		if (dev->pio_mode == 0xff) {
			ata_dev_warn(dev, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	ata_for_each_dev(dev, link, ENABLED) {
		if (!ata_dma_enabled(dev))
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	ata_for_each_dev(dev, link, ENABLED) {
		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status.  If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = ap;

 out:
	if (rc)
		*r_failed_dev = dev;
	return rc;
}
3599
3600
3601
3602
3603
3604
3605
3606
3607
3608
3609
3610
3611
3612
3613
3614
3615
3616
3617
3618
3619
/**
 * ata_wait_ready - wait for link to become ready
 * @link: link to be waited on
 * @deadline: deadline jiffies for the operation
 * @check_ready: callback: >0 ready, 0 not ready, -errno on error
 *
 * Poll @check_ready every 50ms until the link becomes ready or the
 * deadline passes.  -ENODEV from @check_ready is treated as possibly
 * transient (see below).
 *
 * RETURNS:
 * 0 if @link is ready, -errno otherwise (-EBUSY on deadline).
 */
int ata_wait_ready(struct ata_link *link, unsigned long deadline,
		   int (*check_ready)(struct ata_link *link))
{
	unsigned long start = jiffies;
	unsigned long nodev_deadline;
	int warned = 0;

	/* how long to tolerate -ENODEV: longer during parallel scan */
	if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
	else
		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);

	/* Slave readiness can't be tested separately from master; this
	 * function must not be called on a slave link.
	 */
	WARN_ON(link == link->ap->slave_link);

	if (time_after(nodev_deadline, deadline))
		nodev_deadline = deadline;

	while (1) {
		unsigned long now = jiffies;
		int ready, tmp;

		ready = tmp = check_ready(link);
		if (ready > 0)
			return 0;

		/* -ENODEV may be transient: ignore it while the link
		 * reports online, and on SATA also while the link is
		 * not positively offline and nodev_deadline hasn't
		 * passed yet.
		 */
		if (ready == -ENODEV) {
			if (ata_link_online(link))
				ready = 0;
			else if ((link->ap->flags & ATA_FLAG_SATA) &&
				 !ata_link_offline(link) &&
				 time_before(now, nodev_deadline))
				ready = 0;
		}

		if (ready)
			return ready;
		if (time_after(now, deadline))
			return -EBUSY;

		/* warn once if we've been waiting >5s with >3s to go */
		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_link_warn(link,
				"link is slow to respond, please be patient "
				"(ready=%d)\n", tmp);
			warned = 1;
		}

		ata_msleep(link->ap, 50);
	}
}
3686
3687
3688
3689
3690
3691
3692
3693
3694
3695
3696
3697
3698
3699
3700
3701int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3702 int (*check_ready)(struct ata_link *link))
3703{
3704 ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
3705
3706 return ata_wait_ready(link, deadline, check_ready);
3707}
3708
3709
3710
3711
3712
3713
3714
3715
3716
3717
3718
3719
3720
3721
3722
3723
3724
3725
3726
3727
3728
3729
3730
/**
 * sata_link_debounce - debounce SATA phy status
 * @link: ATA link to debounce SATA phy status for
 * @params: timing parameters { interval, duration, timeout } in msec
 * @deadline: deadline jiffies for the operation
 *
 * Wait until the DET field of @link's SStatus holds the same value
 * for @params[1] (duration) msec, polling every @params[0]
 * (interval) msec.  The effective deadline is the sooner of
 * @deadline and now + @params[2] (timeout).
 *
 * RETURNS:
 * 0 on success, -EPIPE if the value never stabilized in time,
 * other -errno on SCR access failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval = params[0];
	unsigned long duration = params[1];
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	/* cap the deadline by the debounce timeout */
	t = ata_deadline(jiffies, params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* low nibble of SStatus (DET) */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		ata_msleep(link->ap, interval);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* value unchanged since last poll? */
		if (cur == last) {
			/* DET == 1 only counts as stable once the
			 * deadline has passed */
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			/* stable for the full duration - done */
			if (time_after(jiffies,
				       ata_deadline(last_jiffies, duration)))
				return 0;
			continue;
		}

		/* value changed - restart the stability window */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  -EPIPE tells the caller the link is
		 * flaky and speed should be lowered.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}
3778
3779
3780
3781
3782
3783
3784
3785
3786
3787
3788
3789
3790
3791
3792
/**
 * sata_link_resume - resume SATA link
 * @link: ATA link to resume
 * @params: timing parameters { interval, duration, timeout } in msec
 * @deadline: deadline jiffies for the operation
 *
 * Resume @link: write the no-action value (0x300) into SControl's
 * DET/IPM bits, retrying because some controllers occasionally drop
 * the write, then debounce the link and clear SError.
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int sata_link_resume(struct ata_link *link, const unsigned long *params,
		     unsigned long deadline)
{
	int tries = ATA_LINK_RESUME_TRIES;
	u32 scontrol, serror;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/*
	 * Writes to SControl sometimes get ignored under certain
	 * controller conditions.  Rewrite until it reads back as
	 * 0x300 (keeping bits 4-7) or we run out of tries.
	 */
	do {
		scontrol = (scontrol & 0x0f0) | 0x300;
		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			return rc;

		/*
		 * Some PHYs react badly if SStatus is polled
		 * immediately after resuming.  Delay 200ms before
		 * debouncing unless the LLD opted out.
		 */
		if (!(link->flags & ATA_LFLAG_NO_DB_DELAY))
			ata_msleep(link->ap, 200);

		/* did the write stick? */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			return rc;
	} while ((scontrol & 0xf0f) != 0x300 && --tries);

	if ((scontrol & 0xf0f) != 0x300) {
		ata_link_warn(link, "failed to resume link (SControl %X)\n",
			      scontrol);
		return 0;
	}

	if (tries < ATA_LINK_RESUME_TRIES)
		ata_link_warn(link, "link resume succeeded after %d retries\n",
			      ATA_LINK_RESUME_TRIES - tries);

	if ((rc = sata_link_debounce(link, params, deadline)))
		return rc;

	/* clear SError by writing back its current contents */
	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
		rc = sata_scr_write(link, SCR_ERROR, serror);

	/* -EINVAL from the SError access is not fatal */
	return rc != -EINVAL ? rc : 0;
}
3844
3845
3846
3847
3848
3849
3850
3851
3852
3853
3854
3855
3856
3857
3858
3859
3860
3861
3862
/**
 * sata_link_scr_lpm - manipulate SControl IPM and SPM fields
 * @link: ATA link to manipulate SControl for
 * @policy: LPM policy to configure
 * @spm_wakeup: initiate LPM transition to active state
 *
 * Manipulate the IPM field of @link's SControl according to
 * @policy and, if @spm_wakeup is %true, initiate a transition to
 * the active state.  Finally, clear PHYRDY_CHG from SError.
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
		      bool spm_wakeup)
{
	struct ata_eh_context *ehc = &link->eh_context;
	bool woken_up = false;
	u32 scontrol;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	switch (policy) {
	case ATA_LPM_MAX_POWER:
		/* disable all LPM transitions (IPM field = 0x7) */
		scontrol |= (0x7 << 8);
		/* initiate transition to active state */
		if (spm_wakeup) {
			scontrol |= (0x4 << 12);
			woken_up = true;
		}
		break;
	case ATA_LPM_MED_POWER:
		/* allow LPM to PARTIAL (IPM field = 0x6) */
		scontrol &= ~(0x1 << 8);
		scontrol |= (0x6 << 8);
		break;
	case ATA_LPM_MIN_POWER:
		if (ata_link_nr_enabled(link) > 0)
			/* no restrictions on LPM transitions */
			scontrol &= ~(0x7 << 8);
		else {
			/* empty port - disable the phy (DET = 0x4) */
			scontrol &= ~0xf;
			scontrol |= (0x1 << 2);
		}
		break;
	default:
		WARN_ON(1);
	}

	rc = sata_scr_write(link, SCR_CONTROL, scontrol);
	if (rc)
		return rc;

	/* give the link time to transit out of LPM state */
	if (woken_up)
		msleep(10);

	/* clear PHYRDY_CHG from SError */
	ehc->i.serror &= ~SERR_PHYRDY_CHG;
	return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
}
3916
3917
3918
3919
3920
3921
3922
3923
3924
3925
3926
3927
3928
3929
3930
3931
3932
3933
/**
 * ata_std_prereset - prepare for reset
 * @link: ATA link to be reset
 * @deadline: deadline jiffies for the operation
 *
 * @link is about to be reset.  Initialize it.  Prereset is
 * best-effort: on problems it whines and proceeds rather than
 * failing, since a failure here would abort the whole reset.
 *
 * RETURNS:
 * Always 0.
 */
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume link */
	if (ap->flags & ATA_FLAG_SATA) {
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_warn(link,
				"failed to resume link for reset (errno=%d)\n",
				rc);
	}

	/* no point in trying softreset on an offline link */
	if (ata_phys_link_offline(link))
		ehc->i.action &= ~ATA_EH_SOFTRESET;

	return 0;
}
3961
3962
3963
3964
3965
3966
3967
3968
3969
3970
3971
3972
3973
3974
3975
3976
3977
3978
3979
3980
3981
3982
3983
3984
3985
/**
 * sata_link_hardreset - reset link via SATA phy reset
 * @link: link to reset
 * @timing: timing parameters { interval, duration, timeout } in msec
 * @deadline: deadline jiffies for the operation
 * @online: optional out parameter: link online after reset?
 * @check_ready: optional callback used to wait for link readiness
 *
 * SATA phy-reset @link using the DET bits of SControl.  If
 * @check_ready is given, readiness is awaited via ata_wait_ready();
 * otherwise the caller is expected to wait itself.  Device
 * classification is the caller's responsibility.
 *
 * *@online is set iff the reset succeeded and @link is online
 * afterwards.
 *
 * RETURNS:
 * 0 on success, -EAGAIN for the PMP follow-up-SRST case,
 * other -errno otherwise.
 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
			unsigned long deadline,
			bool *online, int (*check_ready)(struct ata_link *))
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (online)
		*online = false;

	if (sata_set_spd_needed(link)) {
		/* A speed limit must be applied.  Turn the phy off
		 * (DET = 0x4) while reconfiguring SPD, then apply the
		 * limit via sata_set_spd().
		 */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			goto out;

		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(link);
	}

	/* issue phy wake/reset (DET = 0x1) */
	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		goto out;

	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
		goto out;

	/* hold COMRESET at least 1ms before releasing it
	 * (AHCI 1.1, 10.4.2 - presumably; SATA specs are silent) */
	ata_msleep(link->ap, 1);

	/* bring link back */
	rc = sata_link_resume(link, timing, deadline);
	if (rc)
		goto out;
	/* if link is offline, nothing more to do */
	if (ata_phys_link_offline(link))
		goto out;

	/* Link is online from here on */
	if (online)
		*online = true;

	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
		/* PMP-attached host link needs a follow-up SRST; wait
		 * only up to ATA_TMOUT_PMP_SRST_WAIT here and let the
		 * caller do the SRST (signalled by -EAGAIN).
		 */
		if (check_ready) {
			unsigned long pmp_deadline;

			pmp_deadline = ata_deadline(jiffies,
						    ATA_TMOUT_PMP_SRST_WAIT);
			if (time_after(pmp_deadline, deadline))
				pmp_deadline = deadline;
			ata_wait_ready(link, pmp_deadline, check_ready);
		}
		rc = -EAGAIN;
		goto out;
	}

	rc = 0;
	if (check_ready)
		rc = ata_wait_ready(link, deadline, check_ready);
 out:
	if (rc && rc != -EAGAIN) {
		/* online is set iff link is online && reset succeeded */
		if (online)
			*online = false;
		ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
	}
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
4073
4074
4075
4076
4077
4078
4079
4080
4081
4082
4083
4084
4085
4086
4087
4088int sata_std_hardreset(struct ata_link *link, unsigned int *class,
4089 unsigned long deadline)
4090{
4091 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
4092 bool online;
4093 int rc;
4094
4095
4096 rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
4097 return online ? -EAGAIN : rc;
4098}
4099
4100
4101
4102
4103
4104
4105
4106
4107
4108
4109
4110
4111
4112void ata_std_postreset(struct ata_link *link, unsigned int *classes)
4113{
4114 u32 serror;
4115
4116 DPRINTK("ENTER\n");
4117
4118
4119 if (!sata_scr_read(link, SCR_ERROR, &serror))
4120 sata_scr_write(link, SCR_ERROR, serror);
4121
4122
4123 sata_print_link_status(link);
4124
4125 DPRINTK("EXIT\n");
4126}
4127
4128
4129
4130
4131
4132
4133
4134
4135
4136
4137
4138
4139
4140
4141
4142
4143
4144static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
4145 const u16 *new_id)
4146{
4147 const u16 *old_id = dev->id;
4148 unsigned char model[2][ATA_ID_PROD_LEN + 1];
4149 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
4150
4151 if (dev->class != new_class) {
4152 ata_dev_info(dev, "class mismatch %d != %d\n",
4153 dev->class, new_class);
4154 return 0;
4155 }
4156
4157 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
4158 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
4159 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
4160 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
4161
4162 if (strcmp(model[0], model[1])) {
4163 ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
4164 model[0], model[1]);
4165 return 0;
4166 }
4167
4168 if (strcmp(serial[0], serial[1])) {
4169 ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
4170 serial[0], serial[1]);
4171 return 0;
4172 }
4173
4174 return 1;
4175}
4176
4177
4178
4179
4180
4181
4182
4183
4184
4185
4186
4187
4188
4189
4190
4191int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
4192{
4193 unsigned int class = dev->class;
4194 u16 *id = (void *)dev->link->ap->sector_buf;
4195 int rc;
4196
4197
4198 rc = ata_dev_read_id(dev, &class, readid_flags, id);
4199 if (rc)
4200 return rc;
4201
4202
4203 if (!ata_dev_same_device(dev, class, id))
4204 return -ENODEV;
4205
4206 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
4207 return 0;
4208}
4209
4210
4211
4212
4213
4214
4215
4216
4217
4218
4219
4220
4221
4222
4223
4224
/**
 * ata_dev_revalidate - revalidate ATA device
 * @dev: device to revalidate
 * @new_class: new class code, or ATA_DEV_UNKNOWN
 * @readid_flags: flags forwarded to ata_dev_read_id()
 *
 * Re-read IDENTIFY page, make sure @dev is still attached to the
 * port, reconfigure it, and verify that the visible capacity has
 * not changed (handling late HPA lock/unlock specially).
 *
 * RETURNS:
 * 0 on success, negative errno otherwise.
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
		       unsigned int readid_flags)
{
	u64 n_sectors = dev->n_sectors;
	u64 n_native_sectors = dev->n_native_sectors;
	int rc;

	if (!ata_dev_enabled(dev))
		return -ENODEV;

	/* fail early if the new class is known but not a device type
	 * we can revalidate */
	if (ata_class_enabled(new_class) &&
	    new_class != ATA_DEV_ATA &&
	    new_class != ATA_DEV_ATAPI &&
	    new_class != ATA_DEV_ZAC &&
	    new_class != ATA_DEV_SEMB) {
		ata_dev_info(dev, "class mismatch %u != %u\n",
			     dev->class, new_class);
		rc = -ENODEV;
		goto fail;
	}

	/* re-read ID */
	rc = ata_dev_reread_id(dev, readid_flags);
	if (rc)
		goto fail;

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc)
		goto fail;

	/* verify n_sectors hasn't changed (only meaningful for ATA
	 * disks that had a size before) */
	if (dev->class != ATA_DEV_ATA || !n_sectors ||
	    dev->n_sectors == n_sectors)
		return 0;

	/* n_sectors has changed */
	ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
		     (unsigned long long)n_sectors,
		     (unsigned long long)dev->n_sectors);

	/* Something may have unlocked HPA involuntarily.  If the
	 * native size is unchanged and the new size matches it, keep
	 * the device with the larger size.
	 */
	if (dev->n_native_sectors == n_native_sectors &&
	    dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
		ata_dev_warn(dev,
			     "new n_sectors matches native, probably "
			     "late HPA unlock, n_sectors updated\n");
		/* use the larger n_sectors */
		return 0;
	}

	/* Conversely, the device may have come back with HPA locked
	 * late (old size matched native).  Unless HPA handling is
	 * known broken for this device, request an HPA unlock and
	 * retry via -EIO.
	 */
	if (dev->n_native_sectors == n_native_sectors &&
	    dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
	    !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
		ata_dev_warn(dev,
			     "old n_sectors matches native, probably "
			     "late HPA lock, will try to unlock HPA\n");
		/* try unlocking HPA */
		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
		rc = -EIO;
	} else
		rc = -ENODEV;

	/* restore original n_[native_]sectors and fail */
	dev->n_native_sectors = n_native_sectors;
	dev->n_sectors = n_sectors;
 fail:
	ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
4306
/*
 * One device-quirk table entry.  Patterns are matched with
 * glob_match() against the strings extracted from IDENTIFY data
 * (see ata_dev_blacklisted()).
 */
struct ata_blacklist_entry {
	const char *model_num;	/* glob pattern for the model string */
	const char *model_rev;	/* glob pattern for the firmware rev, or NULL for any */
	unsigned long horkage;	/* ATA_HORKAGE_* flags to apply on match */
};
4312
static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
	{ "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
	{ "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
	{ "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
	{ "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
	{ "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
	{ "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
	{ "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
	{ "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA },
	{ "CRD-84", NULL, ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
	{ "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
	{ "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
	{ "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
	{ " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA },
	{ "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA },
	/* Odd clown on sil3726/4726 PMPs - device must be ignored */
	{ "Config  Disk", NULL, ATA_HORKAGE_DISABLE },

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
	{ "QUANTUM DAT    DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
	{ "Slimtype DVD A  DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
	{ "Slimtype DVD A  DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },

	/*
	 * Causes silent data corruption with higher max sects.
	 */
	{ "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },

	/*
	 * Device times out with higher max sects.
	 */
	{ "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },

	/* Devices we expect to fail diagnostics */

	/* Devices where NCQ should be avoided */
	/* NCQ is slow */
	{ "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
	{ "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
	/* http://thread.gmane.org/gmane.linux.ide/14907 */
	{ "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
	/* NCQ is broken */
	{ "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
	{ "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
	{ "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
	{ "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
	{ "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ },

	/* Seagate NCQ + FLUSH CACHE firmware bug */
	{ "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	/* drives which fail FPDMA_AA activation (some may freeze afterwards) */
	{ "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
	{ "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
	{ "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },

	/* Blacklist entries taken from Silicon Image 3124/3132
	   Windows driver .inf file - also several Linux problem reports */
	{ "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
	{ "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
	{ "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },

	/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
	{ "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },

	/* devices which puke on READ_NATIVE_MAX */
	{ "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
	{ "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },

	/* this one allows HPA unlocking but fails IOs on the area */
	{ "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA },

	/* Devices which report 1 sector over size HPA */
	{ "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
	{ "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
	{ "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, },

	/* Devices which get the IVB wrong */
	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
	/* Maybe we should just blacklist TSSTcorp... */
	{ "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB, },

	/* Devices that do not need bridging limits applied */
	{ "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, },
	{ "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK, },

	/* Devices which aren't very happy with higher link speeds */
	{ "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, },
	{ "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS, },

	/*
	 * Devices which choke on SETXFER.  Applies only if both the
	 * device and controller are SATA.
	 */
	{ "PIONEER DVD-RW  DVRTD08", NULL, ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVR-215", NULL, ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },

	/* devices that don't properly handle queued TRIM commands */
	{ "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },

	/* devices that don't properly handle TRIM commands */
	{ "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, },

	/*
	 * Some drives deterministically return zeroes after TRIM;
	 * mark those so discard can be used safely for wiping.
	 * The intel match is an exception to a broader Intel rule.
	 */
	{ "INTEL*SSDSC2MH*", NULL, 0, },

	{ "Micron*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Crucial*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },

	/*
	 * Some WD SATA-I drives spin up and down erratically when the
	 * link is put into the slumber mode.  These are marked so LPM
	 * is not used with them.
	 */
	{ "WDC WD800JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD1200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD1600JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD2000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD2500JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },

	/* End Marker */
	{ }
};
4510
4511static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4512{
4513 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4514 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4515 const struct ata_blacklist_entry *ad = ata_device_blacklist;
4516
4517 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4518 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4519
4520 while (ad->model_num) {
4521 if (glob_match(ad->model_num, model_num)) {
4522 if (ad->model_rev == NULL)
4523 return ad->horkage;
4524 if (glob_match(ad->model_rev, model_rev))
4525 return ad->horkage;
4526 }
4527 ad++;
4528 }
4529 return 0;
4530}
4531
4532static int ata_dma_blacklisted(const struct ata_device *dev)
4533{
4534
4535
4536
4537
4538 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4539 (dev->flags & ATA_DFLAG_CDB_INTR))
4540 return 1;
4541 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4542}
4543
4544
4545
4546
4547
4548
4549
4550
4551
4552static int ata_is_40wire(struct ata_device *dev)
4553{
4554 if (dev->horkage & ATA_HORKAGE_IVB)
4555 return ata_drive_40wire_relaxed(dev->id);
4556 return ata_drive_40wire(dev->id);
4557}
4558
4559
4560
4561
4562
4563
4564
4565
4566
4567
4568
4569
4570
4571
4572static int cable_is_40wire(struct ata_port *ap)
4573{
4574 struct ata_link *link;
4575 struct ata_device *dev;
4576
4577
4578 if (ap->cbl == ATA_CBL_PATA40)
4579 return 1;
4580
4581
4582 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4583 return 0;
4584
4585
4586
4587
4588
4589 if (ap->cbl == ATA_CBL_PATA40_SHORT)
4590 return 0;
4591
4592
4593
4594
4595
4596
4597
4598
4599
4600
4601 ata_for_each_link(link, ap, EDGE) {
4602 ata_for_each_dev(dev, link, ENABLED) {
4603 if (!ata_is_40wire(dev))
4604 return 0;
4605 }
4606 }
4607 return 1;
4608}
4609
4610
4611
4612
4613
4614
4615
4616
4617
4618
4619
4620
4621
/**
 * ata_dev_xfermask - Compute supported xfermask of the given device
 * @dev: Device to compute xfermask for
 *
 * Compute the supported xfermask of @dev and store it in
 * dev->*_mask.  Applies all known limits: controller masks, the
 * device's own IDENTIFY masks, DMA blacklist, simplex claims,
 * IORDY restrictions, LLD mode filter and the cable rule.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/* On a shared (master/slave) cable the fastest modes are not
	 * allowed.
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_warn(dev,
			     "device is on DMA blacklist, disabling DMA\n");
	}

	/* simplex hosts allow DMA on only one channel at a time */
	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_warn(dev,
			     "simplex DMA is claimed by other device, disabling DMA\n");
	}

	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/* Apply the cable rule last so we know whether the cable alone
	 * limited the rate.  Only when UDMA modes above UDMA/33 are
	 * still on the table do we need to check for a 40-wire cable.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if (cable_is_40wire(ap)) {
			ata_dev_warn(dev,
				     "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
4687
4688
4689
4690
4691
4692
4693
4694
4695
4696
4697
4698
4699
4700
4701
/**
 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 *	@dev: Device to which the command is sent
 *
 *	Issue SET FEATURES - XFER MODE command to @dev on port @ap.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	/* Some controllers and ATAPI devices show flaky interrupt
	 * behavior after setting xfer mode.  Use polling instead.
	 */
	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
	tf.protocol = ATA_PROT_NODATA;

	/* If we are using IORDY we must send the mode setting command */
	if (ata_pio_need_iordy(dev))
		tf.nsect = dev->xfer_mode;
	/* If the device claims IORDY, explicitly set the chosen mode */
	else if (ata_id_has_iordy(dev->id))
		tf.nsect = 0x01;
	else /* In the ancient relic department - skip all of this */
		return 0;

	/* 15s timeout: some drives are slow to spin up and respond */
	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4733
4734
4735
4736
4737
4738
4739
4740
4741
4742
4743
4744
4745
4746
4747
4748
/**
 *	ata_dev_set_feature - Issue SET FEATURES
 *	@dev: Device to which the command is sent
 *	@enable: Whether to enable or disable the feature (subcommand byte)
 *	@feature: The sector count field contents (feature argument)
 *
 *	Issue SET FEATURES command to @dev on port @ap with sector count.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
{
	struct ata_taskfile tf;
	unsigned int err_mask;
	unsigned long timeout = 0;

	/* set up set-features taskfile */
	DPRINTK("set features - SATA features\n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = enable;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = feature;

	/* spin-up can take much longer than a normal command */
	if (enable == SETFEATURES_SPINUP)
		timeout = ata_probe_timeout ?
			  ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT;
	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
EXPORT_SYMBOL_GPL(ata_dev_set_feature);
4774
4775
4776
4777
4778
4779
4780
4781
4782
4783
4784
4785
4786
/**
 *	ata_dev_init_params - Issue INIT DEV PARAMS command
 *	@dev: Device to which the command is sent
 *	@heads: Number of heads (taskfile parameter)
 *	@sectors: Number of sectors (taskfile parameter)
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	/* A clean abort indicates an original or just out of spec drive
	   and we should continue as we issue the setup based on the
	   drive reported working geometry */
	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
		err_mask = 0;

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4817
4818
4819
4820
4821
4822
4823
4824
4825
4826
/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->sg;
	int dir = qc->dma_dir;

	WARN_ON_ONCE(sg == NULL);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* unmap with orig_n_elem - dma_unmap_sg() must be given the
	 * same nents that was passed to dma_map_sg(), not the mapped
	 * count it returned (stored in n_elem).
	 */
	if (qc->n_elem)
		dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->sg = NULL;
}
4843
4844
4845
4846
4847
4848
4849
4850
4851
4852
4853
4854
4855
4856
4857
4858int atapi_check_dma(struct ata_queued_cmd *qc)
4859{
4860 struct ata_port *ap = qc->ap;
4861
4862
4863
4864
4865 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4866 unlikely(qc->nbytes & 15))
4867 return 1;
4868
4869 if (ap->ops->check_atapi_dma)
4870 return ap->ops->check_atapi_dma(qc);
4871
4872 return 0;
4873}
4874
4875
4876
4877
4878
4879
4880
4881
4882
4883
4884
4885
4886
4887
4888
4889
4890int ata_std_qc_defer(struct ata_queued_cmd *qc)
4891{
4892 struct ata_link *link = qc->dev->link;
4893
4894 if (ata_is_ncq(qc->tf.protocol)) {
4895 if (!ata_tag_valid(link->active_tag))
4896 return 0;
4897 } else {
4898 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4899 return 0;
4900 }
4901
4902 return ATA_DEFER_LINK;
4903}
4904
/* no-op qc_prep for controllers that need no per-command preparation */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4906
4907
4908
4909
4910
4911
4912
4913
4914
4915
4916
4917
4918
4919
/**
 *	ata_sg_init - Associate command with scatter-gather table.
 *	@qc: Command to be associated
 *	@sg: Scatter-gather table.
 *	@n_elem: Number of elements in s/g table.
 *
 *	Initialize the data-related elements of queued_cmd @qc
 *	to point to a scatter-gather table @sg, containing @n_elem
 *	elements.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
		 unsigned int n_elem)
{
	qc->sg = sg;
	qc->n_elem = n_elem;
	/* PIO transfer cursor starts at the first sg entry */
	qc->cursg = qc->sg;
}
4927
4928
4929
4930
4931
4932
4933
4934
4935
4936
4937
4938
4939
4940
/**
 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 *	@qc: Command with scatter-gather table to be mapped.
 *
 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */
static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int n_elem;

	VPRINTK("ENTER, ata%u\n", ap->print_id);

	/* dma_map_sg() returns 0 on mapping failure; n_elem is
	 * unsigned, so "< 1" is effectively "== 0" here */
	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
	if (n_elem < 1)
		return -1;

	DPRINTK("%d sg elements mapped\n", n_elem);
	/* keep the original nents around - dma_unmap_sg() needs it */
	qc->orig_n_elem = qc->n_elem;
	qc->n_elem = n_elem;
	qc->flags |= ATA_QCFLAG_DMAMAP;

	return 0;
}
4959
4960
4961
4962
4963
4964
4965
4966
4967
4968
4969
4970
4971
/**
 *	swap_buf_le16 - swap halves of 16-bit words in place
 *	@buf: Buffer to swap
 *	@buf_words: Number of 16-bit words in buffer.
 *
 *	Swap halves of 16-bit words if needed to convert from
 *	little-endian byte order to native cpu byte order, or
 *	vice-versa.  No-op on little-endian builds.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void swap_buf_le16(u16 *buf, unsigned int buf_words)
{
#ifdef __BIG_ENDIAN
	unsigned int i;

	for (i = 0; i < buf_words; i++)
		buf[i] = le16_to_cpu(buf[i]);
#endif /* __BIG_ENDIAN */
}
4981
4982
4983
4984
4985
4986
4987
4988
4989
4990
/**
 *	ata_qc_new_init - Request an available ATA command, and initialize it
 *	@dev: Device from which the command will originate
 *	@tag: tag to use, or tag source for SAS hosts (see below)
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Initialized qc, or NULL if none could be obtained.
 */
struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_queued_cmd *qc;

	/* no command while frozen */
	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
		return NULL;

	/* libsas manages its own tag space; override the passed tag */
	if (ap->flags & ATA_FLAG_SAS_HOST) {
		tag = ata_sas_allocate_tag(ap);
		if (tag < 0)
			return NULL;
	}

	qc = __ata_qc_from_tag(ap, tag);
	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;

	ata_qc_reinit(qc);

	return qc;
}
5017
5018
5019
5020
5021
5022
5023
5024
5025
5026
5027
/**
 *	ata_qc_free - free unused ata_queued_cmd
 *	@qc: Command to complete
 *
 *	Designed to free unused ata_queued_cmd object
 *	in case something prevents using it.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_free(struct ata_queued_cmd *qc)
{
	struct ata_port *ap;
	unsigned int tag;

	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	ap = qc->ap;

	qc->flags = 0;
	tag = qc->tag;
	if (likely(ata_tag_valid(tag))) {
		/* poison the tag so reuse of a freed qc is detectable */
		qc->tag = ATA_TAG_POISON;
		if (ap->flags & ATA_FLAG_SAS_HOST)
			ata_sas_free_tag(tag, ap);
	}
}
5044
/* Low-level completion: tear down DMA mapping, clear the active-tag
 * bookkeeping on the link and port, then invoke the completion callback.
 * Called with the host lock held.
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap;
	struct ata_link *link;

	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
	ap = qc->ap;
	link = qc->dev->link;

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (ata_is_ncq(qc->tf.protocol)) {
		link->sactive &= ~(1 << qc->tag);
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called.
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
5083
5084static void fill_result_tf(struct ata_queued_cmd *qc)
5085{
5086 struct ata_port *ap = qc->ap;
5087
5088 qc->result_tf.flags = qc->tf.flags;
5089 ap->ops->qc_fill_rtf(qc);
5090}
5091
5092static void ata_verify_xfer(struct ata_queued_cmd *qc)
5093{
5094 struct ata_device *dev = qc->dev;
5095
5096 if (!ata_is_data(qc->tf.protocol))
5097 return;
5098
5099 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
5100 return;
5101
5102 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
5103}
5104
5105
5106
5107
5108
5109
5110
5111
5112
5113
5114
5115
5116
5117
5118
5119
/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA command has
 *	completed, with either an ok or not-ok status.
 *
 *	Refrain from calling this function multiple times when
 *	successfully completing multiple NCQ commands.
 *	ata_qc_complete_multiple() should be used instead, which will
 *	properly update IRQ expect state.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Trigger the LED (if available) */
	ledtrig_disk_activity();

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc.  libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
	 * not synchronize with interrupt handler.  Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		struct ata_device *dev = qc->dev;
		struct ata_eh_info *ehi = &dev->link->eh_info;

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		/*
		 * Finish internal commands without any further
		 * processing and always with the result TF filled.
		 */
		if (unlikely(ata_tag_internal(qc->tag))) {
			fill_result_tf(qc);
			trace_ata_qc_complete_internal(qc);
			__ata_qc_complete(qc);
			return;
		}

		/*
		 * Non-internal qc has failed.  Fill the result TF and
		 * summon EH.
		 */
		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			fill_result_tf(qc);
			trace_ata_qc_complete_failed(qc);
			ata_qc_schedule_eh(qc);
			return;
		}

		WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		trace_ata_qc_complete_done(qc);
		/* Some commands need post-processing after successful
		 * completion.
		 */
		switch (qc->tf.command) {
		case ATA_CMD_SET_FEATURES:
			if (qc->tf.feature != SETFEATURES_WC_ON &&
			    qc->tf.feature != SETFEATURES_WC_OFF &&
			    qc->tf.feature != SETFEATURES_RA_ON &&
			    qc->tf.feature != SETFEATURES_RA_OFF)
				break;
			/* fall through */
		case ATA_CMD_INIT_DEV_PARAMS:
		case ATA_CMD_SET_MULTI:
			/* revalidate device */
			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
			ata_port_schedule_eh(ap);
			break;

		case ATA_CMD_SLEEP:
			dev->flags |= ATA_DFLAG_SLEEPING;
			break;
		}

		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
			ata_verify_xfer(qc);

		__ata_qc_complete(qc);
	} else {
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
5214
5215
5216
5217
5218
5219
5220
5221
5222
5223
5224
5225
5226
5227
5228
5229
5230
5231
5232
5233
5234
5235int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
5236{
5237 int nr_done = 0;
5238 u32 done_mask;
5239
5240 done_mask = ap->qc_active ^ qc_active;
5241
5242 if (unlikely(done_mask & qc_active)) {
5243 ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n",
5244 ap->qc_active, qc_active);
5245 return -EINVAL;
5246 }
5247
5248 while (done_mask) {
5249 struct ata_queued_cmd *qc;
5250 unsigned int tag = __ffs(done_mask);
5251
5252 qc = ata_qc_from_tag(ap, tag);
5253 if (qc) {
5254 ata_qc_complete(qc);
5255 nr_done++;
5256 }
5257 done_mask &= ~(1 << tag);
5258 }
5259
5260 return nr_done;
5261}
5262
5263
5264
5265
5266
5267
5268
5269
5270
5271
5272
5273
5274
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command to submission to device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;
	u8 prot = qc->tf.protocol;

	/* Make sure only one non-NCQ command is outstanding.  The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	if (ata_is_ncq(prot)) {
		WARN_ON_ONCE(link->sactive & (1 << qc->tag));

		if (!link->sactive)
			ap->nr_active_links++;
		link->sactive |= 1 << qc->tag;
	} else {
		WARN_ON_ONCE(link->sactive);

		ap->nr_active_links++;
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	/*
	 * We guarantee to LLDs that they will have at least one
	 * non-zero sg if the command is a data command.
	 */
	if (WARN_ON_ONCE(ata_is_data(prot) &&
			 (!qc->sg || !qc->n_elem || !qc->nbytes)))
		goto sys_err;

	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
				 (ap->flags & ATA_FLAG_PIO_DMA)))
		if (ata_sg_setup(qc))
			goto sys_err;

	/* if device is sleeping, schedule reset and abort the link */
	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
		link->eh_info.action |= ATA_EH_RESET;
		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
		ata_link_abort(link);
		return;
	}

	ap->ops->qc_prep(qc);
	trace_ata_qc_issue(qc);
	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sys_err:
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	ata_qc_complete(qc);
}
5336
5337
5338
5339
5340
5341
5342
5343
5344
5345
5346
5347
5348
5349int sata_scr_valid(struct ata_link *link)
5350{
5351 struct ata_port *ap = link->ap;
5352
5353 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5354}
5355
5356
5357
5358
5359
5360
5361
5362
5363
5364
5365
5366
5367
5368
5369
5370
5371
5372int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5373{
5374 if (ata_is_host_link(link)) {
5375 if (sata_scr_valid(link))
5376 return link->ap->ops->scr_read(link, reg, val);
5377 return -EOPNOTSUPP;
5378 }
5379
5380 return sata_pmp_scr_read(link, reg, val);
5381}
5382
5383
5384
5385
5386
5387
5388
5389
5390
5391
5392
5393
5394
5395
5396
5397
5398
5399int sata_scr_write(struct ata_link *link, int reg, u32 val)
5400{
5401 if (ata_is_host_link(link)) {
5402 if (sata_scr_valid(link))
5403 return link->ap->ops->scr_write(link, reg, val);
5404 return -EOPNOTSUPP;
5405 }
5406
5407 return sata_pmp_scr_write(link, reg, val);
5408}
5409
5410
5411
5412
5413
5414
5415
5416
5417
5418
5419
5420
5421
5422
5423
5424
5425int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5426{
5427 if (ata_is_host_link(link)) {
5428 int rc;
5429
5430 if (sata_scr_valid(link)) {
5431 rc = link->ap->ops->scr_write(link, reg, val);
5432 if (rc == 0)
5433 rc = link->ap->ops->scr_read(link, reg, &val);
5434 return rc;
5435 }
5436 return -EOPNOTSUPP;
5437 }
5438
5439 return sata_pmp_scr_write(link, reg, val);
5440}
5441
5442
5443
5444
5445
5446
5447
5448
5449
5450
5451
5452
5453
5454
5455
5456bool ata_phys_link_online(struct ata_link *link)
5457{
5458 u32 sstatus;
5459
5460 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5461 ata_sstatus_online(sstatus))
5462 return true;
5463 return false;
5464}
5465
5466
5467
5468
5469
5470
5471
5472
5473
5474
5475
5476
5477
5478
5479
5480bool ata_phys_link_offline(struct ata_link *link)
5481{
5482 u32 sstatus;
5483
5484 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5485 !ata_sstatus_online(sstatus))
5486 return true;
5487 return false;
5488}
5489
5490
5491
5492
5493
5494
5495
5496
5497
5498
5499
5500
5501
5502
5503
5504
5505
5506bool ata_link_online(struct ata_link *link)
5507{
5508 struct ata_link *slave = link->ap->slave_link;
5509
5510 WARN_ON(link == slave);
5511
5512 return ata_phys_link_online(link) ||
5513 (slave && ata_phys_link_online(slave));
5514}
5515
5516
5517
5518
5519
5520
5521
5522
5523
5524
5525
5526
5527
5528
5529
5530
5531
5532bool ata_link_offline(struct ata_link *link)
5533{
5534 struct ata_link *slave = link->ap->slave_link;
5535
5536 WARN_ON(link == slave);
5537
5538 return ata_phys_link_offline(link) &&
5539 (!slave || ata_phys_link_offline(slave));
5540}
5541
5542#ifdef CONFIG_PM
#ifdef CONFIG_PM
/* Request a PM transition (suspend/resume) to be carried out by EH.
 * @mesg is recorded on the port, @action/@ehi_flags are pushed onto
 * every link's eh_info, and EH is scheduled.  When @async is false the
 * function waits for EH to finish the transition before returning.
 */
static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
				unsigned int action, unsigned int ehi_flags,
				bool async)
{
	struct ata_link *link;
	unsigned long flags;

	/* Previous resume operation might still be in
	 * progress.  Wait for PM_PENDING to clear.
	 */
	if (ap->pflags & ATA_PFLAG_PM_PENDING) {
		ata_port_wait_eh(ap);
		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
	}

	/* request PM operation to EH */
	spin_lock_irqsave(ap->lock, flags);

	ap->pm_mesg = mesg;
	ap->pflags |= ATA_PFLAG_PM_PENDING;
	ata_for_each_link(link, ap, HOST_FIRST) {
		link->eh_info.action |= action;
		link->eh_info.flags |= ehi_flags;
	}

	ata_port_schedule_eh(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!async) {
		ata_port_wait_eh(ap);
		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
	}
}
5577
5578
5579
5580
5581
5582
5583
5584
/*
 * On standard suspend, EH is quiesced: no autopsy, no recovery, quiet.
 * This is important to make sure no EH story is written over the
 * suspend transition.
 */
static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
						| ATA_EHI_NO_AUTOPSY
						| ATA_EHI_NO_RECOVERY;

/* synchronous port suspend - returns after EH has processed it */
static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
}

/* asynchronous port suspend - EH carries it out in the background */
static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
}
5598
5599static int ata_port_pm_suspend(struct device *dev)
5600{
5601 struct ata_port *ap = to_ata_port(dev);
5602
5603 if (pm_runtime_suspended(dev))
5604 return 0;
5605
5606 ata_port_suspend(ap, PMSG_SUSPEND);
5607 return 0;
5608}
5609
5610static int ata_port_pm_freeze(struct device *dev)
5611{
5612 struct ata_port *ap = to_ata_port(dev);
5613
5614 if (pm_runtime_suspended(dev))
5615 return 0;
5616
5617 ata_port_suspend(ap, PMSG_FREEZE);
5618 return 0;
5619}
5620
/* dev_pm_ops .poweroff callback - always suspend for hibernation */
static int ata_port_pm_poweroff(struct device *dev)
{
	ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE);
	return 0;
}
5626
/* on resume, skip autopsy and keep EH quiet; a reset is requested */
static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY
						| ATA_EHI_QUIET;

/* synchronous port resume - returns after EH has processed it */
static void ata_port_resume(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false);
}

/* asynchronous port resume - EH carries it out in the background */
static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true);
}
5639
/* dev_pm_ops .resume/.thaw/.restore callback.  Resume is asynchronous;
 * the runtime-PM status is re-armed to active afterwards.
 */
static int ata_port_pm_resume(struct device *dev)
{
	ata_port_resume_async(to_ata_port(dev), PMSG_RESUME);
	pm_runtime_disable(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	return 0;
}
5648
5649
5650
5651
5652
5653
5654
5655
5656
/*
 * For ODDs, the upper layer will poll for media change every few seconds,
 * which will make it enter and leave suspend state every few seconds. And
 * as each suspend will cause a hard/soft reset, the gain of runtime suspend
 * is very little and resume latency is not negligible.
 *
 * So the idle callback here will not proceed to suspend if a non-ZPODD
 * capable ATAPI device is attached.
 */
static int ata_port_runtime_idle(struct device *dev)
{
	struct ata_port *ap = to_ata_port(dev);
	struct ata_link *link;
	struct ata_device *adev;

	ata_for_each_link(link, ap, HOST_FIRST) {
		ata_for_each_dev(adev, link, ENABLED)
			if (adev->class == ATA_DEV_ATAPI &&
			    !zpodd_dev_enabled(adev))
				return -EBUSY;
	}

	return 0;
}
5672
/* runtime-PM suspend callback - synchronous port suspend */
static int ata_port_runtime_suspend(struct device *dev)
{
	ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND);
	return 0;
}
5678
/* runtime-PM resume callback - synchronous port resume */
static int ata_port_runtime_resume(struct device *dev)
{
	ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME);
	return 0;
}
5684
/* PM callbacks for the ata_port device type; thaw and restore reuse the
 * resume path.
 */
static const struct dev_pm_ops ata_port_pm_ops = {
	.suspend = ata_port_pm_suspend,
	.resume = ata_port_pm_resume,
	.freeze = ata_port_pm_freeze,
	.thaw = ata_port_pm_resume,
	.poweroff = ata_port_pm_poweroff,
	.restore = ata_port_pm_resume,

	.runtime_suspend = ata_port_runtime_suspend,
	.runtime_resume = ata_port_runtime_resume,
	.runtime_idle = ata_port_runtime_idle,
};
5697
5698
5699
5700
5701
5702
/* sas ports don't participate in pm runtime management of ata_ports,
 * and need to resume/suspend ata devices directly through these hooks.
 */
void ata_sas_port_suspend(struct ata_port *ap)
{
	ata_port_suspend_async(ap, PMSG_SUSPEND);
}
EXPORT_SYMBOL_GPL(ata_sas_port_suspend);
5708
/* counterpart of ata_sas_port_suspend() - asynchronous resume for SAS */
void ata_sas_port_resume(struct ata_port *ap)
{
	ata_port_resume_async(ap, PMSG_RESUME);
}
EXPORT_SYMBOL_GPL(ata_sas_port_resume);
5714
5715
5716
5717
5718
5719
5720
5721
/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Suspend @host.  Actual operation is performed by port suspend;
 *	this only records the PM message on the host device.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	host->dev->power.power_state = mesg;
	return 0;
}
5727
5728
5729
5730
5731
5732
5733
/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host.  Actual operation is performed by port resume;
 *	this only resets the recorded power state.
 */
void ata_host_resume(struct ata_host *host)
{
	host->dev->power.power_state = PMSG_ON;
}
5738#endif
5739
/* device type for ata_port devices; carries the PM ops when PM is built */
struct device_type ata_port_type = {
	.name = "ata_port",
#ifdef CONFIG_PM
	.pm = &ata_port_pm_ops,
#endif
};
5746
5747
5748
5749
5750
5751
5752
5753
5754
5755
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = ata_dev_phys_link(dev);
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the attached device, reset together */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	/* zero the region between the CLEAR markers; fields outside it
	 * (link, devno, ...) must survive re-init */
	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
5781
5782
5783
5784
5785
5786
5787
5788
5789
5790
5791
5792
/**
 *	ata_link_init - Initialize an ata_link structure
 *	@ap: ATA port link is attached to
 *	@link: Link structure to initialize
 *	@pmp: Port multiplier port number
 *
 *	Initialize @link.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
{
	int i;

	/* clear everything except for devices */
	memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
	       ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);

	link->ap = ap;
	link->pmp = pmp;
	link->active_tag = ATA_TAG_POISON;
	link->hw_sata_spd_limit = UINT_MAX;

	/* can't use iterator, ap isn't initialized yet */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &link->device[i];

		dev->link = link;
		dev->devno = dev - link->device;
#ifdef CONFIG_ATA_ACPI
		dev->gtf_filter = ata_acpi_gtf_filter;
#endif
		ata_dev_init(dev);
	}
}
5818
5819
5820
5821
5822
5823
5824
5825
5826
5827
5828
5829
5830
5831
/**
 *	sata_link_init_spd - Initialize link->sata_spd_limit
 *	@link: Link to configure sata_spd_limit for
 *
 *	Initialize @link->[hw_]sata_spd_limit to the currently
 *	configured value (read from SControl).
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_init_spd(struct ata_link *link)
{
	u8 spd;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
	if (rc)
		return rc;

	/* SPD field of SControl: allowed-speed limit (0 = no limit) */
	spd = (link->saved_scontrol >> 4) & 0xf;
	if (spd)
		link->hw_sata_spd_limit &= (1 << spd) - 1;

	ata_force_link_limits(link);

	link->sata_spd_limit = link->hw_sata_spd_limit;

	return 0;
}
5851
5852
5853
5854
5855
5856
5857
5858
5859
5860
5861
5862
5863
/**
 *	ata_port_alloc - allocate and initialize basic ATA port resources
 *	@host: ATA host this allocated port belongs to
 *
 *	Allocate and initialize basic ATA port resources.
 *
 *	RETURNS:
 *	Allocate ATA port on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	/* start frozen; thawed in ata_host_start() */
	ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
	ap->lock = &host->lock;
	ap->print_id = -1;
	ap->local_port_no = -1;
	ap->host = host;
	ap->dev = host->dev;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	mutex_init(&ap->scsi_scan_mutex);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);
	init_completion(&ap->park_req_pending);
	init_timer_deferrable(&ap->fastdrain_timer);
	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
	ap->fastdrain_timer.data = (unsigned long)ap;

	ap->cbl = ATA_CBL_NONE;

	ata_link_init(ap, &ap->link, 0);

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	ata_sff_port_init(ap);

	return ap;
}
5912
/* devres release callback for an ata_host: drop per-port references
 * and free port structures when the owning device goes away.
 */
static void ata_host_release(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if (ap->scsi_host)
			scsi_host_put(ap->scsi_host);

		kfree(ap->pmp_link);
		kfree(ap->slave_link);
		kfree(ap);
		host->ports[i] = NULL;
	}

	dev_set_drvdata(gendev, NULL);
}
5935
5936
5937
5938
5939
5940
5941
5942
5943
5944
5945
5946
5947
5948
5949
5950
5951
5952
5953
5954
5955
/**
 *	ata_host_alloc - allocate and init basic ATA host resources
 *	@dev: generic device this host is associated with
 *	@max_ports: maximum number of ATA ports associated with this host
 *
 *	Allocate and initialize basic ATA host resources.  LLD calls
 *	this function to allocate a host, initializes it fully and
 *	attaches it using ata_host_register().
 *
 *	@max_ports ports are allocated and host->n_ports is
 *	initialized to @max_ports.  The caller is allowed to decrease
 *	host->n_ports before calling ata_host_register().  The unused
 *	ports will be automatically freed on registration.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;

	DPRINTK("ENTER\n");

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return NULL;

	/* alloc a container for our list of ATA ports (buses);
	 * +1 leaves a NULL sentinel after the last port pointer */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
	/* alloc a container for our list of ATA ports (buses) */
	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
	if (!host)
		goto err_out;

	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	mutex_init(&host->eh_mutex);
	host->dev = dev;
	host->n_ports = max_ports;

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

 err_out:
	devres_release_group(dev, NULL);
	return NULL;
}
6001
6002
6003
6004
6005
6006
6007
6008
6009
6010
6011
6012
6013
6014
6015
6016
6017
/**
 *	ata_host_alloc_pinfo - alloc host and init with port_info array
 *	@dev: generic device this host is associated with
 *	@ppi: array of ATA port_info to initialize host with
 *	@n_ports: number of ATA ports attached to this host
 *
 *	Allocate ATA host and initialize with info from @ppi.  If NULL
 *	terminated, @ppi may contain fewer entries than @n_ports.  The
 *	last entry will be used for the remaining ports.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc_pinfo(struct device *dev,
				      const struct ata_port_info * const * ppi,
				      int n_ports)
{
	const struct ata_port_info *pi;
	struct ata_host *host;
	int i, j;

	host = ata_host_alloc(dev, n_ports);
	if (!host)
		return NULL;

	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* once @ppi runs out, keep reusing the last entry */
		if (ppi[j])
			pi = ppi[j++];

		ap->pio_mask = pi->pio_mask;
		ap->mwdma_mask = pi->mwdma_mask;
		ap->udma_mask = pi->udma_mask;
		ap->flags |= pi->flags;
		ap->link.flags |= pi->link_flags;
		ap->ops = pi->port_ops;

		/* first non-dummy port_ops becomes the host's ops */
		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
			host->ops = pi->port_ops;
	}

	return host;
}
6049
6050
6051
6052
6053
6054
6055
6056
6057
6058
6059
6060
6061
6062
6063
6064
6065
6066
6067
6068
6069
6070
6071
6072
6073
6074
6075
6076
6077
6078
6079
6080
6081
6082
6083
6084
6085
6086
6087
6088
6089
6090
6091
6092
6093
6094
6095
/**
 *	ata_slave_link_init - initialize slave link
 *	@ap: port to initialize slave link for
 *
 *	Create and initialize slave link for @ap.  This enables slave
 *	link handling on the port.  Incompatible with PMP.
 *
 *	LOCKING:
 *	Should be called before host is registered.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_slave_link_init(struct ata_port *ap)
{
	struct ata_link *link;

	WARN_ON(ap->slave_link);
	WARN_ON(ap->flags & ATA_FLAG_PMP);

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	/* pmp 1 marks it as the slave link */
	ata_link_init(ap, link, 1);
	ap->slave_link = link;
	return 0;
}
6111
/* devres release callback registered by ata_host_start(): stop each
 * port, then the host itself, when the owning device is torn down.
 */
static void ata_host_stop(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	WARN_ON(!(host->flags & ATA_HOST_STARTED));

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}

	if (host->ops->host_stop)
		host->ops->host_stop(host);
}
6129
6130
6131
6132
6133
6134
6135
6136
6137
6138
6139
6140
6141
6142
6143
6144
6145
6146
6147
6148
6149
/**
 *	ata_finalize_port_ops - finalize ata_port_operations
 *	@ops: ata_port_operations to finalize
 *
 *	An ata_port_operations can inherit from another ops and the
 *	inherited ops chain is followed here, filling in any hook that
 *	is still NULL in @ops from the nearest ancestor that defines
 *	it.  Hooks set to ERR_PTR() sentinels are used to force a hook
 *	to NULL despite an ancestor defining it; they are converted to
 *	real NULLs at the end.  Treats the ops struct as a flat array
 *	of function pointers up to the ->inherits member.
 *
 *	After finalization, ->inherits is cleared so the work is done
 *	only once per ops table.
 *
 *	LOCKING:
 *	None (serialized by the internal spinlock).
 */
static void ata_finalize_port_ops(struct ata_port_operations *ops)
{
	static DEFINE_SPINLOCK(lock);
	const struct ata_port_operations *cur;
	void **begin = (void **)ops;
	void **end = (void **)&ops->inherits;
	void **pp;

	if (!ops || !ops->inherits)
		return;

	spin_lock(&lock);

	/* walk the inheritance chain, copying down unset hooks */
	for (cur = ops->inherits; cur; cur = cur->inherits) {
		void **inherit = (void **)cur;

		for (pp = begin; pp < end; pp++, inherit++)
			if (!*pp)
				*pp = *inherit;
	}

	/* turn ERR_PTR "force NULL" markers into actual NULLs */
	for (pp = begin; pp < end; pp++)
		if (IS_ERR(*pp))
			*pp = NULL;

	ops->inherits = NULL;

	spin_unlock(&lock);
}
6179
6180
6181
6182
6183
6184
6185
6186
6187
6188
6189
6190
6191
6192
6193
6194
6195
/**
 *	ata_host_start - start and freeze ports of an ATA host
 *	@host: ATA host to start ports for
 *
 *	Start and then freeze ports of @host.  Started status is
 *	recorded in host->flags, so this function can be called
 *	multiple times.  Ports are guaranteed to get started only
 *	once.  If host->ops isn't initialized yet, it is set to the
 *	first non-dummy port ops.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if all ports are started successfully, -errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int have_stop = 0;
	void *start_dr = NULL;
	int i, rc;

	if (host->flags & ATA_HOST_STARTED)
		return 0;

	ata_finalize_port_ops(host->ops);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_finalize_port_ops(ap->ops);

		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_stop)
			have_stop = 1;
	}

	if (host->ops->host_stop)
		have_stop = 1;

	/* register the stop callback only when something needs stopping */
	if (have_stop) {
		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
		if (!start_dr)
			return -ENOMEM;
	}

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				if (rc != -ENODEV)
					dev_err(host->dev,
						"failed to start port %d (errno=%d)\n",
						i, rc);
				goto err_out;
			}
		}
		ata_eh_freeze_port(ap);
	}

	if (start_dr)
		devres_add(host->dev, start_dr);
	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	/* unwind: stop the ports that were already started */
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	devres_free(start_dr);
	return rc;
}
6259
6260
6261
6262
6263
6264
6265
6266
/**
 *	ata_host_init - Initialize a host struct for sas (ipr, libsas)
 *	@host:	host to initialize
 *	@dev:	device host is attached to
 *	@ops:	port_ops
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */
void ata_host_init(struct ata_host *host, struct device *dev,
		   struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	mutex_init(&host->eh_mutex);
	host->n_tags = ATA_MAX_QUEUE - 1;
	host->dev = dev;
	host->ops = ops;
}
6276
/* Kick off asynchronous probing of a port: request probe + reset on
 * all devices of the primary link and schedule EH to carry it out.
 */
void __ata_port_probe(struct ata_port *ap)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	unsigned long flags;

	/* kick EH for boot probing */
	spin_lock_irqsave(ap->lock, flags);

	ehi->probe_mask |= ATA_ALL_DEVICES;
	ehi->action |= ATA_EH_RESET;
	ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

	ap->pflags &= ~ATA_PFLAG_INITIALIZING;
	ap->pflags |= ATA_PFLAG_LOADING;
	ata_port_schedule_eh(ap);

	spin_unlock_irqrestore(ap->lock, flags);
}
6295
/* Probe a port synchronously.  New-EH ports go through EH-based
 * probing; old-EH ports use the legacy ata_bus_probe() path.
 * Returns 0 on success, -errno from ata_bus_probe() otherwise.
 */
int ata_port_probe(struct ata_port *ap)
{
	int rc = 0;

	if (ap->ops->error_handler) {
		__ata_port_probe(ap);
		ata_port_wait_eh(ap);
	} else {
		DPRINTK("ata%u: bus probe begin\n", ap->print_id);
		rc = ata_bus_probe(ap);
		DPRINTK("ata%u: bus probe end\n", ap->print_id);
	}
	return rc;
}
6310
6311
/* async probing callback scheduled once per port from ata_host_register() */
static void async_port_probe(void *data, async_cookie_t cookie)
{
	struct ata_port *ap = data;

	/*
	 * If we're not allowed to scan this host in parallel,
	 * we need to wait until all previous scans have completed
	 * before going further.
	 * Jeff Garzik says this is only within a controller, so we
	 * don't need to wait for port 0, only for later ports.
	 */
	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
		async_synchronize_cookie(cookie);

	(void)ata_port_probe(ap);

	/* in order to keep device order, we need to synchronize at this point */
	async_synchronize_cookie(cookie);

	ata_scsi_scan_host(ap, 1);
}
6333
6334
6335
6336
6337
6338
6339
6340
6341
6342
6343
6344
6345
6346
6347
6348
6349
/**
 *	ata_host_register - register initialized ATA host
 *	@host: ATA host to register
 *	@sht: template for SCSI host
 *
 *	Register initialized ATA host.  @host is allocated using
 *	ata_host_alloc() and fully initialized by LLD.  This function
 *	starts ports, registers @host with ATA and SCSI layers and
 *	probe registered devices.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_err(host->dev, "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Blow away unused ports.  This happens when LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++) {
		host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
		host->ports[i]->local_port_no = i + 1;
	}

	/* Create associated sysfs transport objects  */
	for (i = 0; i < host->n_ports; i++) {
		rc = ata_tport_add(host->dev,host->ports[i]);
		if (rc) {
			goto err_tadd;
		}
	}

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		goto err_tadd;

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned long xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);
		if (ap->slave_link)
			sata_link_init_spd(ap->slave_link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap)) {
			ata_port_info(ap, "%cATA max %s %s\n",
				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
				      ata_mode_string(xfer_mask),
				      ap->link.eh_info.desc);
			ata_ehi_clear_desc(&ap->link.eh_info);
		} else
			ata_port_info(ap, "DUMMY\n");
	}

	/* perform each probe asynchronously */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		async_schedule(async_port_probe, ap);
	}

	return 0;

 err_tadd:
	while (--i >= 0) {
		ata_tport_delete(host->ports[i]);
	}
	return rc;

}
6431
6432
6433
6434
6435
6436
6437
6438
6439
6440
6441
6442
6443
6444
6445
6446
6447
6448
6449
6450
6451
6452
6453
6454
/**
 *	ata_host_activate - start host, request IRQ and register it
 *	@host: target ATA host
 *	@irq: IRQ to request
 *	@irq_handler: irq_handler used when requesting IRQ
 *	@irq_flags: irq_flags used when requesting IRQ
 *	@sht: scsi_host_template to use when registering the host
 *
 *	Convenience helper combining the three usual activation steps:
 *	ata_host_start(), devm_request_irq() and ata_host_register().
 *	An @irq of zero skips IRQ registration (polling mode); in that
 *	case @irq_handler is expected to be NULL.
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_activate(struct ata_host *host, int irq,
		      irq_handler_t irq_handler, unsigned long irq_flags,
		      struct scsi_host_template *sht)
{
	int i, rc;
	char *irq_desc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	/* Special case for polling mode */
	if (!irq) {
		WARN_ON(irq_handler);
		return ata_host_register(host, sht);
	}

	/* devm-managed IRQ description: "<driver>[<device>]" */
	irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]",
				  dev_driver_string(host->dev),
				  dev_name(host->dev));
	if (!irq_desc)
		return -ENOMEM;

	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
			      irq_desc, host);
	if (rc)
		return rc;

	for (i = 0; i < host->n_ports; i++)
		ata_port_desc(host->ports[i], "irq %d", irq);

	rc = ata_host_register(host, sht);
	/* if failed, just free the IRQ and leave ports alone */
	if (rc)
		devm_free_irq(host->dev, irq, host);

	return rc;
}
6493
6494
6495
6496
6497
6498
6499
6500
6501
6502
6503
6504
/**
 *	ata_port_detach - detach an ATA port
 *	@ap: ATA port to be detached
 *
 *	Flags the port as unloading, lets EH finish and quiesce, then
 *	tears down zpodd state, PMP transport links, the SCSI host and
 *	the transport port.  May sleep.
 */
static void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	struct ata_link *link;
	struct ata_device *dev;

	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	ata_port_schedule_eh(ap);
	spin_unlock_irqrestore(ap->lock, flags);

	/* wait till EH commits suicide */
	ata_port_wait_eh(ap);

	/* it better be dead now */
	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));

	cancel_delayed_work_sync(&ap->hotplug_task);

 skip_eh:
	/* clean up zpodd on port removal */
	ata_for_each_link(link, ap, HOST_FIRST) {
		ata_for_each_dev(dev, link, ALL) {
			if (zpodd_dev_enabled(dev))
				zpodd_exit(dev);
		}
	}
	/* remove PMP transport links, if any */
	if (ap->pmp_link) {
		int i;
		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
			ata_tlink_delete(&ap->pmp_link[i]);
	}

	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
	ata_tport_delete(ap);
}
6545
6546
6547
6548
6549
6550
6551
6552
6553
6554
6555void ata_host_detach(struct ata_host *host)
6556{
6557 int i;
6558
6559 for (i = 0; i < host->n_ports; i++)
6560 ata_port_detach(host->ports[i]);
6561
6562
6563 ata_acpi_dissociate(host);
6564}
6565
6566#ifdef CONFIG_PCI
6567
6568
6569
6570
6571
6572
6573
6574
6575
6576
6577
6578
/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	The ata_host was stored as drvdata at probe time; detach it.
 *	devm-managed resources are released by the driver core.
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	ata_host_detach(pci_get_drvdata(pdev));
}
6585
6586
6587int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6588{
6589 unsigned long tmp = 0;
6590
6591 switch (bits->width) {
6592 case 1: {
6593 u8 tmp8 = 0;
6594 pci_read_config_byte(pdev, bits->reg, &tmp8);
6595 tmp = tmp8;
6596 break;
6597 }
6598 case 2: {
6599 u16 tmp16 = 0;
6600 pci_read_config_word(pdev, bits->reg, &tmp16);
6601 tmp = tmp16;
6602 break;
6603 }
6604 case 4: {
6605 u32 tmp32 = 0;
6606 pci_read_config_dword(pdev, bits->reg, &tmp32);
6607 tmp = tmp32;
6608 break;
6609 }
6610
6611 default:
6612 return -EINVAL;
6613 }
6614
6615 tmp &= bits->mask;
6616
6617 return (tmp == bits->val) ? 1 : 0;
6618}
6619
6620#ifdef CONFIG_PM
/* Save config space, disable the device and, for sleep transitions,
 * drop it to D3hot.  Counterpart of ata_pci_device_do_resume(). */
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event & PM_EVENT_SLEEP)
		pci_set_power_state(pdev, PCI_D3hot);
}
6629
6630int ata_pci_device_do_resume(struct pci_dev *pdev)
6631{
6632 int rc;
6633
6634 pci_set_power_state(pdev, PCI_D0);
6635 pci_restore_state(pdev);
6636
6637 rc = pcim_enable_device(pdev);
6638 if (rc) {
6639 dev_err(&pdev->dev,
6640 "failed to enable device after resume (%d)\n", rc);
6641 return rc;
6642 }
6643
6644 pci_set_master(pdev);
6645 return 0;
6646}
6647
6648int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6649{
6650 struct ata_host *host = pci_get_drvdata(pdev);
6651 int rc = 0;
6652
6653 rc = ata_host_suspend(host, mesg);
6654 if (rc)
6655 return rc;
6656
6657 ata_pci_device_do_suspend(pdev, mesg);
6658
6659 return 0;
6660}
6661
/* Resume the PCI device and, on success, resume the attached host. */
int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	int err = ata_pci_device_do_resume(pdev);

	if (!err)
		ata_host_resume(host);

	return err;
}
6672#endif
6673
6674#endif
6675
6676
6677
6678
6679
6680
6681
6682
6683
6684
6685
6686
/**
 *	ata_platform_remove_one - platform layer callback for device removal
 *	@pdev: platform device that was removed
 *
 *	drvdata was set to the ata_host at probe time; detach it.
 *	devm-managed resources are released by the driver core.
 *
 *	RETURNS:
 *	0 always.
 */
int ata_platform_remove_one(struct platform_device *pdev)
{
	ata_host_detach(platform_get_drvdata(pdev));

	return 0;
}
6695
6696static int __init ata_parse_force_one(char **cur,
6697 struct ata_force_ent *force_ent,
6698 const char **reason)
6699{
6700 static const struct ata_force_param force_tbl[] __initconst = {
6701 { "40c", .cbl = ATA_CBL_PATA40 },
6702 { "80c", .cbl = ATA_CBL_PATA80 },
6703 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
6704 { "unk", .cbl = ATA_CBL_PATA_UNK },
6705 { "ign", .cbl = ATA_CBL_PATA_IGN },
6706 { "sata", .cbl = ATA_CBL_SATA },
6707 { "1.5Gbps", .spd_limit = 1 },
6708 { "3.0Gbps", .spd_limit = 2 },
6709 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
6710 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
6711 { "noncqtrim", .horkage_on = ATA_HORKAGE_NO_NCQ_TRIM },
6712 { "ncqtrim", .horkage_off = ATA_HORKAGE_NO_NCQ_TRIM },
6713 { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID },
6714 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
6715 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
6716 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
6717 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
6718 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
6719 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
6720 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
6721 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
6722 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
6723 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
6724 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
6725 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
6726 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6727 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6728 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6729 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6730 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6731 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6732 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6733 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6734 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6735 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6736 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6737 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6738 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6739 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6740 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6741 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6742 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6743 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6744 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6745 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6746 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6747 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
6748 { "nohrst", .lflags = ATA_LFLAG_NO_HRST },
6749 { "nosrst", .lflags = ATA_LFLAG_NO_SRST },
6750 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6751 { "rstonce", .lflags = ATA_LFLAG_RST_ONCE },
6752 { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR },
6753 { "disable", .horkage_on = ATA_HORKAGE_DISABLE },
6754 };
6755 char *start = *cur, *p = *cur;
6756 char *id, *val, *endp;
6757 const struct ata_force_param *match_fp = NULL;
6758 int nr_matches = 0, i;
6759
6760
6761 while (*p != '\0' && *p != ',')
6762 p++;
6763
6764 if (*p == '\0')
6765 *cur = p;
6766 else
6767 *cur = p + 1;
6768
6769 *p = '\0';
6770
6771
6772 p = strchr(start, ':');
6773 if (!p) {
6774 val = strstrip(start);
6775 goto parse_val;
6776 }
6777 *p = '\0';
6778
6779 id = strstrip(start);
6780 val = strstrip(p + 1);
6781
6782
6783 p = strchr(id, '.');
6784 if (p) {
6785 *p++ = '\0';
6786 force_ent->device = simple_strtoul(p, &endp, 10);
6787 if (p == endp || *endp != '\0') {
6788 *reason = "invalid device";
6789 return -EINVAL;
6790 }
6791 }
6792
6793 force_ent->port = simple_strtoul(id, &endp, 10);
6794 if (p == endp || *endp != '\0') {
6795 *reason = "invalid port/link";
6796 return -EINVAL;
6797 }
6798
6799 parse_val:
6800
6801 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6802 const struct ata_force_param *fp = &force_tbl[i];
6803
6804 if (strncasecmp(val, fp->name, strlen(val)))
6805 continue;
6806
6807 nr_matches++;
6808 match_fp = fp;
6809
6810 if (strcasecmp(val, fp->name) == 0) {
6811 nr_matches = 1;
6812 break;
6813 }
6814 }
6815
6816 if (!nr_matches) {
6817 *reason = "unknown value";
6818 return -EINVAL;
6819 }
6820 if (nr_matches > 1) {
6821 *reason = "ambigious value";
6822 return -EINVAL;
6823 }
6824
6825 force_ent->param = *match_fp;
6826
6827 return 0;
6828}
6829
6830static void __init ata_parse_force_param(void)
6831{
6832 int idx = 0, size = 1;
6833 int last_port = -1, last_device = -1;
6834 char *p, *cur, *next;
6835
6836
6837 for (p = ata_force_param_buf; *p; p++)
6838 if (*p == ',')
6839 size++;
6840
6841 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6842 if (!ata_force_tbl) {
6843 printk(KERN_WARNING "ata: failed to extend force table, "
6844 "libata.force ignored\n");
6845 return;
6846 }
6847
6848
6849 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6850 const char *reason = "";
6851 struct ata_force_ent te = { .port = -1, .device = -1 };
6852
6853 next = cur;
6854 if (ata_parse_force_one(&next, &te, &reason)) {
6855 printk(KERN_WARNING "ata: failed to parse force "
6856 "parameter \"%s\" (%s)\n",
6857 cur, reason);
6858 continue;
6859 }
6860
6861 if (te.port == -1) {
6862 te.port = last_port;
6863 te.device = last_device;
6864 }
6865
6866 ata_force_tbl[idx++] = te;
6867
6868 last_port = te.port;
6869 last_device = te.device;
6870 }
6871
6872 ata_force_tbl_size = idx;
6873}
6874
6875static int __init ata_init(void)
6876{
6877 int rc;
6878
6879 ata_parse_force_param();
6880
6881 rc = ata_sff_init();
6882 if (rc) {
6883 kfree(ata_force_tbl);
6884 return rc;
6885 }
6886
6887 libata_transport_init();
6888 ata_scsi_transport_template = ata_attach_transport();
6889 if (!ata_scsi_transport_template) {
6890 ata_sff_exit();
6891 rc = -ENOMEM;
6892 goto err_out;
6893 }
6894
6895 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6896 return 0;
6897
6898err_out:
6899 return rc;
6900}
6901
/* Module exit: tear down in reverse order of ata_init(). */
static void __exit ata_exit(void)
{
	ata_release_transport(ata_scsi_transport_template);
	libata_transport_exit();
	ata_sff_exit();
	kfree(ata_force_tbl);
}
6909
subsys_initcall(ata_init);
module_exit(ata_exit);

/* shared rate limit: at most 1 message per HZ/5 interval */
static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);

/* Returns nonzero when the caller may emit a (rate-limited) message. */
int ata_ratelimit(void)
{
	return __ratelimit(&ratelimit);
}
6919
6920
6921
6922
6923
6924
6925
6926
6927
6928
6929
6930
6931
6932
6933
/**
 *	ata_msleep - ATA EH owner aware msleep
 *	@ap: ATA port to attribute the sleep to (may be NULL)
 *	@msecs: duration to sleep in milliseconds
 *
 *	Sleeps @msecs.  If the current task owns @ap's EH, the ownership
 *	is released before going to sleep and reacquired afterwards, so
 *	other ports sharing the host can make EH progress meanwhile.
 *
 *	LOCKING:
 *	Might sleep.
 */
void ata_msleep(struct ata_port *ap, unsigned int msecs)
{
	bool owns_eh = ap && ap->host->eh_owner == current;

	if (owns_eh)
		ata_eh_release(ap);

	if (msecs < 20) {
		/* short sleeps: use hrtimer-based usleep_range() with a
		 * small slack rather than jiffy-granular msleep() */
		unsigned long usecs = msecs * USEC_PER_MSEC;
		usleep_range(usecs, usecs + 50);
	} else {
		msleep(msecs);
	}

	if (owns_eh)
		ata_eh_acquire(ap);
}
6951
6952
6953
6954
6955
6956
6957
6958
6959
6960
6961
6962
6963
6964
6965
6966
6967
6968
6969
6970
6971
6972
6973
6974
6975
/**
 *	ata_wait_register - poll an IO-mapped register until bits change
 *	@ap: ATA port to attribute the polling sleeps to (may be NULL)
 *	@reg: IO-mapped register to poll
 *	@mask: mask applied to the read value
 *	@val: value to wait to go away; polling stops once
 *	      (ioread32(@reg) & @mask) != @val
 *	@interval: polling interval in milliseconds
 *	@timeout: timeout in milliseconds
 *
 *	RETURNS:
 *	The last raw register value read (condition met or timed out).
 */
u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval, unsigned long timeout)
{
	unsigned long deadline;
	u32 tmp;

	tmp = ioread32(reg);

	/* Calculate timeout _after_ the first read so that the read
	 * itself (which flushes preceding posted writes to the device)
	 * doesn't eat into the timeout budget.
	 */
	deadline = ata_deadline(jiffies, timeout);

	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
		ata_msleep(ap, interval);
		tmp = ioread32(reg);
	}

	return tmp;
}
6997
6998
6999
7000
7001
7002
7003
7004
7005
7006
7007
7008
7009
/**
 *	sata_lpm_ignore_phy_events - test if a PHY event should be ignored
 *	@link: link receiving the event
 *
 *	RETURNS:
 *	True if the event must be ignored, false otherwise.
 */
bool sata_lpm_ignore_phy_events(struct ata_link *link)
{
	unsigned long lpm_timeout = link->last_lpm_change +
				    msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);

	/* if LPM is enabled, PHY events are expected, ignore them */
	if (link->lpm_policy > ATA_LPM_MAX_POWER)
		return true;

	/* ignore the first PHY event after the LPM policy changed
	 * as it might be spurious */
	if ((link->flags & ATA_LFLAG_CHANGED) &&
	    time_before(jiffies, lpm_timeout))
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
7029
7030
7031
7032
/*
 * Dummy port ops: used for ports that exist in the host's port table
 * but have no usable hardware behind them.  Every issued command fails
 * immediately with AC_ERR_SYSTEM and error handling is a no-op.
 */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}

static void ata_dummy_error_handler(struct ata_port *ap)
{
	/* truly dummy */
}

struct ata_port_operations ata_dummy_port_ops = {
	.qc_prep = ata_noop_qc_prep,
	.qc_issue = ata_dummy_qc_issue,
	.error_handler = ata_dummy_error_handler,
	.sched_eh = ata_std_sched_eh,
	.end_eh = ata_std_end_eh,
};

const struct ata_port_info ata_dummy_port_info = {
	.port_ops = &ata_dummy_port_ops,
};
7054
7055
7056
7057
/* printk() a message prefixed with the port's identity ("ataN: "). */
void ata_port_printk(const struct ata_port *ap, const char *level,
		     const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	/* %pV expands the wrapped fmt/args inside a single printk call */
	printk("%sata%u: %pV", level, ap->print_id, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(ata_port_printk);
7074
/* printk() a message prefixed with the link's identity.  The PMP/link
 * number suffix ("ataN.LL: ") is only printed when the port actually
 * has multiple links (PMP attached or a slave link). */
void ata_link_printk(const struct ata_link *link, const char *level,
		     const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	if (sata_pmp_attached(link->ap) || link->ap->slave_link)
		printk("%sata%u.%02u: %pV",
		       level, link->ap->print_id, link->pmp, &vaf);
	else
		printk("%sata%u: %pV",
		       level, link->ap->print_id, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(ata_link_printk);
7096
/* printk() a message prefixed with the device's identity
 * ("ataN.DD: ", where DD is link->pmp + devno). */
void ata_dev_printk(const struct ata_device *dev, const char *level,
		    const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	printk("%sata%u.%02u: %pV",
	       level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
	       &vaf);

	va_end(args);
}
EXPORT_SYMBOL(ata_dev_printk);
7115
/* Print a driver's version string at KERN_DEBUG level for @dev. */
void ata_print_version(const struct device *dev, const char *version)
{
	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
}
EXPORT_SYMBOL(ata_print_version);
7121
7122
7123
7124
7125
7126
7127
/* Symbols exported to libata LLDs, grouped roughly by area. */

/* core ops tables, host lifecycle and qc handling */
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_base_port_ops);
EXPORT_SYMBOL_GPL(sata_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
EXPORT_SYMBOL_GPL(ata_link_next);
EXPORT_SYMBOL_GPL(ata_dev_next);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_host_alloc);
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
EXPORT_SYMBOL_GPL(ata_slave_link_init);
EXPORT_SYMBOL_GPL(ata_host_start);
EXPORT_SYMBOL_GPL(ata_host_register);
EXPORT_SYMBOL_GPL(ata_host_activate);
EXPORT_SYMBOL_GPL(ata_host_detach);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(atapi_cmd_type);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_pack_xfermask);
EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
EXPORT_SYMBOL_GPL(ata_mode_string);
EXPORT_SYMBOL_GPL(ata_id_xfermask);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_std_qc_defer);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_dev_disable);
/* link reset / SCR access */
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(ata_wait_after_reset);
EXPORT_SYMBOL_GPL(sata_link_debounce);
EXPORT_SYMBOL_GPL(sata_link_resume);
EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(sata_link_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_msleep);
EXPORT_SYMBOL_GPL(ata_wait_register);
/* SCSI midlayer glue */
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_link_online);
EXPORT_SYMBOL_GPL(ata_link_offline);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

/* transfer timing helpers */
EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_find_mode);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);
EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);

/* PCI helpers (incl. power management) */
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif
#endif

EXPORT_SYMBOL_GPL(ata_platform_remove_one);

/* error handling */
EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
EXPORT_SYMBOL_GPL(ata_port_desc);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
#endif
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_link_abort);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(sata_async_notification);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
EXPORT_SYMBOL_GPL(ata_do_eh);
EXPORT_SYMBOL_GPL(ata_std_error_handler);

/* cable detection helpers */
EXPORT_SYMBOL_GPL(ata_cable_40wire);
EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_ignore);
EXPORT_SYMBOL_GPL(ata_cable_sata);
7242