1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/pci.h>
30#include <linux/init.h>
31#include <linux/list.h>
32#include <linux/mm.h>
33#include <linux/spinlock.h>
34#include <linux/blkdev.h>
35#include <linux/delay.h>
36#include <linux/timer.h>
37#include <linux/time.h>
38#include <linux/interrupt.h>
39#include <linux/completion.h>
40#include <linux/suspend.h>
41#include <linux/workqueue.h>
42#include <linux/scatterlist.h>
43#include <linux/io.h>
44#include <linux/async.h>
45#include <linux/log2.h>
46#include <linux/slab.h>
47#include <linux/glob.h>
48#include <scsi/scsi.h>
49#include <scsi/scsi_cmnd.h>
50#include <scsi/scsi_host.h>
51#include <linux/libata.h>
52#include <asm/byteorder.h>
53#include <asm/unaligned.h>
54#include <linux/cdrom.h>
55#include <linux/ratelimit.h>
56#include <linux/leds.h>
57#include <linux/pm_runtime.h>
58#include <linux/platform_device.h>
59
60#define CREATE_TRACE_POINTS
61#include <trace/events/libata.h>
62
63#include "libata.h"
64#include "libata-transport.h"
65
66
/*
 * SATA PHY debounce timing tables: { interval, duration, timeout } in ms.
 * Used by link debounce/resume helpers; "hotplug" is more relaxed and
 * "long" is the most conservative.
 */
const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };

/* Base operations every libata driver inherits: standard EH entry points. */
const struct ata_port_operations ata_base_port_ops = {
	.prereset = ata_std_prereset,
	.postreset = ata_std_postreset,
	.error_handler = ata_std_error_handler,
	.sched_eh = ata_std_sched_eh,
	.end_eh = ata_std_end_eh,
};

/* Base operations for SATA ports: adds NCQ deferral and SATA hardreset. */
const struct ata_port_operations sata_port_ops = {
	.inherits = &ata_base_port_ops,

	.qc_defer = ata_std_qc_defer,
	.hardreset = sata_std_hardreset,
};
85
/* device configuration helpers defined later in this file */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

/* monotonically increasing id used to name ports/hosts in messages */
atomic_t ata_print_id = ATOMIC_INIT(0);

/* one parsed override from the "libata.force" parameter */
struct ata_force_param {
	const char *name;		/* human-readable name for logging */
	unsigned int cbl;		/* cable type; ATA_CBL_NONE == not forced */
	int spd_limit;			/* SATA PHY speed limit; 0 == not forced */
	unsigned long xfer_mask;	/* transfer mode mask; 0 == not forced */
	unsigned int horkage_on;	/* horkage flags to turn on */
	unsigned int horkage_off;	/* horkage flags to turn off */
	unsigned int lflags;		/* link flags to force on */
};

/* force-table entry: which port/device the override applies to */
struct ata_force_ent {
	int port;			/* port number, -1 == any port */
	int device;			/* device number, -1 == any device */
	struct ata_force_param param;	/* what to apply */
};

/* table built by parsing ata_force_param_buf during init */
static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

/* raw "force" parameter string; parsed once, hence __initdata */
static char ata_force_param_buf[PAGE_SIZE] __initdata;

module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/admin-guide/kernel-parameters.rst for details)");

static int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");

static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
153
154static int atapi_an;
155module_param(atapi_an, int, 0444);
156MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=0ff [default], 1=on)");
157
/* module identity */
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
162
163
164static bool ata_sstatus_online(u32 sstatus)
165{
166 return (sstatus & 0xf) == 0x3;
167}
168
169
170
171
172
173
174
175
176
177
178
179
180
/**
 *	ata_link_next - link iteration helper
 *	@link: the previous link, NULL to start
 *	@ap: ATA port containing links to iterate
 *	@mode: iteration mode, one of ATA_LITER_*
 *
 *	Returns the link following @link on @ap according to @mode's
 *	ordering of the host link, PMP links and the slave link, or
 *	NULL when iteration is complete.  The switch fallthroughs
 *	below are intentional.
 */
struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
			       enum ata_link_iter_mode mode)
{
	BUG_ON(mode != ATA_LITER_EDGE &&
	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);

	/* NULL link indicates start of iteration */
	if (!link)
		switch (mode) {
		case ATA_LITER_EDGE:
		case ATA_LITER_PMP_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through - no PMP, start from host link */
		case ATA_LITER_HOST_FIRST:
			return &ap->link;
		}

	/* we just iterated over the host link, what's next? */
	if (link == &ap->link)
		switch (mode) {
		case ATA_LITER_HOST_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_PMP_FIRST:
			if (unlikely(ap->slave_link))
				return ap->slave_link;
			/* fall through */
		case ATA_LITER_EDGE:
			return NULL;
		}

	/* slave_link is the last link visited; we're done */
	if (unlikely(link == ap->slave_link))
		return NULL;

	/* iterating over PMP links: advance within the pmp_link array */
	if (++link < ap->pmp_link + ap->nr_pmp_links)
		return link;

	/* PMP links exhausted; host link comes last in PMP_FIRST mode */
	if (mode == ATA_LITER_PMP_FIRST)
		return &ap->link;

	return NULL;
}
227
228
229
230
231
232
233
234
235
236
237
238
239
/**
 *	ata_dev_next - device iteration helper
 *	@dev: the previous device, NULL to start
 *	@link: ATA link containing devices to iterate
 *	@mode: iteration mode, one of ATA_DITER_*
 *
 *	Returns the next device on @link (forward or reverse depending
 *	on @mode), skipping disabled devices for the ENABLED modes, or
 *	NULL when iteration is complete.
 */
struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
				enum ata_dev_iter_mode mode)
{
	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);

	/* NULL dev indicates start of iteration */
	if (!dev)
		switch (mode) {
		case ATA_DITER_ENABLED:
		case ATA_DITER_ALL:
			dev = link->device;
			goto check;
		case ATA_DITER_ENABLED_REVERSE:
		case ATA_DITER_ALL_REVERSE:
			dev = link->device + ata_link_max_devices(link) - 1;
			goto check;
		}

 next:
	/* move to the next one */
	switch (mode) {
	case ATA_DITER_ENABLED:
	case ATA_DITER_ALL:
		if (++dev < link->device + ata_link_max_devices(link))
			goto check;
		return NULL;
	case ATA_DITER_ENABLED_REVERSE:
	case ATA_DITER_ALL_REVERSE:
		if (--dev >= link->device)
			goto check;
		return NULL;
	}

 check:
	/* ENABLED modes skip devices which aren't currently enabled */
	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
	    !ata_dev_enabled(dev))
		goto next;
	return dev;
}
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295struct ata_link *ata_dev_phys_link(struct ata_device *dev)
296{
297 struct ata_port *ap = dev->link->ap;
298
299 if (!ap->slave_link)
300 return dev->link;
301 if (!dev->devno)
302 return &ap->link;
303 return ap->slave_link;
304}
305
306
307
308
309
310
311
312
313
314
315
316
317
318
/**
 *	ata_force_cbl - force cable type according to libata.force
 *	@ap: ATA port of interest
 *
 *	Scans the force table from the end so that the last matching
 *	entry wins, and applies the first cable override found for
 *	@ap's port number (or a wildcard entry with port == -1).
 */
void ata_force_cbl(struct ata_port *ap)
{
	int i;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != ap->print_id)
			continue;

		/* ATA_CBL_NONE means this entry doesn't force the cable */
		if (fe->param.cbl == ATA_CBL_NONE)
			continue;

		ap->cbl = fe->param.cbl;
		ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
		return;
	}
}
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
/**
 *	ata_force_link_limits - force link limits according to libata.force
 *	@link: ATA link of interest
 *
 *	Applies forced SATA speed limit and link flags from the force
 *	table.  The table is walked from the end so the last matching
 *	entry wins for the speed limit; link flags accumulate from all
 *	matching entries.  The host link is addressed as device 15+pmp
 *	so "anything but the host link" can be expressed.
 */
static void ata_force_link_limits(struct ata_link *link)
{
	bool did_spd = false;
	int linkno = link->pmp;
	int i;

	if (ata_is_host_link(link))
		linkno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != linkno)
			continue;

		/* only honour the first (i.e. last in table) spd limit */
		if (!did_spd && fe->param.spd_limit) {
			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
					fe->param.name);
			did_spd = true;
		}

		/* link flags from every matching entry are OR'd in */
		if (fe->param.lflags) {
			link->flags |= fe->param.lflags;
			ata_link_notice(link,
					"FORCE: link flag 0x%x forced -> 0x%x\n",
					fe->param.lflags, link->flags);
		}
	}
}
389
390
391
392
393
394
395
396
397
398
399
400
/**
 *	ata_force_xfermask - force xfermask according to libata.force
 *	@dev: ATA device of interest
 *
 *	Applies the last matching xfer_mask override from the force
 *	table.  The forced mode caps the device: forcing UDMA keeps
 *	lower-class masks, forcing MWDMA clears UDMA, forcing PIO
 *	clears both DMA masks.
 */
static void ata_force_xfermask(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to the host link */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];
		unsigned long pio_mask, mwdma_mask, udma_mask;

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!fe->param.xfer_mask)
			continue;

		ata_unpack_xfermask(fe->param.xfer_mask,
				    &pio_mask, &mwdma_mask, &udma_mask);
		if (udma_mask)
			dev->udma_mask = udma_mask;
		else if (mwdma_mask) {
			dev->udma_mask = 0;
			dev->mwdma_mask = mwdma_mask;
		} else {
			dev->udma_mask = 0;
			dev->mwdma_mask = 0;
			dev->pio_mask = pio_mask;
		}

		ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
			       fe->param.name);
		return;
	}
}
443
444
445
446
447
448
449
450
451
452
453
454
/**
 *	ata_force_horkage - force horkage according to libata.force
 *	@dev: ATA device of interest
 *
 *	Applies horkage overrides from every matching force-table
 *	entry.  Unlike the other force helpers this walks the table
 *	front to back and does not stop at the first match, so entries
 *	later in the table can override earlier ones.
 */
static void ata_force_horkage(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to the host link */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = 0; i < ata_force_tbl_size; i++) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		/* skip entries which wouldn't change anything */
		if (!(~dev->horkage & fe->param.horkage_on) &&
		    !(dev->horkage & fe->param.horkage_off))
			continue;

		dev->horkage |= fe->param.horkage_on;
		dev->horkage &= ~fe->param.horkage_off;

		ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
			       fe->param.name);
	}
}
486
487
488
489
490
491
492
493
494
495
496
497
498
/**
 *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
 *	@opcode: SCSI opcode
 *
 *	Classifies @opcode as one of ATAPI_READ, ATAPI_WRITE,
 *	ATAPI_READ_CD, ATAPI_PASS_THRU or ATAPI_MISC.
 */
int atapi_cmd_type(u8 opcode)
{
	switch (opcode) {
	case GPCMD_READ_10:
	case GPCMD_READ_12:
		return ATAPI_READ;

	case GPCMD_WRITE_10:
	case GPCMD_WRITE_12:
	case GPCMD_WRITE_AND_VERIFY_10:
		return ATAPI_WRITE;

	case GPCMD_READ_CD:
	case GPCMD_READ_CD_MSF:
		return ATAPI_READ_CD;

	case ATA_16:
	case ATA_12:
		if (atapi_passthru16)
			return ATAPI_PASS_THRU;
		/* fall through - passthru disabled, treat as misc */
	default:
		return ATAPI_MISC;
	}
}
524
525
526
527
528
529
530
531
532
533
534
535
536
537
/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@pmp: Port multiplier port
 *	@is_cmd: This FIS is for command
 *	@fis: Buffer into which data will output (at least 20 bytes)
 *
 *	Converts a standard ATA taskfile to a Register - Host to
 *	Device FIS (type 0x27).
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = pmp & 0xf;		/* port multiplier number */
	if (is_cmd)
		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */

	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	/* auxiliary field, little endian across bytes 16-19 */
	fis[16] = tf->auxiliary & 0xff;
	fis[17] = (tf->auxiliary >> 8) & 0xff;
	fis[18] = (tf->auxiliary >> 16) & 0xff;
	fis[19] = (tf->auxiliary >> 24) & 0xff;
}
568
569
570
571
572
573
574
575
576
577
578
579
/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *	Note: only the fields present in a D2H FIS are filled in; the
 *	hob_nsect comes from byte 13, nsect from byte 12.
 */
void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command = fis[2];	/* status */
	tf->feature = fis[3];	/* error */

	tf->lbal = fis[4];
	tf->lbam = fis[5];
	tf->lbah = fis[6];
	tf->device = fis[7];

	tf->hob_lbal = fis[8];
	tf->hob_lbam = fis[9];
	tf->hob_lbah = fis[10];

	tf->nsect = fis[12];
	tf->hob_nsect = fis[13];
}
597
/*
 * Read/write command table indexed by ata_rwcmd_protocol():
 * index = group (0 = pio multi, 8 = pio, 16 = dma) + fua*4 + lba48*2 + write.
 * A zero entry means the combination is not supported.
 */
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
627
628
629
630
631
632
633
634
635
636
637
638
/**
 *	ata_rwcmd_protocol - set taskfile r/w command and protocol
 *	@tf: command to examine and configure
 *	@dev: device tf belongs to
 *
 *	Examine the device configuration and tf->flags to select
 *	the proper read/write command and protocol to use.
 *
 *	RETURNS: 0 on success, -1 if no suitable command exists in
 *	ata_rw_cmds for the requested combination.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	/* offsets into an 8-entry group of ata_rw_cmds */
	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* controller can only do LBA48 via PIO */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	Decode the starting block address from @tf, handling LBA48,
 *	LBA28 and CHS addressing.
 *
 *	RETURNS: block address, or U64_MAX if the taskfile contains
 *	an invalid CHS address (sector 0).
 */
u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= (u64)tf->hob_lbal << 24;
		} else
			/* LBA28: top nibble lives in the device register */
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		/* CHS sectors are 1-based; 0 is invalid */
		if (!sect) {
			ata_dev_warn(dev,
				     "device reported invalid CHS sector 0\n");
			return U64_MAX;
		}

		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
	}

	return block;
}
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *	@class: IO priority class
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.  Chooses NCQ,
 *	LBA48, LBA28 or CHS addressing as the device and request allow.
 *
 *	RETURNS:
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag, int class)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && !ata_tag_internal(tag)) {
		/* yay, NCQ - which requires LBA48 addressing */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		/* NCQ: tag goes in nsect bits 7:3, count in feature regs */
		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = ATA_LBA;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;

		/* request high priority for RT class when supported */
		if (dev->flags & ATA_DFLAG_NCQ_PRIO) {
			if (class == IOPRIO_CLASS_RT)
				tf->hob_nsect |= ATA_PRIO_HIGH <<
						 ATA_SHIFT_PRIO;
		}
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28; top nibble into the device register */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255*/
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863unsigned long ata_pack_xfermask(unsigned long pio_mask,
864 unsigned long mwdma_mask,
865 unsigned long udma_mask)
866{
867 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
868 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
869 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
870}
871
872
873
874
875
876
877
878
879
880
881
/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask (may be NULL if not wanted)
 *	@mwdma_mask: resulting mwdma_mask (may be NULL if not wanted)
 *	@udma_mask: resulting udma_mask (may be NULL if not wanted)
 */
void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
			 unsigned long *mwdma_mask, unsigned long *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
892
/* maps each xfer-mode category to its bit range and XFER_* base value;
 * terminated by a negative shift */
static const struct ata_xfer_ent {
	int shift, bits;	/* bit position and number of modes */
	u8 base;		/* XFER_* value of the category's mode 0 */
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};
902
903
904
905
906
907
908
909
910
911
912
913
914
915
/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0xff if no match found.
 */
u8 ata_xfer_mask2mode(unsigned long xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0xff;
}
926
927
928
929
930
931
932
933
934
935
936
937
938
/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode: the bit for the mode
 *	itself plus all lower modes within the same category.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
unsigned long ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
				& ~((1 << ent->shift) - 1);
	return 0;
}
949
950
951
952
953
954
955
956
957
958
959
960
961
962int ata_xfer_mode2shift(unsigned long xfer_mode)
963{
964 const struct ata_xfer_ent *ent;
965
966 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
967 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
968 return ent->shift;
969 return -1;
970}
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int bit = fls(xfer_mask) - 1;

	if (bit < 0 || bit >= ARRAY_SIZE(xfer_mode_str))
		return "<n/a>";

	return xfer_mode_str[bit];
}
1017
/* Map a 1-based SATA link speed value to its printable name. */
const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
		"6.0 Gbps",
	};

	if (spd < 1 || spd > ARRAY_SIZE(spd_str))
		return "<unknown>";

	return spd_str[spd - 1];
}
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section of the
 *	ATA/PI spec.  The lbam/lbah pair carries the signature:
 *
 *	  0x00/0x00	ATA device
 *	  0x14/0xeb	ATAPI device
 *	  0x69/0x96	port multiplier
 *	  0x3c/0xc3	SEMB device (could also be an ATA device)
 *	  0xcd/0xab	ZAC host managed device
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
 *	%ATA_DEV_SEMB, %ATA_DEV_ZAC or %ATA_DEV_UNKNOWN.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");
		return ATA_DEV_PMP;
	}

	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		DPRINTK("found SEMB device by sig (could be ATA device)\n");
		return ATA_DEV_SEMB;
	}

	if ((tf->lbam == 0xcd) && (tf->lbah == 0xab)) {
		DPRINTK("found ZAC device by sig\n");
		return ATA_DEV_ZAC;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113void ata_id_string(const u16 *id, unsigned char *s,
1114 unsigned int ofs, unsigned int len)
1115{
1116 unsigned int c;
1117
1118 BUG_ON(len & 1);
1119
1120 while (len > 0) {
1121 c = id[ofs] >> 8;
1122 *s = c;
1123 s++;
1124
1125 c = id[ofs] & 0xff;
1126 *s = c;
1127 s++;
1128
1129 ofs++;
1130 len -= 2;
1131 }
1132}
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	ata_id_string(id, s, ofs, len - 1);

	/* trim trailing spaces and NUL terminate */
	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}
1160
1161static u64 ata_id_n_sectors(const u16 *id)
1162{
1163 if (ata_id_has_lba(id)) {
1164 if (ata_id_has_lba48(id))
1165 return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
1166 else
1167 return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
1168 } else {
1169 if (ata_id_current_chs_valid(id))
1170 return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
1171 id[ATA_ID_CUR_SECTORS];
1172 else
1173 return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
1174 id[ATA_ID_SECTORS];
1175 }
1176}
1177
1178u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1179{
1180 u64 sectors = 0;
1181
1182 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1183 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1184 sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1185 sectors |= (tf->lbah & 0xff) << 16;
1186 sectors |= (tf->lbam & 0xff) << 8;
1187 sectors |= (tf->lbal & 0xff);
1188
1189 return sectors;
1190}
1191
1192u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1193{
1194 u64 sectors = 0;
1195
1196 sectors |= (tf->device & 0x0f) << 24;
1197 sectors |= (tf->lbah & 0xff) << 16;
1198 sectors |= (tf->lbam & 0xff) << 8;
1199 sectors |= (tf->lbal & 0xff);
1200
1201 return sectors;
1202}
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
/**
 *	ata_read_native_max_address - Read native max address
 *	@dev: target device
 *	@max_sectors: out parameter for the result
 *
 *	Perform an LBA48 or LBA28 native size query upon the device in
 *	question.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted by the drive.
 *	-EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol = ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to read native max address (err_mask=0x%x)\n",
			     err_mask);
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	/* result taskfile holds the max address; convert to sector count */
	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf) + 1;
	else
		*max_sectors = ata_tf_to_lba(&tf) + 1;
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors using SET MAX (EXT).
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	/* the command takes the address of the last sector, not a count */
	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		/* LBA28: top nibble goes into the device register */
		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol = ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to set max address (err_mask=0x%x)\n",
			     err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
/**
 *	ata_hpa_resize - Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and
 *	resize it if required to the full size of the media.  The caller
 *	must check the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA handling for this device.
		 */
		if (rc == -EACCES || !unlock_hpa) {
			ata_dev_warn(dev,
				     "HPA support seems broken, skipping HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if the device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}
	dev->n_native_sectors = native_sectors;

	/* nothing to do? */
	if (native_sectors <= sectors || !unlock_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_info(dev,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_warn(dev,
				"native sectors (%llu) is smaller than sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_warn(dev,
			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
			     (unsigned long long)sectors,
			     (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_err(dev,
			    "failed to re-read IDENTIFY data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_info(dev,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page (capability, timing and feature words).
 */
static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device.  This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	RETURNS:
 *	Computed xfermask
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;	/* PIO0-2 are always supported */
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/* CompactFlash advertises additional modes in word 163 */
		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;

		if (pio)
			pio_mask |= (1 << 5);		/* PIO5 */
		if (pio > 1)
			pio_mask |= (1 << 6);		/* PIO6 */
		if (dma)
			mwdma_mask |= (1 << 3);		/* MWDMA3 */
		if (dma > 1)
			mwdma_mask |= (1 << 4);		/* MWDMA4 */
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
1517
1518static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1519{
1520 struct completion *waiting = qc->private_data;
1521
1522 complete(waiting);
1523}
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sgl: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	the command on entry and the result on return.  Timeout and
 *	error conditions are reported via the return value.  No
 *	recovery action is taken after a command times out here; a
 *	timeout freezes the port for EH (when an error handler exists).
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	int auto_timeout = 0;
	struct ata_queued_cmd *qc;
	unsigned int preempted_tag;
	u32 preempted_sactive;
	u64 preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while the port is frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */
	qc = __ata_qc_from_tag(ap, ATA_TAG_INTERNAL);

	qc->tag = ATA_TAG_INTERNAL;
	qc->hw_tag = 0;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* save and clear the active-command bookkeeping so the
	 * internal command owns the port; restored before return */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);

	/* some DMADIR bridges need the direction flagged in the tf */
	if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
	    dma_dir == DMA_FROM_DEVICE)
		qc->tf.feature |= ATAPI_DMADIR;

	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout) {
		if (ata_probe_timeout)
			timeout = ata_probe_timeout * 1000;
		else {
			timeout = ata_internal_cmd_timeout(dev, command);
			auto_timeout = 1;
		}
	}

	if (ap->ops->error_handler)
		ata_eh_release(ap);

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	if (ap->ops->error_handler)
		ata_eh_acquire(ap);

	ata_sff_flush_pio_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
					     command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	} else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
		qc->result_tf.command |= ATA_SENSE;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	spin_unlock_irqrestore(ap->lock, flags);

	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
		ata_internal_cmd_timed_out(dev, command);

	return err_mask;
}
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723unsigned ata_exec_internal(struct ata_device *dev,
1724 struct ata_taskfile *tf, const u8 *cdb,
1725 int dma_dir, void *buf, unsigned int buflen,
1726 unsigned long timeout)
1727{
1728 struct scatterlist *psg = NULL, sg;
1729 unsigned int n_elem = 0;
1730
1731 if (dma_dir != DMA_NONE) {
1732 WARN_ON(!buf);
1733 sg_init_one(&sg, buf, buflen);
1734 psg = &sg;
1735 n_elem++;
1736 }
1737
1738 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1739 timeout);
1740}
1741
1742
1743
1744
1745
1746
1747
1748
1749unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1750{
1751
1752
1753
1754
1755 if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1756 return 0;
1757
1758
1759
1760 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1761 return 0;
1762
1763 if (ata_id_is_cfa(adev->id)
1764 && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1765 return 0;
1766
1767 if (adev->pio_mode > XFER_PIO_2)
1768 return 1;
1769
1770 if (ata_id_has_iordy(adev->id))
1771 return 1;
1772 return 0;
1773}
1774
1775
1776
1777
1778
1779
1780
1781
1782static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1783{
1784
1785 if (adev->id[ATA_ID_FIELD_VALID] & 2) {
1786 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1787
1788 if (pio) {
1789
1790 if (pio > 240)
1791 return 3 << ATA_SHIFT_PIO;
1792 return 7 << ATA_SHIFT_PIO;
1793 }
1794 }
1795 return 3 << ATA_SHIFT_PIO;
1796}
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808unsigned int ata_do_dev_read_id(struct ata_device *dev,
1809 struct ata_taskfile *tf, u16 *id)
1810{
1811 return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1812 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1813}
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.  *@p_class may be updated (SEMB probed as
 *	ATA; fallback between ATA and ATAPI on an aborted IDENTIFY).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.  -ENOENT means "no device".
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	bool is_semb = class == ATA_DEV_SEMB;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_dbg(dev, "%s: ENTER\n", __func__);

retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_SEMB:
		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
		/* fall through */
	case ATA_DEV_ATA:
	case ATA_DEV_ZAC:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	if (ap->ops->read_id)
		err_mask = ap->ops->read_id(dev, &tf, id);
	else
		err_mask = ata_do_dev_read_id(dev, &tf, id);

	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			ata_dev_dbg(dev, "NODEV after polling detection\n");
			return -ENOENT;
		}

		if (is_semb) {
			ata_dev_info(dev,
		     "IDENTIFY failed on device w/ SEMB sig, disabled\n");
			/* SEMB is not supported yet */
			*p_class = ATA_DEV_SEMB_UNSUP;
			return 0;
		}

		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			/* Device or controller might have reported
			 * the wrong device class.  Give a shot at the
			 * other IDENTIFY if the current one is
			 * aborted by the device.
			 */
			if (may_fallback) {
				may_fallback = 0;

				if (class == ATA_DEV_ATA)
					class = ATA_DEV_ATAPI;
				else
					class = ATA_DEV_ATA;
				goto retry;
			}

			/* Control reaches here iff the device aborted
			 * both flavors of IDENTIFYs which happens
			 * sometimes with phantom devices.
			 */
			ata_dev_dbg(dev,
				    "both IDENTIFYs aborted, assuming NODEV\n");
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
		ata_dev_dbg(dev, "dumping IDENTIFY data, "
			    "class=%d may_fallback=%d tried_spinup=%d\n",
			    class, may_fallback, tried_spinup);
		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
		if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
		    ata_id_is_ata(id)) {
			ata_dev_dbg(dev,
				"host indicates ignore ATA devices, ignored\n");
			return -ENOENT;
		}
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) &&
	    (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
			     reason, err_mask);
	return rc;
}
2025
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
/**
 *	ata_read_log_page - read a specific log page
 *	@dev: target device
 *	@log: log to read
 *	@page: page to read
 *	@buf: buffer to store read page
 *	@sectors: number of sectors to read
 *
 *	Read log page using READ LOG EXT, or READ LOG DMA EXT when the
 *	device advertises it and DMA is configured.  If the DMA variant
 *	fails, it is blacklisted via ATA_HORKAGE_NO_DMA_LOG and the
 *	read is retried in PIO.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
			       u8 page, void *buf, unsigned int sectors)
{
	unsigned long ap_flags = dev->link->ap->flags;
	struct ata_taskfile tf;
	unsigned int err_mask;
	bool dma = false;

	DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);

	/*
	 * Return error without actually issuing the command on controllers
	 * which e.g. lockup on a read log page.
	 */
	if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
		return AC_ERR_DEV;

retry:
	ata_tf_init(dev, &tf);
	/* Prefer the DMA variant unless it has already failed once. */
	if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&
	    !(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) {
		tf.command = ATA_CMD_READ_LOG_DMA_EXT;
		tf.protocol = ATA_PROT_DMA;
		dma = true;
	} else {
		tf.command = ATA_CMD_READ_LOG_EXT;
		tf.protocol = ATA_PROT_PIO;
		dma = false;
	}
	tf.lbal = log;
	tf.lbam = page;
	tf.nsect = sectors;
	tf.hob_nsect = sectors >> 8;	/* 16-bit sector count (LBA48 TF) */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     buf, sectors * ATA_SECT_SIZE, 0);

	/* DMA read failed: remember that and retry the PIO flavor once. */
	if (err_mask && dma) {
		dev->horkage |= ATA_HORKAGE_NO_DMA_LOG;
		ata_dev_warn(dev, "READ LOG DMA EXT failed, trying PIO\n");
		goto retry;
	}

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
2089
2090static bool ata_log_supported(struct ata_device *dev, u8 log)
2091{
2092 struct ata_port *ap = dev->link->ap;
2093
2094 if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
2095 return false;
2096 return get_unaligned_le16(&ap->sector_buf[log * 2]) ? true : false;
2097}
2098
2099static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
2100{
2101 struct ata_port *ap = dev->link->ap;
2102 unsigned int err, i;
2103
2104 if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE)) {
2105 ata_dev_warn(dev, "ATA Identify Device Log not supported\n");
2106 return false;
2107 }
2108
2109
2110
2111
2112
2113 err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0, ap->sector_buf,
2114 1);
2115 if (err) {
2116 ata_dev_info(dev,
2117 "failed to get Device Identify Log Emask 0x%x\n",
2118 err);
2119 return false;
2120 }
2121
2122 for (i = 0; i < ap->sector_buf[8]; i++) {
2123 if (ap->sector_buf[9 + i] == page)
2124 return true;
2125 }
2126
2127 return false;
2128}
2129
2130static int ata_do_link_spd_horkage(struct ata_device *dev)
2131{
2132 struct ata_link *plink = ata_dev_phys_link(dev);
2133 u32 target, target_limit;
2134
2135 if (!sata_scr_valid(plink))
2136 return 0;
2137
2138 if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2139 target = 1;
2140 else
2141 return 0;
2142
2143 target_limit = (1 << target) - 1;
2144
2145
2146 if (plink->sata_spd_limit <= target_limit)
2147 return 0;
2148
2149 plink->sata_spd_limit = target_limit;
2150
2151
2152
2153
2154
2155 if (plink->sata_spd > target) {
2156 ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2157 sata_spd_string(target));
2158 return -EAGAIN;
2159 }
2160 return 0;
2161}
2162
2163static inline u8 ata_dev_knobble(struct ata_device *dev)
2164{
2165 struct ata_port *ap = dev->link->ap;
2166
2167 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2168 return 0;
2169
2170 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2171}
2172
2173static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
2174{
2175 struct ata_port *ap = dev->link->ap;
2176 unsigned int err_mask;
2177
2178 if (!ata_log_supported(dev, ATA_LOG_NCQ_SEND_RECV)) {
2179 ata_dev_warn(dev, "NCQ Send/Recv Log not supported\n");
2180 return;
2181 }
2182 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
2183 0, ap->sector_buf, 1);
2184 if (err_mask) {
2185 ata_dev_dbg(dev,
2186 "failed to get NCQ Send/Recv Log Emask 0x%x\n",
2187 err_mask);
2188 } else {
2189 u8 *cmds = dev->ncq_send_recv_cmds;
2190
2191 dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
2192 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
2193
2194 if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
2195 ata_dev_dbg(dev, "disabling queued TRIM support\n");
2196 cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
2197 ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
2198 }
2199 }
2200}
2201
2202static void ata_dev_config_ncq_non_data(struct ata_device *dev)
2203{
2204 struct ata_port *ap = dev->link->ap;
2205 unsigned int err_mask;
2206
2207 if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) {
2208 ata_dev_warn(dev,
2209 "NCQ Send/Recv Log not supported\n");
2210 return;
2211 }
2212 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
2213 0, ap->sector_buf, 1);
2214 if (err_mask) {
2215 ata_dev_dbg(dev,
2216 "failed to get NCQ Non-Data Log Emask 0x%x\n",
2217 err_mask);
2218 } else {
2219 u8 *cmds = dev->ncq_non_data_cmds;
2220
2221 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE);
2222 }
2223}
2224
2225static void ata_dev_config_ncq_prio(struct ata_device *dev)
2226{
2227 struct ata_port *ap = dev->link->ap;
2228 unsigned int err_mask;
2229
2230 if (!(dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE)) {
2231 dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
2232 return;
2233 }
2234
2235 err_mask = ata_read_log_page(dev,
2236 ATA_LOG_IDENTIFY_DEVICE,
2237 ATA_LOG_SATA_SETTINGS,
2238 ap->sector_buf,
2239 1);
2240 if (err_mask) {
2241 ata_dev_dbg(dev,
2242 "failed to get Identify Device data, Emask 0x%x\n",
2243 err_mask);
2244 return;
2245 }
2246
2247 if (ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)) {
2248 dev->flags |= ATA_DFLAG_NCQ_PRIO;
2249 } else {
2250 dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
2251 ata_dev_dbg(dev, "SATA page does not support priority\n");
2252 }
2253
2254}
2255
/*
 * Configure NCQ on @dev and format a human-readable description of the
 * result into @desc (at most @desc_sz bytes).  Returns 0 on success,
 * -EIO when enabling FPDMA auto-activation failed in a way that asks
 * for a retry with AA blacklisted.
 */
static int ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->link->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
	unsigned int err_mask;
	char *aa_desc = "";

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return 0;
	}
	if (dev->horkage & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return 0;
	}
	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	/* Enable FPDMA auto-activation unless known broken on this dev. */
	if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
		(ap->flags & ATA_FLAG_FPDMA_AA) &&
		ata_id_has_fpdma_aa(dev->id)) {
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
			SATA_FPDMA_AA);
		if (err_mask) {
			ata_dev_err(dev,
				    "failed to enable AA (error_mask=0x%x)\n",
				    err_mask);
			/* A device abort is tolerated; any other failure
			 * marks AA broken and asks the caller to retry.
			 */
			if (err_mask != AC_ERR_DEV) {
				dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
				return -EIO;
			}
		} else
			aa_desc = ", AA";
	}

	/* Describe the effective depth: min of host and device depths. */
	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
			ddepth, aa_desc);

	/* Optional NCQ extensions are only probed with FPDMA AUX support. */
	if ((ap->flags & ATA_FLAG_FPDMA_AUX)) {
		if (ata_id_has_ncq_send_and_recv(dev->id))
			ata_dev_config_ncq_send_recv(dev);
		if (ata_id_has_ncq_non_data(dev->id))
			ata_dev_config_ncq_non_data(dev);
		if (ata_id_has_ncq_prio(dev->id))
			ata_dev_config_ncq_prio(dev);
	}

	return 0;
}
2311
2312static void ata_dev_config_sense_reporting(struct ata_device *dev)
2313{
2314 unsigned int err_mask;
2315
2316 if (!ata_id_has_sense_reporting(dev->id))
2317 return;
2318
2319 if (ata_id_sense_reporting_enabled(dev->id))
2320 return;
2321
2322 err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
2323 if (err_mask) {
2324 ata_dev_dbg(dev,
2325 "failed to enable Sense Data Reporting, Emask 0x%x\n",
2326 err_mask);
2327 }
2328}
2329
/* Probe zoned (ZAC) capabilities and cache the zone limits on @dev. */
static void ata_dev_config_zac(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int err_mask;
	u8 *identify_buf = ap->sector_buf;

	/* U32_MAX means "not reported" until the log says otherwise. */
	dev->zac_zones_optimal_open = U32_MAX;
	dev->zac_zones_optimal_nonseq = U32_MAX;
	dev->zac_zones_max_open = U32_MAX;

	/*
	 * Always set the ZAC flag for host-managed devices (own device
	 * class) and for host-aware ones (ATA class, zoned cap 0x01).
	 */
	if (dev->class == ATA_DEV_ZAC)
		dev->flags |= ATA_DFLAG_ZAC;
	else if (ata_id_zoned_cap(dev->id) == 0x01)
		/* Host aware device */
		dev->flags |= ATA_DFLAG_ZAC;

	if (!(dev->flags & ATA_DFLAG_ZAC))
		return;

	if (!ata_identify_page_supported(dev, ATA_LOG_ZONED_INFORMATION)) {
		ata_dev_warn(dev,
			     "ATA Zoned Information Log not supported\n");
		return;
	}

	/*
	 * Read the Zoned-device information page of the IDENTIFY DEVICE
	 * data log.
	 */
	err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
				     ATA_LOG_ZONED_INFORMATION,
				     identify_buf, 1);
	if (!err_mask) {
		u64 zoned_cap, opt_open, opt_nonseq, max_open;

		/* In each qword, bit 63 marks the field as valid. */
		zoned_cap = get_unaligned_le64(&identify_buf[8]);
		if ((zoned_cap >> 63))
			dev->zac_zoned_cap = (zoned_cap & 1);
		opt_open = get_unaligned_le64(&identify_buf[24]);
		if ((opt_open >> 63))
			dev->zac_zones_optimal_open = (u32)opt_open;
		opt_nonseq = get_unaligned_le64(&identify_buf[32]);
		if ((opt_nonseq >> 63))
			dev->zac_zones_optimal_nonseq = (u32)opt_nonseq;
		max_open = get_unaligned_le64(&identify_buf[40]);
		if ((max_open >> 63))
			dev->zac_zones_max_open = (u32)max_open;
	}
}
2383
2384static void ata_dev_config_trusted(struct ata_device *dev)
2385{
2386 struct ata_port *ap = dev->link->ap;
2387 u64 trusted_cap;
2388 unsigned int err;
2389
2390 if (!ata_id_has_trusted(dev->id))
2391 return;
2392
2393 if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) {
2394 ata_dev_warn(dev,
2395 "Security Log not supported\n");
2396 return;
2397 }
2398
2399 err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_SECURITY,
2400 ap->sector_buf, 1);
2401 if (err) {
2402 ata_dev_dbg(dev,
2403 "failed to read Security Log, Emask 0x%x\n", err);
2404 return;
2405 }
2406
2407 trusted_cap = get_unaligned_le64(&ap->sector_buf[40]);
2408 if (!(trusted_cap & (1ULL << 63))) {
2409 ata_dev_dbg(dev,
2410 "Trusted Computing capability qword not valid!\n");
2411 return;
2412 }
2413
2414 if (trusted_cap & (1 << 0))
2415 dev->flags |= ATA_DFLAG_TRUSTED;
2416}
2417
2418
2419
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429
2430
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned long xfer_mask;
	unsigned int err_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_dbg(dev, "%s: ENTER\n", __func__);

	/* set horkage */
	dev->horkage |= ata_dev_blacklisted(dev);
	ata_force_horkage(dev);

	if (dev->horkage & ATA_HORKAGE_DISABLE) {
		ata_dev_info(dev, "unsupported device, disabling\n");
		ata_dev_disable(dev);
		return 0;
	}

	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
	    dev->class == ATA_DEV_ATAPI) {
		ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
			     atapi_enabled ? "not supported with this driver"
			     : "disabled");
		ata_dev_disable(dev);
		return 0;
	}

	rc = ata_do_link_spd_horkage(dev);
	if (rc)
		return rc;

	/* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
	if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
	    (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
		dev->horkage |= ATA_HORKAGE_NOLPM;

	if (ap->flags & ATA_FLAG_NO_LPM)
		dev->horkage |= ATA_HORKAGE_NOLPM;

	if (dev->horkage & ATA_HORKAGE_NOLPM) {
		ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
		dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
	}

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_dbg(dev,
			    "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			    "85:%04x 86:%04x 87:%04x 88:%04x\n",
			    __func__,
			    id[49], id[82], id[83], id[84],
			    id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;
	dev->multi_count = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
		if (ata_id_is_cfa(id)) {
			/* CPRM may make this media unusable */
			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
				ata_dev_warn(dev,
	"supports DRM functions and may not be fully accessible\n");
			snprintf(revbuf, 7, "CFA");
		} else {
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
			/* Warn the user if the device has TPM extensions */
			if (ata_id_has_tpm(id))
				ata_dev_warn(dev,
	"supports DRM functions and may not be fully accessible\n");
		}

		dev->n_sectors = ata_id_n_sectors(id);

		/* get current R/W Multiple count setting */
		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
			unsigned int max = dev->id[47] & 0xff;
			unsigned int cnt = dev->id[59] & 0xff;

			/* only honour a power-of-two count within the max */
			if (is_power_of_2(max) && is_power_of_2(cnt))
				if (cnt <= max)
					dev->multi_count = cnt;
		}

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[24];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
			if (rc)
				return rc;

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_info(dev, "%s: %s, %s, max %s\n",
					     revbuf, modelbuf, fwrevbuf,
					     ata_mode_string(xfer_mask));
				ata_dev_info(dev,
					     "%llu sectors, multi %u: %s %s\n",
					     (unsigned long long)dev->n_sectors,
					     dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders = id[1];
			dev->heads = id[3];
			dev->sectors = id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads = id[55];
				dev->sectors = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_info(dev, "%s: %s, %s, max %s\n",
					     revbuf, modelbuf, fwrevbuf,
					     ata_mode_string(xfer_mask));
				ata_dev_info(dev,
					     "%llu sectors, multi %u, CHS %u/%u/%u\n",
					     (unsigned long long)dev->n_sectors,
					     dev->multi_count, dev->cylinders,
					     dev->heads, dev->sectors);
			}
		}

		/* Check and mark DevSlp capability. Get DevSlp timing
		 * variables from the SATA Settings page of the Identify
		 * Device Data Log.
		 */
		if (ata_id_has_devslp(dev->id)) {
			u8 *sata_setting = ap->sector_buf;
			int i, j;

			dev->flags |= ATA_DFLAG_DEVSLP;
			err_mask = ata_read_log_page(dev,
						     ATA_LOG_IDENTIFY_DEVICE,
						     ATA_LOG_SATA_SETTINGS,
						     sata_setting,
						     1);
			if (err_mask)
				ata_dev_dbg(dev,
					    "failed to get Identify Device Data, Emask 0x%x\n",
					    err_mask);
			else
				for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
					j = ATA_LOG_DEVSLP_OFFSET + i;
					dev->devslp_timing[i] = sata_setting[j];
				}
		}
		ata_dev_config_sense_reporting(dev);
		ata_dev_config_zac(dev);
		ata_dev_config_trusted(dev);
		dev->cdb_len = 32;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		const char *dma_dir_string = "";
		u32 sntf;

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_warn(dev, "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support.  If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if (atapi_an &&
		    (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!sata_pmp_attached(ap) ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
			/* issue SET feature command to turn this on */
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_AN);
			if (err_mask)
				ata_dev_err(dev,
					    "failed to enable ATAPI AN (err_mask=0x%x)\n",
					    err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
			dev->flags |= ATA_DFLAG_DMADIR;
			dma_dir_string = ", DMADIR";
		}

		if (ata_id_has_da(dev->id)) {
			dev->flags |= ATA_DFLAG_DA;
			zpodd_init(dev);
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_info(dev,
				     "ATAPI: %s, %s, max %s%s%s%s\n",
				     modelbuf, fwrevbuf,
				     ata_mode_string(xfer_mask),
				     cdb_intr_string, atapi_an_string,
				     dma_dir_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	/* Limit transfers for PATA drives behind a SATA bridge
	 * (detected by ata_dev_knobble).
	 */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_info(dev, "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if ((dev->class == ATA_DEV_ATAPI) &&
	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
	}

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
					 dev->max_sectors);

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot. Do this after the dev_config call as some controllers
		   with buggy firmware may want to avoid reporting false device
		   bugs */

		if (print_info) {
			ata_dev_warn(dev,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_warn(dev,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
		ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
		ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n");
	}

	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
	return rc;
}
2773
2774
2775
2776
2777
2778
2779
2780
2781
/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 */
int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}
2786
2787
2788
2789
2790
2791
2792
2793
2794
/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 */
int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}
2799
2800
2801
2802
2803
2804
2805
2806
/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 */
int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}
2811
2812
2813
2814
2815
2816
2817
2818
/**
 *	ata_cable_ignore	-	return ignored PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which don't use cable type to limit
 *	transfer mode.
 */
int ata_cable_ignore(struct ata_port *ap)
{
	return ATA_CBL_PATA_IGN;
}
2823
2824
2825
2826
2827
2828
2829
2830
/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables.
 */
int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
2835
2836
2837
2838
2839
2840
2841
2842
2843
2844
2845
2846
2847
2848
2849
2850
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify and configure any devices
 *	found on the bus.  Failed devices are retried up to
 *	ATA_PROBE_MAX_TRIES times (see the fail: label for the policy).
 *
 *	RETURNS:
 *	Zero on success (at least one device enabled), negative errno
 *	otherwise.
 */
int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	ata_for_each_dev(dev, &ap->link, ALL)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	ata_for_each_dev(dev, &ap->link, ALL) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;
		dev->dma_mode = 0xff;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	ata_for_each_dev(dev, &ap->link, ALL) {
		if (dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of
	 * the reported cable types and sensed types.  When SATA
	 * drives indicate we have a bridge, we don't know which end
	 * of the link the bridge is which is a problem.
	 */
	ata_for_each_dev(dev, &ap->link, ENABLED)
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	ata_for_each_dev(dev, &ap->link, ENABLED) {
		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	/* success iff at least one device is still enabled */
	ata_for_each_dev(dev, &ap->link, ENABLED)
		return 0;

	return -ENODEV;

 fail:
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fall through */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link, 0);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
2973
2974
2975
2976
2977
2978
2979
2980
2981
2982
2983static void sata_print_link_status(struct ata_link *link)
2984{
2985 u32 sstatus, scontrol, tmp;
2986
2987 if (sata_scr_read(link, SCR_STATUS, &sstatus))
2988 return;
2989 sata_scr_read(link, SCR_CONTROL, &scontrol);
2990
2991 if (ata_phys_link_online(link)) {
2992 tmp = (sstatus >> 4) & 0xf;
2993 ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
2994 sata_spd_string(tmp), sstatus, scontrol);
2995 } else {
2996 ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
2997 sstatus, scontrol);
2998 }
2999}
3000
3001
3002
3003
3004
3005
3006
3007
3008
3009struct ata_device *ata_dev_pair(struct ata_device *adev)
3010{
3011 struct ata_link *link = adev->link;
3012 struct ata_device *pair = &link->device[1 - adev->devno];
3013 if (!ata_dev_enabled(pair))
3014 return NULL;
3015 return pair;
3016}
3017
3018
3019
3020
3021
3022
3023
3024
3025
3026
3027
3028
3029
3030
3031
3032
3033
3034
3035
3036
3037
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *	@spd_limit: Additional limit
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	If @spd_limit is non-zero, the speed is limited to equal to or
 *	lower than @spd_limit if such speed is supported.  If
 *	@spd_limit is slower than any supported speed, only the lowest
 *	supported speed is allowed.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
{
	u32 sstatus, spd, mask;
	int rc, bit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0 && ata_sstatus_online(sstatus))
		spd = (sstatus >> 4) & 0xf;
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;

	/* unconditionally mask off the highest bit */
	bit = fls(mask) - 1;
	mask &= ~(1 << bit);

	/* Keep only speeds strictly below the current one; if we are
	 * already at the lowest speed there is nothing left to limit.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		return -EINVAL;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	if (spd_limit) {
		if (mask & ((1 << spd_limit) - 1))
			mask &= (1 << spd_limit) - 1;
		else {
			/* @spd_limit is below everything supported; fall
			 * back to the lowest remaining speed.
			 */
			bit = ffs(mask) - 1;
			mask = 1 << bit;
		}
	}

	link->sata_spd_limit = mask;

	ata_link_warn(link, "limiting SATA link speed to %s\n",
		      sata_spd_string(fls(mask)));

	return 0;
}
3097
3098static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
3099{
3100 struct ata_link *host_link = &link->ap->link;
3101 u32 limit, target, spd;
3102
3103 limit = link->sata_spd_limit;
3104
3105
3106
3107
3108
3109 if (!ata_is_host_link(link) && host_link->sata_spd)
3110 limit &= (1 << host_link->sata_spd) - 1;
3111
3112 if (limit == UINT_MAX)
3113 target = 0;
3114 else
3115 target = fls(limit);
3116
3117 spd = (*scontrol >> 4) & 0xf;
3118 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
3119
3120 return spd != target;
3121}
3122
3123
3124
3125
3126
3127
3128
3129
3130
3131
3132
3133
3134
3135
3136
3137
3138static int sata_set_spd_needed(struct ata_link *link)
3139{
3140 u32 scontrol;
3141
3142 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
3143 return 1;
3144
3145 return __sata_set_spd_needed(link, &scontrol);
3146}
3147
3148
3149
3150
3151
3152
3153
3154
3155
3156
3157
3158
3159
3160
3161int sata_set_spd(struct ata_link *link)
3162{
3163 u32 scontrol;
3164 int rc;
3165
3166 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3167 return rc;
3168
3169 if (!__sata_set_spd_needed(link, &scontrol))
3170 return 0;
3171
3172 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3173 return rc;
3174
3175 return 1;
3176}
3177
3178
3179
3180
3181
3182
3183
3184
3185
3186
3187
3188
3189
/*
 * Per-mode ATA bus timing table.  Initializers are positional; the
 * column order is presumed to match struct ata_timing as used by
 * ata_timing_quantize(): { mode, setup, act8b, rec8b, cyc8b, active,
 * recover, dmack_hold, cycle, udma } -- confirm against the struct
 * definition in <linux/libata.h>.  Values are durations in ns.
 */
static const struct ata_timing ata_timing[] = {
	/* PIO modes: full 8-bit (register) and 16-bit (data) timings */
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150,  0, 600,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100,  0, 383,   0 },
	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90,  0, 240,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70,  0, 180,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25,  0, 120,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25,  0, 100,   0 },
	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  0,  80,   0 },

	/* single-word DMA: only setup/active/recover/dmack_hold/cycle used */
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 50, 960,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 30, 480,   0 },
	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 20, 240,   0 },

	/* multi-word DMA */
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 20, 480,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50,  5, 150,   0 },
	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25,  5, 120,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25,  5, 100,   0 },
	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  5,  80,   0 },

	/* UDMA: only the udma cycle time is meaningful */
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,  0,   0, 120 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,  0,   0,  80 },
	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,  0,   0,  60 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,  0,   0,  45 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,  0,   0,  30 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,  0,   0,  20 },
	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,  0,   0,  15 },

	/* sentinel: 0xFF is larger than any valid xfer mode, which
	 * terminates the linear scan in ata_timing_find_mode() */
	{ 0xFF }
};
3221
/* ENOUGH(v, unit): number of whole 'unit' periods needed to cover v (round up).
 * EZ: like ENOUGH but maps 0 ("unspecified") to 0 and scales v by 1000
 * first -- so v is in ns while 'unit' is presumably a clock period in ps;
 * confirm against the T/UT arguments callers pass to ata_timing_compute(). */
#define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
#define EZ(v, unit)		((v)?ENOUGH(((v) * 1000), unit):0)

/* Quantize every field of *t from nanoseconds into counts of bus clock
 * periods: T for the PIO/DMA fields, UT for the UDMA cycle. q may alias t. */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup	= EZ(t->setup,      T);
	q->act8b	= EZ(t->act8b,      T);
	q->rec8b	= EZ(t->rec8b,      T);
	q->cyc8b	= EZ(t->cyc8b,      T);
	q->active	= EZ(t->active,     T);
	q->recover	= EZ(t->recover,    T);
	q->dmack_hold	= EZ(t->dmack_hold, T);
	q->cycle	= EZ(t->cycle,      T);
	q->udma		= EZ(t->udma,       UT);
}
3237
3238void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
3239 struct ata_timing *m, unsigned int what)
3240{
3241 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
3242 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
3243 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
3244 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
3245 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
3246 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
3247 if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
3248 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
3249 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
3250}
3251
3252const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
3253{
3254 const struct ata_timing *t = ata_timing;
3255
3256 while (xfer_mode > t->mode)
3257 t++;
3258
3259 if (xfer_mode == t->mode)
3260 return t;
3261
3262 WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
3263 __func__, xfer_mode);
3264
3265 return NULL;
3266}
3267
/*
 * Compute the bus timing for @adev at transfer mode @speed, quantized to
 * clock periods T (PIO/DMA) and UT (UDMA), merging in any stricter
 * EIDE timing the device reports in its IDENTIFY data.  Result in *t.
 * Returns 0 on success, -EINVAL if @speed has no timing table entry.
 */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const u16 *id = adev->id;
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode in the timing table.
	 */
	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive (IDENTIFY word 53 bit 1 set), it
	 * can specify its own minimum PIO/DMA cycle timings.  Merge those
	 * in so we never run the bus faster than the drive allows.
	 */
	if (id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));

		if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2)
				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
			else if ((speed <= XFER_PIO_4) ||
				 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
			p.cycle = id[ATA_ID_EIDE_DMA_MIN];

		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */
	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes the device is still accessed in PIO for
	 * some commands, so the DMA timing must not be faster than the
	 * currently selected PIO timing: recompute the PIO timing and
	 * merge it in.  (Only done above PIO_6, i.e. for DMA modes.)
	 */
	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct:
	 * distribute any slack in the (quantized) cycle between the two.
	 */
	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/*
	 * In a few cases quantisation may produce enough errors to
	 * leave t->cycle too low for the sum of active and recovery:
	 * force the cycle up to cover them.
	 */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
3343
3344
3345
3346
3347
3348
3349
3350
3351
3352
3353
3354
3355
3356
3357
3358
3359
/**
 *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
 *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
 *	@cycle: cycle duration in ns
 *
 *	Return matching xfer mode for @cycle.  The returned mode is of
 *	the transfer type specified by @xfer_shift.  If @cycle is too
 *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
 *	than the fastest known mode, the fastest mode is returned.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mode, 0xff if no match found.
 */
u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
{
	u8 base_mode = 0xff, last_mode = 0xff;
	const struct ata_xfer_ent *ent;
	const struct ata_timing *t;

	/* find the base (slowest) mode of the requested transfer class */
	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (ent->shift == xfer_shift)
			base_mode = ent->base;

	/* walk the class's timing entries (table is ordered slowest to
	 * fastest) and remember the last mode whose cycle still fits */
	for (t = ata_timing_find_mode(base_mode);
	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
		unsigned short this_cycle;

		switch (xfer_shift) {
		case ATA_SHIFT_PIO:
		case ATA_SHIFT_MWDMA:
			this_cycle = t->cycle;
			break;
		case ATA_SHIFT_UDMA:
			this_cycle = t->udma;
			break;
		default:
			return 0xff;
		}

		if (cycle > this_cycle)
			break;

		last_mode = t->mode;
	}

	return last_mode;
}
3394
3395
3396
3397
3398
3399
3400
3401
3402
3403
3404
3405
3406
3407
3408
3409
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned long orig_mask, xfer_mask;
	unsigned long pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* drop the fastest remaining PIO mode */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* drop the fastest remaining DMA mode, UDMA before MWDMA */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		/* restrict UDMA to modes safe on a 40-wire cable */
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fall through */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* can't go lower than PIO0, and no-op requests are errors too */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_warn(dev, "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
3483
/*
 * Issue SET FEATURES - XFER MODE for @dev's selected xfer mode, then
 * revalidate the device.  Several classes of devices mishandle the
 * command; the ign_dev_err logic below whitelists the known-harmless
 * device-error cases.  Returns 0 on success, negative errno on failure.
 */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
	const char *dev_err_whine = "";
	int ign_dev_err = 0;
	unsigned int err_mask = 0;
	int rc;

	/* flag PIO-only operation when a PIO mode was selected */
	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	/* NOSETXFER devices only get to skip SET_XFERMODE when both the
	 * controller and the device are SATA; on PATA we must issue it
	 * anyway and hope for the best */
	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
		dev_err_whine = " (SET_XFERMODE skipped)";
	else {
		if (nosetxfer)
			ata_dev_warn(dev,
				     "NOSETXFER but PATA detected - can't "
				     "skip SETXFER, might malfunction\n");
		err_mask = ata_dev_set_xfermode(dev);
	}

	/* anything other than a plain device error is fatal */
	if (err_mask & ~AC_ERR_DEV)
		goto fail;

	/* revalidate */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	if (dev->xfer_shift == ATA_SHIFT_PIO) {
		/* Old CFA may refuse this command, which is just fine */
		if (ata_id_is_cfa(dev->id))
			ign_dev_err = 1;

		/* Catch several broken garbage emulations plus some pre
		   ATA devices */
		if (ata_id_major_version(dev->id) == 0 &&
					dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;

		/* Some very old devices and some bad newer ones fail
		   any kind of SET_XFERMODE request but support PIO0-2
		   timings and no IORDY */
		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;
	}

	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
	   Don't fail an MWDMA0 set IFF the device indicates this is its
	   default (id word 63 bit 8: MWDMA0 selected). */
	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
	    dev->dma_mode == XFER_MW_DMA_0 &&
	    (dev->id[63] >> 8) & 1)
		ign_dev_err = 1;

	/* if the device reports the mode we selected as the one in use,
	   the error is presumably harmless -- the mode took effect */
	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
		ign_dev_err = 1;

	if (err_mask & AC_ERR_DEV) {
		if (!ign_dev_err)
			goto fail;
		else
			dev_err_whine = " (device error ignored)";
	}

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	if (!(ehc->i.flags & ATA_EHI_QUIET) ||
	    ehc->i.flags & ATA_EHI_DID_HARDRESET)
		ata_dev_info(dev, "configured for %s%s\n",
			     ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
			     dev_err_whine);

	return 0;

 fail:
	ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
	return -EIO;
}
3566
3567
3568
3569
3570
3571
3572
3573
3574
3575
3576
3577
3578
3579
3580
3581
3582
3583
/**
 *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
 *	@link: link on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Standard implementation of the function used to tune and set
 *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_dev_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;
	int rc = 0, used_dma = 0, found = 0;

	/* step 1: calculate xfer_mask */
	ata_for_each_dev(dev, link, ENABLED) {
		unsigned long pio_mask, dma_mask;
		unsigned int mode_mask;

		mode_mask = ATA_DMA_MASK_ATA;
		if (dev->class == ATA_DEV_ATAPI)
			mode_mask = ATA_DMA_MASK_ATAPI;
		else if (ata_id_is_cfa(dev->id))
			mode_mask = ATA_DMA_MASK_CFA;

		ata_dev_xfermask(dev);
		ata_force_xfermask(dev);

		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);

		/* honour the libata_dma_mask module policy per class */
		if (libata_dma_mask & mode_mask)
			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
						     dev->udma_mask);
		else
			dma_mask = 0;

		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (ata_dma_enabled(dev))
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	ata_for_each_dev(dev, link, ENABLED) {
		if (dev->pio_mode == 0xff) {
			ata_dev_warn(dev, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	ata_for_each_dev(dev, link, ENABLED) {
		if (!ata_dma_enabled(dev))
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	ata_for_each_dev(dev, link, ENABLED) {
		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status.  On a simplex host only one port may
	 * use DMA at a time; claim it for this port. */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = ap;

 out:
	if (rc)
		*r_failed_dev = dev;
	return rc;
}
3665
3666
3667
3668
3669
3670
3671
3672
3673
3674
3675
3676
3677
3678
3679
3680
3681
3682
3683
3684
3685
/**
 *	ata_wait_ready - wait for link to become ready
 *	@link: link to be waited on
 *	@deadline: deadline jiffies for the operation
 *	@check_ready: callback to check link readiness
 *
 *	Wait for @link to become ready.  @check_ready should return
 *	positive number if @link is ready, 0 if it isn't, -ENODEV if
 *	link doesn't seem to be occupied, other errno for other error
 *	conditions.
 *
 *	Transient -ENODEV conditions are allowed for
 *	ATA_TMOUT_FF_WAIT (longer when the host does parallel scan).
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_ready(struct ata_link *link, unsigned long deadline,
		   int (*check_ready)(struct ata_link *link))
{
	unsigned long start = jiffies;
	unsigned long nodev_deadline;
	int warned = 0;

	/* choose which nodev delay applies */
	if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
	else
		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);

	/* slave links are handled via the master link; this function
	 * is not expected to be called on ap->slave_link directly */
	WARN_ON(link == link->ap->slave_link);

	if (time_after(nodev_deadline, deadline))
		nodev_deadline = deadline;

	while (1) {
		unsigned long now = jiffies;
		int ready, tmp;

		ready = tmp = check_ready(link);
		if (ready > 0)
			return 0;

		/*
		 * -ENODEV could be transient.  Ignore -ENODEV if link
		 * is online.  Also, some SATA devices take a long
		 * time to clear 0xff after reset.  Keep waiting until
		 * nodev_deadline as long as the link isn't offline.
		 */
		if (ready == -ENODEV) {
			if (ata_link_online(link))
				ready = 0;
			else if ((link->ap->flags & ATA_FLAG_SATA) &&
				 !ata_link_offline(link) &&
				 time_before(now, nodev_deadline))
				ready = 0;
		}

		if (ready)
			return ready;
		if (time_after(now, deadline))
			return -EBUSY;

		/* be patient but tell the user once after 5s */
		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_link_warn(link,
				"link is slow to respond, please be patient "
				"(ready=%d)\n", tmp);
			warned = 1;
		}

		ata_msleep(link->ap, 50);
	}
}
3752
3753
3754
3755
3756
3757
3758
3759
3760
3761
3762
3763
3764
3765
3766
/**
 *	ata_wait_after_reset - wait for link to become ready after reset
 *	@link: link to be waited on
 *	@deadline: deadline jiffies for the operation
 *	@check_ready: callback to check link readiness
 *
 *	Wait for @link to become ready after reset.  Sleeps for
 *	ATA_WAIT_AFTER_RESET ms before handing off to ata_wait_ready().
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
			 int (*check_ready)(struct ata_link *link))
{
	ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);

	return ata_wait_ready(link, deadline, check_ready);
}
3774
3775
3776
3777
3778
3779
3780
3781
3782
3783
3784
3785
3786
3787
3788
3789
3790
3791
3792
3793
3794
3795
3796
/**
 *	sata_link_debounce - debounce SATA phy status
 *	@link: ATA link to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Make sure SStatus of @link reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constraints the
 *	beginning of the stable state.  Because DET gets stuck at 1 on
 *	some controllers after hot unplugging, this function waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	@timeout is further limited by @deadline.  The sooner of the
 *	two is used.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval = params[0];
	unsigned long duration = params[1];
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	t = ata_deadline(jiffies, params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* DET field only */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		ata_msleep(link->ap, interval);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			/* DET == 1 only counts as stable once past the deadline */
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies,
				       ata_deadline(last_jiffies, duration)))
				return 0;
			continue;
		}

		/* unstable -> restart the stability window */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE to tell upper layer to lower link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}
3844
3845
3846
3847
3848
3849
3850
3851
3852
3853
3854
3855
3856
3857
3858
/**
 *	sata_link_resume - resume SATA link
 *	@link: ATA link to resume SATA
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Resume SATA phy @link and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_resume(struct ata_link *link, const unsigned long *params,
		     unsigned long deadline)
{
	int tries = ATA_LINK_RESUME_TRIES;
	u32 scontrol, serror;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/*
	 * Writes to SControl sometimes get ignored under certain
	 * circumstances.  Make sure DET actually gets cleared (the
	 * low nibble reads back as 0) by retrying a bounded number
	 * of times.
	 */
	do {
		scontrol = (scontrol & 0x0f0) | 0x300;
		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			return rc;

		/*
		 * Some PHYs react badly if SStatus is pounded
		 * immediately after resuming.  Delay 200ms before
		 * debouncing (unless the LLD opted out).
		 */
		if (!(link->flags & ATA_LFLAG_NO_DB_DELAY))
			ata_msleep(link->ap, 200);

		/* is SControl restored correctly? */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			return rc;
	} while ((scontrol & 0xf0f) != 0x300 && --tries);

	if ((scontrol & 0xf0f) != 0x300) {
		ata_link_warn(link, "failed to resume link (SControl %X)\n",
			     scontrol);
		return 0;
	}

	if (tries < ATA_LINK_RESUME_TRIES)
		ata_link_warn(link, "link resume succeeded after %d retries\n",
			      ATA_LINK_RESUME_TRIES - tries);

	if ((rc = sata_link_debounce(link, params, deadline)))
		return rc;

	/* clear SError */
	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
		rc = sata_scr_write(link, SCR_ERROR, serror);

	/* -EINVAL from the SError access presumably means SCR access
	 * isn't supported; treat that as success -- confirm against
	 * sata_scr_read()/sata_scr_write() error semantics */
	return rc != -EINVAL ? rc : 0;
}
3910
3911
3912
3913
3914
3915
3916
3917
3918
3919
3920
3921
3922
3923
3924
3925
3926
3927
3928
/**
 *	sata_link_scr_lpm - manipulate SControl IPM and SPM fields
 *	@link: ATA link to manipulate SControl for
 *	@policy: LPM policy to configure
 *	@spm_wakeup: whether to initiate SPM wakeup
 *
 *	Manipulate the IPM field of the SControl register of @link
 *	according to @policy.  If @policy is ATA_LPM_MAX_POWER and
 *	@spm_wakeup is %true, the SPM field is manipulated to wake up
 *	a link which was put into SPM mode.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
		      bool spm_wakeup)
{
	struct ata_eh_context *ehc = &link->eh_context;
	bool woken_up = false;
	u32 scontrol;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	switch (policy) {
	case ATA_LPM_MAX_POWER:
		/* disable all LPM transitions */
		scontrol |= (0x7 << 8);
		/* initiate transition to active state */
		if (spm_wakeup) {
			scontrol |= (0x4 << 12);
			woken_up = true;
		}
		break;
	case ATA_LPM_MED_POWER:
		/* allow LPM to PARTIAL */
		scontrol &= ~(0x1 << 8);
		scontrol |= (0x6 << 8);
		break;
	case ATA_LPM_MED_POWER_WITH_DIPM:
	case ATA_LPM_MIN_POWER_WITH_PARTIAL:
	case ATA_LPM_MIN_POWER:
		if (ata_link_nr_enabled(link) > 0)
			/* no restrictions on LPM transitions */
			scontrol &= ~(0x7 << 8);
		else {
			/* empty port, power off */
			scontrol &= ~0xf;
			scontrol |= (0x1 << 2);
		}
		break;
	default:
		WARN_ON(1);
	}

	rc = sata_scr_write(link, SCR_CONTROL, scontrol);
	if (rc)
		return rc;

	/* give the link time to transit out of LPM state */
	if (woken_up)
		msleep(10);

	/* clear PHYRDY_CHG from SError */
	ehc->i.serror &= ~SERR_PHYRDY_CHG;
	return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
}
3984
3985
3986
3987
3988
3989
3990
3991
3992
3993
3994
3995
3996
3997
3998
3999
4000
4001
/**
 *	ata_std_prereset - prepare for reset
 *	@link: ATA link to be reset
 *	@deadline: deadline jiffies for the operation
 *
 *	@link is about to be reset.  Initialize it.  Failure from
 *	prereset makes libata abort whole reset sequence and give up
 *	that port, so prereset should be best-effort.  It does its
 *	best to prepare for reset sequence but if things go wrong, it
 *	should just whine, not fail.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume link */
	if (ap->flags & ATA_FLAG_SATA) {
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_warn(link,
				      "failed to resume link for reset (errno=%d)\n",
				      rc);
	}

	/* no point in trying softreset on offline link */
	if (ata_phys_link_offline(link))
		ehc->i.action &= ~ATA_EH_SOFTRESET;

	return 0;
}
4029
4030
4031
4032
4033
4034
4035
4036
4037
4038
4039
4040
4041
4042
4043
4044
4045
4046
4047
4048
4049
4050
4051
4052
4053
/**
 *	sata_link_hardreset - reset link via SATA phy reset
 *	@link: link to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *	@online: optional out parameter indicating link onlineness
 *	@check_ready: optional callback used to determine link readiness
 *
 *	SATA phy-reset @link using DET bits of SControl register.
 *	After hardreset, link readiness is waited upon using
 *	ata_wait_ready() if @check_ready is specified.  LLDs are
 *	allowed to not specify @check_ready and wait themselves after
 *	this function returns.  Device classification is LLD's
 *	responsibility.
 *
 *	*@online is set iff reset succeeded and @link is online after
 *	reset.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
			unsigned long deadline,
			bool *online, int (*check_ready)(struct ata_link *))
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (online)
		*online = false;

	if (sata_set_spd_needed(link)) {
		/* The link speed limit changed.  To be on the safe
		 * side, turn off the phy (DET = 4) while
		 * reconfiguring SPD, then apply the new limit.
		 */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			goto out;

		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(link);
	}

	/* issue phy wake/reset (DET = 1 initiates COMRESET) */
	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		goto out;

	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	ata_msleep(link->ap, 1);

	/* bring link back */
	rc = sata_link_resume(link, timing, deadline);
	if (rc)
		goto out;

	/* if link is offline nothing more to do */
	if (ata_phys_link_offline(link))
		goto out;

	/* Link is online.  From this point, -ENODEV too is an error. */
	if (online)
		*online = true;

	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
		/* If PMP is supported, we have to do follow-up SRST.
		 * Some PMPs don't send D2H Reg FIS after hardreset if
		 * the first port is empty, so only wait a limited
		 * time for readiness and then hand back -EAGAIN so EH
		 * issues the follow-up softreset.
		 */
		if (check_ready) {
			unsigned long pmp_deadline;

			pmp_deadline = ata_deadline(jiffies,
						    ATA_TMOUT_PMP_SRST_WAIT);
			if (time_after(pmp_deadline, deadline))
				pmp_deadline = deadline;
			ata_wait_ready(link, pmp_deadline, check_ready);
		}
		rc = -EAGAIN;
		goto out;
	}

	rc = 0;
	if (check_ready)
		rc = ata_wait_ready(link, deadline, check_ready);
 out:
	if (rc && rc != -EAGAIN) {
		/* online is set iff link is online && reset succeeded */
		if (online)
			*online = false;
		ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
	}
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
4141
4142
4143
4144
4145
4146
4147
4148
4149
4150
4151
4152
4153
4154
4155
4156int sata_std_hardreset(struct ata_link *link, unsigned int *class,
4157 unsigned long deadline)
4158{
4159 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
4160 bool online;
4161 int rc;
4162
4163
4164 rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
4165 return online ? -EAGAIN : rc;
4166}
4167
4168
4169
4170
4171
4172
4173
4174
4175
4176
4177
4178
4179
4180void ata_std_postreset(struct ata_link *link, unsigned int *classes)
4181{
4182 u32 serror;
4183
4184 DPRINTK("ENTER\n");
4185
4186
4187 if (!sata_scr_read(link, SCR_ERROR, &serror))
4188 sata_scr_write(link, SCR_ERROR, serror);
4189
4190
4191 sata_print_link_status(link);
4192
4193 DPRINTK("EXIT\n");
4194}
4195
4196
4197
4198
4199
4200
4201
4202
4203
4204
4205
4206
4207
4208
4209
4210
4211
4212static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
4213 const u16 *new_id)
4214{
4215 const u16 *old_id = dev->id;
4216 unsigned char model[2][ATA_ID_PROD_LEN + 1];
4217 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
4218
4219 if (dev->class != new_class) {
4220 ata_dev_info(dev, "class mismatch %d != %d\n",
4221 dev->class, new_class);
4222 return 0;
4223 }
4224
4225 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
4226 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
4227 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
4228 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
4229
4230 if (strcmp(model[0], model[1])) {
4231 ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
4232 model[0], model[1]);
4233 return 0;
4234 }
4235
4236 if (strcmp(serial[0], serial[1])) {
4237 ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
4238 serial[0], serial[1]);
4239 return 0;
4240 }
4241
4242 return 1;
4243}
4244
4245
4246
4247
4248
4249
4250
4251
4252
4253
4254
4255
4256
4257
4258
4259int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
4260{
4261 unsigned int class = dev->class;
4262 u16 *id = (void *)dev->link->ap->sector_buf;
4263 int rc;
4264
4265
4266 rc = ata_dev_read_id(dev, &class, readid_flags, id);
4267 if (rc)
4268 return rc;
4269
4270
4271 if (!ata_dev_same_device(dev, class, id))
4272 return -ENODEV;
4273
4274 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
4275 return 0;
4276}
4277
4278
4279
4280
4281
4282
4283
4284
4285
4286
4287
4288
4289
4290
4291
4292
/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@new_class: new class code
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page, make sure @dev is still attached to the
 *	port and reconfigure it according to the new IDENTIFY page.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
		       unsigned int readid_flags)
{
	u64 n_sectors = dev->n_sectors;
	u64 n_native_sectors = dev->n_native_sectors;
	int rc;

	if (!ata_dev_enabled(dev))
		return -ENODEV;

	/* fail early if the new class isn't one we can revalidate to */
	if (ata_class_enabled(new_class) &&
	    new_class != ATA_DEV_ATA &&
	    new_class != ATA_DEV_ATAPI &&
	    new_class != ATA_DEV_ZAC &&
	    new_class != ATA_DEV_SEMB) {
		ata_dev_info(dev, "class mismatch %u != %u\n",
			     dev->class, new_class);
		rc = -ENODEV;
		goto fail;
	}

	/* re-read ID */
	rc = ata_dev_reread_id(dev, readid_flags);
	if (rc)
		goto fail;

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc)
		goto fail;

	/* verify n_sectors hasn't changed */
	if (dev->class != ATA_DEV_ATA || !n_sectors ||
	    dev->n_sectors == n_sectors)
		return 0;

	/* n_sectors has changed */
	ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
		     (unsigned long long)n_sectors,
		     (unsigned long long)dev->n_sectors);

	/*
	 * Something could have caused HPA to be unlocked
	 * involuntarily.  If n_native_sectors hasn't changed and the
	 * new size matches it, keep the device.
	 */
	if (dev->n_native_sectors == n_native_sectors &&
	    dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
		ata_dev_warn(dev,
			     "new n_sectors matches native, probably "
			     "late HPA unlock, n_sectors updated\n");
		/* use the larger n_sectors */
		return 0;
	}

	/*
	 * Some BIOSes boot w/o HPA but resume w/ HPA locked.  Try
	 * unlocking HPA in those cases (unless the device is known to
	 * have broken HPA handling) by flagging it and retrying with
	 * -EIO.
	 */
	if (dev->n_native_sectors == n_native_sectors &&
	    dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
	    !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
		ata_dev_warn(dev,
			     "old n_sectors matches native, probably "
			     "late HPA lock, will try to unlock HPA\n");
		/* try unlocking HPA */
		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
		rc = -EIO;
	} else
		rc = -ENODEV;

	/* restore original n_[native_]sectors and fail */
	dev->n_native_sectors = n_native_sectors;
	dev->n_sectors = n_sectors;
 fail:
	ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
4374
/* One quirk-table entry: glob patterns for model number and (optionally)
 * firmware revision, plus the ATA_HORKAGE_* flags to apply on a match.
 * A NULL model_rev matches any revision. */
struct ata_blacklist_entry {
	const char *model_num;
	const char *model_rev;
	unsigned long horkage;
};
4380
static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B",	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-848[02]B",	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8[34]35",NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A",		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
	{ " 2GB ATA Flash Disk", "ADMA428M",	ATA_HORKAGE_NODMA },
	{ "VRFDFC22048UCHC-TE*", NULL,		ATA_HORKAGE_NODMA },
	/* Odd clown on sil3726/4726 PMPs */
	{ "Config Disk",	NULL,		ATA_HORKAGE_DISABLE },

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
	{ "QUANTUM DAT DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA },
	{ "Slimtype DVD A  DS8A8SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
	{ "Slimtype DVD A  DS8A9SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },

	/*
	 * Causes silent data corruption with higher max sects.
	 */
	{ "ST380013AS",		"3.20",		ATA_HORKAGE_MAX_SEC_1024 },

	/*
	 * These devices time out with higher max sects.
	 */
	{ "LITEON CX1-JB*-HP",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },
	{ "LITEON EP1-*",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },

	/* Devices where NCQ should be avoided */
	/* NCQ is slow */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
	/* NCQ is broken */
	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
	{ "OCZ CORE_SSD",	"02.10104",	ATA_HORKAGE_NONCQ },

	/* Seagate NCQ + FLUSH CACHE firmware bug */
	{ "ST31500341AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST31000333AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST3640[36]23AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST3320[68]13AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	/* drives which fail FPDMA_AA activation (some may freeze afterwards)
	   the ST disks also have LPM issues */
	{ "ST1000LM024 HN-M101MBB", NULL,	ATA_HORKAGE_BROKEN_FPDMA_AA |
						ATA_HORKAGE_NOLPM, },
	{ "VB0250EAVER",	"HPG7",		ATA_HORKAGE_BROKEN_FPDMA_AA },

	/* Blacklist entries taken from Silicon Image 3124/3132
	   Windows driver .inf file - also several Linux problem reports */
	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },

	/* NCQ broken, see https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
	{ "C300-CTFDDAC128MAG",	"0001",		ATA_HORKAGE_NONCQ, },

	/* Sandisk SD7/8/9s lock up hard on NCQ enabled */
	{ "SanDisk SD[78]SN*G",	NULL,		ATA_HORKAGE_NONCQ, },

	/* devices which puke on READ_NATIVE_MAX */
	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },

	/* this one allows HPA unlocking but fails IOs on the area */
	{ "OCZ-VERTEX",		    "1.30",	ATA_HORKAGE_BROKEN_HPA },

	/* Devices which report 1 sector over size HPA */
	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },

	/* Devices which get the IVB wrong */
	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
	/* Maybe we should just blacklist TSSTcorp... */
	{ "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB, },

	/* Devices that do not need bridging limits applied */
	{ "MTRON MSP-SATA*",		NULL,	ATA_HORKAGE_BRIDGE_OK, },
	{ "BUFFALO HD-QSU2/R5",		NULL,	ATA_HORKAGE_BRIDGE_OK, },

	/* Devices which aren't very happy with higher link speeds */
	{ "WD My Book",			NULL,	ATA_HORKAGE_1_5_GBPS, },
	{ "Seagate FreeAgent GoFlex",	NULL,	ATA_HORKAGE_1_5_GBPS, },

	/*
	 * Devices which choke on SETXFER.  Applies only if both the
	 * device and controller are SATA.
	 */
	{ "PIONEER DVD-RW DVRTD08",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW DVRTD08A",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW DVR-215",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },

	/* Crucial BX100 SSD 500GB has broken LPM support */
	{ "CT500BX100SSD1",		NULL,	ATA_HORKAGE_NOLPM },

	/* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
	{ "Crucial_CT512MX100*",	"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM |
						ATA_HORKAGE_NOLPM, },
	/* 512GB MX100 with newer firmware has only LPM issues */
	{ "Crucial_CT512MX100*",	NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM |
						ATA_HORKAGE_NOLPM, },

	/* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
	{ "Crucial_CT480M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM |
						ATA_HORKAGE_NOLPM, },
	{ "Crucial_CT960M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM |
						ATA_HORKAGE_NOLPM, },

	/* These specific Samsung models/firmware-revs do not handle LPM well */
	{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
	{ "SAMSUNG SSD PM830 mSATA *",  "CXM13D1Q", ATA_HORKAGE_NOLPM, },
	{ "SAMSUNG MZ7TD256HAFV-000L9", NULL,       ATA_HORKAGE_NOLPM, },
	{ "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM, },

	/* devices that don't properly handle queued TRIM commands */
	{ "Micron_M500IT_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Micron_M500_*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Crucial_CT*M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Micron_M5[15]0_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Crucial_CT*M550*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Crucial_CT*MX100*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Samsung SSD 840*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Samsung SSD 850*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "FCCT*M500*",			NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },

	/* devices that don't properly handle TRIM commands */
	{ "SuperSSpeed S238*",		NULL,	ATA_HORKAGE_NOTRIM, },

	/*
	 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
	 * (Return Zero After Trim) flags in the ATA Command Set are
	 * unreliable in the sense that they only define what happens if
	 * the device successfully executed the DSM TRIM command.  TRIM
	 * is only advisory, however, and the device is free to silently
	 * ignore all or parts of the request.
	 *
	 * Whitelist drives that are known to reliably return zeroes
	 * after TRIM.
	 */

	/*
	 * The intel 510 drive has buggy DRAT/RZAT.  Explicitly exclude
	 * that model before whitelisting all other intel SSDs.
	 */
	{ "INTEL*SSDSC2MH*",		NULL,	0, },

	{ "Micron*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Crucial*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "INTEL*SSD*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "SSD*INTEL*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Samsung*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "SAMSUNG*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "SAMSUNG*MZ7KM*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "ST[1248][0248]0[FH]*",	NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },

	/*
	 * Some WD SATA-I drives spin up and down erratically when the link
	 * is put into the slumber mode.  We don't have full list of the
	 * affected devices.  Disable LPM if the device matches one of the
	 * known prefixes and is SATA-1.  As a side effect LPM partial is
	 * disabled.
	 *
	 * https://bugzilla.kernel.org/show_bug.cgi?id=57211
	 */
	{ "WDC WD800JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD1200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD1600JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD2000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD2500JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD3000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD3200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },

	/* End Marker */
	{ }
};
4614
4615static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4616{
4617 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4618 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4619 const struct ata_blacklist_entry *ad = ata_device_blacklist;
4620
4621 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4622 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4623
4624 while (ad->model_num) {
4625 if (glob_match(ad->model_num, model_num)) {
4626 if (ad->model_rev == NULL)
4627 return ad->horkage;
4628 if (glob_match(ad->model_rev, model_rev))
4629 return ad->horkage;
4630 }
4631 ad++;
4632 }
4633 return 0;
4634}
4635
/*
 * ata_dma_blacklisted - return non-zero when DMA must not be used on @dev.
 *
 * Devices that assert INTRQ to signal CDB transfer (ATA_DFLAG_CDB_INTR)
 * cannot be driven with DMA on ports that poll PIO (ATA_FLAG_PIO_POLLING),
 * since the interrupt would be missed.  Otherwise DMA is forbidden only
 * when the device carries the ATA_HORKAGE_NODMA quirk.
 */
static int ata_dma_blacklisted(const struct ata_device *dev)
{
	/* CDB-interrupt ATAPI device on a PIO-polling port: no DMA */
	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
	    (dev->flags & ATA_DFLAG_CDB_INTR))
		return 1;
	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
}
4647
4648
4649
4650
4651
4652
4653
4654
4655
4656static int ata_is_40wire(struct ata_device *dev)
4657{
4658 if (dev->horkage & ATA_HORKAGE_IVB)
4659 return ata_drive_40wire_relaxed(dev->id);
4660 return ata_drive_40wire(dev->id);
4661}
4662
4663
4664
4665
4666
4667
4668
4669
4670
4671
4672
4673
4674
4675
/*
 * cable_is_40wire - decide whether the port is wired with a 40-wire cable
 * @ap: port to check
 *
 * Returns 1 if the cable must be treated as 40-wire (UDMA > 33 unsafe),
 * 0 otherwise.  Host-side knowledge wins; the drive-side report is only
 * consulted when the host cannot tell.
 */
static int cable_is_40wire(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;

	/* If the controller thinks we are 40 wire, we are. */
	if (ap->cbl == ATA_CBL_PATA40)
		return 1;

	/* If the controller thinks we are 80 wire, we are. */
	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
		return 0;

	/*
	 * A short cable is treated as 80-wire here: the drive-side
	 * detection is unreliable on short cabling, so trust the host.
	 */
	if (ap->cbl == ATA_CBL_PATA40_SHORT)
		return 0;

	/*
	 * Cable type unknown (ATA_CBL_PATA_UNK or similar): fall back to
	 * the drive-side report.  Only if every enabled device on every
	 * edge link says 40-wire do we conclude the cable is 40-wire.
	 */
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ENABLED) {
			if (!ata_is_40wire(dev))
				return 0;
		}
	}
	return 1;
}
4713
4714
4715
4716
4717
4718
4719
4720
4721
4722
4723
4724
4725
/*
 * ata_dev_xfermask - compute the final transfer-mode masks for @dev
 * @dev: device to fix up
 *
 * Intersects the port's capabilities, the device's current masks and
 * the IDENTIFY-reported modes, then applies pairing, blacklist,
 * simplex, IORDY, controller-filter and cable restrictions.  The
 * result is unpacked back into dev->{pio,mwdma,udma}_mask.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 * When a master/slave pair shares the channel, cap modes the
	 * shared bus cannot carry reliably.
	 */
	if (ata_dev_pair(dev)) {
		/* no PIO5 / PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* no MWDMA3 / MWDMA4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_warn(dev,
			     "device is on DMA blacklist, disabling DMA\n");
	}

	/* on a simplex host only one port may use DMA at a time */
	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_warn(dev,
			     "simplex DMA is claimed by other device, disabling DMA\n");
	}

	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	/* give the controller driver a chance to veto modes */
	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/*
	 * Apply cable rule here.  Don't apply it early because when we
	 * handle hot plug the cable type can itself change: only cap
	 * UDMA modes above 33 (the 0xF8 bits) if a 40-wire cable is
	 * actually in use.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if (cable_is_40wire(ap)) {
			ata_dev_warn(dev,
				     "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
4791
4792
4793
4794
4795
4796
4797
4798
4799
4800
4801
4802
4803
4804
4805
/*
 * ata_dev_set_xfermode - issue SET FEATURES - XFER MODE to @dev
 * @dev: device whose xfer mode is to be programmed
 *
 * Sends SET FEATURES with the device's currently selected xfer mode.
 * Returns 0 on success, AC_ERR_* mask on failure.
 */
static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	/*
	 * Some controllers and ATAPI devices show flaky interrupt
	 * behavior after setting xfer mode.  Use polling instead.
	 */
	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
	tf.protocol = ATA_PROT_NODATA;

	/* if the device needs IORDY, program the chosen mode */
	if (ata_pio_need_iordy(dev))
		tf.nsect = dev->xfer_mode;
	/* if the device claims IORDY, explicitly select PIO default (0x01) */
	else if (ata_id_has_iordy(dev->id))
		tf.nsect = 0x01;
	else /* no IORDY at all: nothing to program, report success */
		return 0;

	/* 15s is ample for a mode switch; some drives are slow here */
	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4837
4838
4839
4840
4841
4842
4843
4844
4845
4846
4847
4848
4849
4850
4851
4852
/*
 * ata_dev_set_feature - issue SET FEATURES - SATA FEATURES to @dev
 * @dev: device to which the command is sent
 * @enable: the feature sub-command (goes in the FEATURE register)
 * @feature: the feature value (goes in the sector-count register)
 *
 * Returns 0 on success, AC_ERR_* mask otherwise.
 */
unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
{
	struct ata_taskfile tf;
	unsigned int err_mask;
	unsigned long timeout = 0;

	/* set up set-features taskfile */
	DPRINTK("set features - SATA features\n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = enable;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = feature;

	/* spin-up can take much longer than the default timeout */
	if (enable == SETFEATURES_SPINUP)
		timeout = ata_probe_timeout ?
			  ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT;
	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
EXPORT_SYMBOL_GPL(ata_dev_set_feature);
4878
4879
4880
4881
4882
4883
4884
4885
4886
4887
4888
4889
4890
/*
 * ata_dev_init_params - issue INIT DEV PARAMS to set CHS geometry
 * @dev: target device
 * @heads: number of heads (1-16)
 * @sectors: sectors per track (1-255)
 *
 * Returns 0 on success, AC_ERR_INVALID for out-of-range geometry,
 * AC_ERR_* mask on command failure.  A device abort is treated as
 * success since many devices refuse the (optional) command.
 */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* reject out-of-range CHS geometry up front */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	/*
	 * A clean abort (AC_ERR_DEV + ATA_ABORTED) means the device
	 * merely declined the command; treat that as success.
	 */
	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
		err_mask = 0;

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4921
4922
4923
4924
4925
4926
4927
4928
4929
4930
4931
4932
4933
4934
4935
4936int atapi_check_dma(struct ata_queued_cmd *qc)
4937{
4938 struct ata_port *ap = qc->ap;
4939
4940
4941
4942
4943 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4944 unlikely(qc->nbytes & 15))
4945 return 1;
4946
4947 if (ap->ops->check_atapi_dma)
4948 return ap->ops->check_atapi_dma(qc);
4949
4950 return 0;
4951}
4952
4953
4954
4955
4956
4957
4958
4959
4960
4961
4962
4963
4964
4965
4966
4967
/*
 * ata_std_qc_defer - standard command-deferral policy
 * @qc: command in question
 *
 * NCQ and non-NCQ commands cannot run on a link simultaneously.
 * An NCQ command may start as long as no non-NCQ command holds
 * link->active_tag; a non-NCQ command additionally requires that no
 * NCQ commands are outstanding (link->sactive empty).
 *
 * Returns 0 when @qc can be issued, ATA_DEFER_LINK to defer it.
 */
int ata_std_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;

	if (ata_is_ncq(qc->tf.protocol)) {
		if (!ata_tag_valid(link->active_tag))
			return 0;
	} else {
		if (!ata_tag_valid(link->active_tag) && !link->sactive)
			return 0;
	}

	return ATA_DEFER_LINK;
}
4982
/* No-op qc_prep for controllers that need no per-command preparation. */
enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc)
{
	return AC_ERR_OK;
}
4987
4988
4989
4990
4991
4992
4993
4994
4995
4996
4997
4998
4999
5000
/*
 * ata_sg_init - attach a scatter/gather table to a command
 * @qc: command to initialize
 * @sg: first element of the s/g table
 * @n_elem: number of elements in the table
 *
 * Also resets the cursor (cursg) to the first element.
 */
void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
		 unsigned int n_elem)
{
	qc->sg = sg;
	qc->n_elem = n_elem;
	qc->cursg = qc->sg;
}
5008
5009#ifdef CONFIG_HAS_DMA
5010
5011
5012
5013
5014
5015
5016
5017
5018
5019
/*
 * ata_sg_clean - unmap a command's DMA-mapped scatter/gather table
 * @qc: command whose mapping is torn down
 *
 * Unmaps using orig_n_elem (the element count passed to dma_map_sg(),
 * which may differ from the post-mapping n_elem) and clears the
 * DMAMAP flag and the sg pointer.
 */
static void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->sg;
	int dir = qc->dma_dir;

	WARN_ON_ONCE(sg == NULL);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	if (qc->n_elem)
		dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->sg = NULL;
}
5036
5037
5038
5039
5040
5041
5042
5043
5044
5045
5046
5047
5048
5049
/*
 * ata_sg_setup - DMA-map a command's scatter/gather table
 * @qc: command to map
 *
 * Returns 0 on success, -1 when the mapping failed.  On success the
 * original element count is saved in orig_n_elem (needed for unmap)
 * and n_elem is replaced with the possibly-coalesced mapped count.
 */
static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int n_elem;

	VPRINTK("ENTER, ata%u\n", ap->print_id);

	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
	if (n_elem < 1)
		return -1;

	VPRINTK("%d sg elements mapped\n", n_elem);
	qc->orig_n_elem = qc->n_elem;
	qc->n_elem = n_elem;
	qc->flags |= ATA_QCFLAG_DMAMAP;

	return 0;
}
5068
5069#else
5070
/* !CONFIG_HAS_DMA stubs: no mapping possible, setup always fails. */
static inline void ata_sg_clean(struct ata_queued_cmd *qc) {}
static inline int ata_sg_setup(struct ata_queued_cmd *qc) { return -1; }
5073
5074#endif
5075
5076
5077
5078
5079
5080
5081
5082
5083
5084
5085
5086
5087
5088void swap_buf_le16(u16 *buf, unsigned int buf_words)
5089{
5090#ifdef __BIG_ENDIAN
5091 unsigned int i;
5092
5093 for (i = 0; i < buf_words; i++)
5094 buf[i] = le16_to_cpu(buf[i]);
5095#endif
5096}
5097
5098
5099
5100
5101
5102
5103
5104
5105
5106
/*
 * ata_qc_new_init - allocate and initialize a queued command
 * @dev: device the command is for
 * @tag: tag to use (ignored and reallocated on SAS hosts)
 *
 * Returns the initialized qc, or NULL when the port is frozen or
 * (on SAS hosts) no tag could be allocated.
 */
struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_queued_cmd *qc;

	/* no command while frozen */
	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
		return NULL;

	/* SAS hosts manage their own tag space */
	if (ap->flags & ATA_FLAG_SAS_HOST) {
		tag = ata_sas_allocate_tag(ap);
		if (tag < 0)
			return NULL;
	}

	qc = __ata_qc_from_tag(ap, tag);
	qc->tag = qc->hw_tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;

	ata_qc_reinit(qc);

	return qc;
}
5133
5134
5135
5136
5137
5138
5139
5140
5141
5142
5143
/*
 * ata_qc_free - release a queued command
 * @qc: command to free
 *
 * Clears the command's flags, poisons its tag, and returns the tag
 * to the SAS tag allocator when the host manages tags itself.
 */
void ata_qc_free(struct ata_queued_cmd *qc)
{
	struct ata_port *ap;
	unsigned int tag;

	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	ap = qc->ap;

	qc->flags = 0;
	tag = qc->tag;
	if (ata_tag_valid(tag)) {
		qc->tag = ATA_TAG_POISON;
		if (ap->flags & ATA_FLAG_SAS_HOST)
			ata_sas_free_tag(tag, ap);
	}
}
5160
/*
 * __ata_qc_complete - low-level command completion
 * @qc: command to complete
 *
 * Unmaps DMA, updates the link/port active-command bookkeeping,
 * clears the command's ACTIVE state and finally invokes the
 * command's completion callback.  Must only be called on a command
 * that is still marked ATA_QCFLAG_ACTIVE.
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap;
	struct ata_link *link;

	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
	ap = qc->ap;
	link = qc->dev->link;

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (ata_is_ncq(qc->tf.protocol)) {
		link->sactive &= ~(1 << qc->hw_tag);
		/* link counts as active until its last NCQ command retires */
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/*
	 * atapi/pass-thru cmds may set result_tf and check sense;
	 * clear ACTIVE and the qc_active bit before the callback so a
	 * re-issued command in the callback sees a consistent state.
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1ULL << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
5199
/* Snapshot the device's result taskfile into qc->result_tf via the driver. */
static void fill_result_tf(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	qc->result_tf.flags = qc->tf.flags;
	ap->ops->qc_fill_rtf(qc);
}
5207
/*
 * ata_verify_xfer - a data command completed OK, so the current
 * transfer mode is no longer suspect.
 *
 * A PIO completion does not vouch for a configured DMA mode, so the
 * DUBIOUS_XFER flag is kept when the device has DMA modes but the
 * command ran in PIO.
 */
static void ata_verify_xfer(struct ata_queued_cmd *qc)
{
	struct ata_device *dev = qc->dev;

	if (!ata_is_data(qc->tf.protocol))
		return;

	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
		return;

	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
}
5220
5221
5222
5223
5224
5225
5226
5227
5228
5229
5230
5231
5232
5233
5234
5235
/*
 * ata_qc_complete - complete an active queued command
 * @qc: command to complete
 *
 * On EH-aware drivers, failed and internal commands are routed
 * through the error handler; successful commands additionally
 * trigger revalidation or sleep bookkeeping for the few commands
 * that change device state.  On legacy (non-EH) drivers the command
 * is completed directly.
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Trigger the LED (if available) */
	ledtrig_disk_activity(!!(qc->tf.flags & ATA_TFLAG_WRITE));

	/*
	 * In order to synchronize EH with the regular execution path,
	 * a qc owned by EH must carry ATA_QCFLAG_FAILED, and once EH
	 * is scheduled the qc belongs to EH exclusively.
	 */
	if (ap->ops->error_handler) {
		struct ata_device *dev = qc->dev;
		struct ata_eh_info *ehi = &dev->link->eh_info;

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		/*
		 * Internal commands are finished even on error;
		 * their issuer inspects err_mask itself.
		 */
		if (unlikely(ata_tag_internal(qc->tag))) {
			fill_result_tf(qc);
			trace_ata_qc_complete_internal(qc);
			__ata_qc_complete(qc);
			return;
		}

		/*
		 * Non-internal command failed: capture the result
		 * taskfile and hand the qc over to EH.
		 */
		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			fill_result_tf(qc);
			trace_ata_qc_complete_failed(qc);
			ata_qc_schedule_eh(qc);
			return;
		}

		WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		trace_ata_qc_complete_done(qc);

		/*
		 * Some commands alter device state and require
		 * revalidation or flag updates afterwards.
		 */
		switch (qc->tf.command) {
		case ATA_CMD_SET_FEATURES:
			/* only WC/RA toggles are harmless; anything
			 * else falls through to force revalidation */
			if (qc->tf.feature != SETFEATURES_WC_ON &&
			    qc->tf.feature != SETFEATURES_WC_OFF &&
			    qc->tf.feature != SETFEATURES_RA_ON &&
			    qc->tf.feature != SETFEATURES_RA_OFF)
				break;
			/* fall through */
		case ATA_CMD_INIT_DEV_PARAMS:
		case ATA_CMD_SET_MULTI:
			/* revalidate device */
			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
			ata_port_schedule_eh(ap);
			break;

		case ATA_CMD_SLEEP:
			dev->flags |= ATA_DFLAG_SLEEPING;
			break;
		}

		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
			ata_verify_xfer(qc);

		__ata_qc_complete(qc);
	} else {
		/* legacy path: EH-scheduled commands are off-limits */
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
5330
5331
5332
5333
5334
5335
5336
5337
5338
5339
5340
/*
 * ata_qc_get_active - return the bitmap of active command tags
 * @ap: port in question
 *
 * The internal tag is remapped onto bit 0 so hardware-facing drivers
 * see a contiguous tag space (the internal command never coexists
 * with tag-0 I/O).
 */
u64 ata_qc_get_active(struct ata_port *ap)
{
	u64 qc_active = ap->qc_active;

	/* ATA_TAG_INTERNAL is sent to hw as tag 0 */
	if (qc_active & (1ULL << ATA_TAG_INTERNAL)) {
		qc_active |= (1 << 0);
		qc_active &= ~(1ULL << ATA_TAG_INTERNAL);
	}

	return qc_active;
}
EXPORT_SYMBOL_GPL(ata_qc_get_active);
5354
5355
5356
5357
5358
5359
5360
5361
5362
5363
5364
5365
5366
5367
5368
5369
5370
5371
5372
5373
5374
/*
 * ata_qc_complete_multiple - complete all commands not in @qc_active
 * @ap: port in question
 * @qc_active: hardware's view of still-active tags
 *
 * Completes every command whose tag is active in ap->qc_active but
 * clear in @qc_active.  A tag active in @qc_active but not in
 * ap->qc_active is an illegal transition and returns -EINVAL.
 * Returns the number of commands completed.
 */
int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active)
{
	u64 done_mask, ap_qc_active = ap->qc_active;
	int nr_done = 0;

	/*
	 * If the internal tag is active, undo the bit-0 remapping done
	 * by ata_qc_get_active() so the two bitmaps are comparable.
	 */
	if (ap_qc_active & (1ULL << ATA_TAG_INTERNAL)) {
		qc_active |= (qc_active & 0x01) << ATA_TAG_INTERNAL;
		qc_active ^= qc_active & 0x01;
	}

	done_mask = ap_qc_active ^ qc_active;

	if (unlikely(done_mask & qc_active)) {
		ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n",
			     ap->qc_active, qc_active);
		return -EINVAL;
	}

	while (done_mask) {
		struct ata_queued_cmd *qc;
		unsigned int tag = __ffs64(done_mask);

		qc = ata_qc_from_tag(ap, tag);
		if (qc) {
			ata_qc_complete(qc);
			nr_done++;
		}
		done_mask &= ~(1ULL << tag);
	}

	return nr_done;
}
5412
5413
5414
5415
5416
5417
5418
5419
5420
5421
5422
5423
5424
/*
 * ata_qc_issue - issue a queued command to the device
 * @qc: command to issue
 *
 * Marks the command active on its link and port, sets up DMA mapping
 * when the protocol needs it, runs qc_prep and hands the command to
 * the controller's qc_issue hook.  On any failure the command is
 * completed with an error mask set.
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;
	u8 prot = qc->tf.protocol;

	/*
	 * NCQ and non-NCQ must not mix on a link; qc_defer should
	 * have prevented it, so this is a WARN, not a graceful path.
	 */
	WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	if (ata_is_ncq(prot)) {
		WARN_ON_ONCE(link->sactive & (1 << qc->hw_tag));

		if (!link->sactive)
			ap->nr_active_links++;
		link->sactive |= 1 << qc->hw_tag;
	} else {
		WARN_ON_ONCE(link->sactive);

		ap->nr_active_links++;
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1ULL << qc->tag;

	/*
	 * A data transfer with no payload attached is a driver bug;
	 * fail it before it reaches the hardware.
	 */
	if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes))
		goto sys_err;

	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
				 (ap->flags & ATA_FLAG_PIO_DMA)))
		if (ata_sg_setup(qc))
			goto sys_err;

	/* if device is sleeping, schedule reset and abort the link */
	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
		link->eh_info.action |= ATA_EH_RESET;
		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
		ata_link_abort(link);
		return;
	}

	qc->err_mask |= ap->ops->qc_prep(qc);
	if (unlikely(qc->err_mask))
		goto err;
	trace_ata_qc_issue(qc);
	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sys_err:
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	ata_qc_complete(qc);
}
5487
5488
5489
5490
5491
5492
5493
5494
5495
5496
5497
5498
5499
/*
 * sata_scr_valid - test whether SCR (SControl/SStatus/...) access works
 * @link: link to test
 *
 * True only for SATA ports whose driver implements scr_read.
 */
int sata_scr_valid(struct ata_link *link)
{
	struct ata_port *ap = link->ap;

	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
}
5506
5507
5508
5509
5510
5511
5512
5513
5514
5515
5516
5517
5518
5519
5520
5521
5522
5523int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5524{
5525 if (ata_is_host_link(link)) {
5526 if (sata_scr_valid(link))
5527 return link->ap->ops->scr_read(link, reg, val);
5528 return -EOPNOTSUPP;
5529 }
5530
5531 return sata_pmp_scr_read(link, reg, val);
5532}
5533
5534
5535
5536
5537
5538
5539
5540
5541
5542
5543
5544
5545
5546
5547
5548
5549
5550int sata_scr_write(struct ata_link *link, int reg, u32 val)
5551{
5552 if (ata_is_host_link(link)) {
5553 if (sata_scr_valid(link))
5554 return link->ap->ops->scr_write(link, reg, val);
5555 return -EOPNOTSUPP;
5556 }
5557
5558 return sata_pmp_scr_write(link, reg, val);
5559}
5560
5561
5562
5563
5564
5565
5566
5567
5568
5569
5570
5571
5572
5573
5574
5575
/*
 * sata_scr_write_flush - write an SCR register and flush with a read-back
 * @link: link the register belongs to
 * @reg: register index
 * @val: value to write
 *
 * Like sata_scr_write() but follows a successful write with a read
 * of the same register to flush posted writes.  Returns 0 on
 * success, -EOPNOTSUPP when SCR access is unavailable, negative
 * errno otherwise.
 */
int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
{
	if (ata_is_host_link(link)) {
		int rc;

		if (sata_scr_valid(link)) {
			rc = link->ap->ops->scr_write(link, reg, val);
			/* the read-back result only matters as an error code */
			if (rc == 0)
				rc = link->ap->ops->scr_read(link, reg, &val);
			return rc;
		}
		return -EOPNOTSUPP;
	}

	return sata_pmp_scr_write(link, reg, val);
}
5592
5593
5594
5595
5596
5597
5598
5599
5600
5601
5602
5603
5604
5605
5606
5607bool ata_phys_link_online(struct ata_link *link)
5608{
5609 u32 sstatus;
5610
5611 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5612 ata_sstatus_online(sstatus))
5613 return true;
5614 return false;
5615}
5616
5617
5618
5619
5620
5621
5622
5623
5624
5625
5626
5627
5628
5629
5630
5631bool ata_phys_link_offline(struct ata_link *link)
5632{
5633 u32 sstatus;
5634
5635 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5636 !ata_sstatus_online(sstatus))
5637 return true;
5638 return false;
5639}
5640
5641
5642
5643
5644
5645
5646
5647
5648
5649
5650
5651
5652
5653
5654
5655
5656
/*
 * ata_link_online - test whether @link (or its M/S slave) is online
 *
 * For master/slave configurations the pair counts as online when
 * either physical link is up.  Must not be called on a slave link
 * itself.
 */
bool ata_link_online(struct ata_link *link)
{
	struct ata_link *slave = link->ap->slave_link;

	WARN_ON(link == slave); /* shouldn't be called on slave link */

	return ata_phys_link_online(link) ||
	       (slave && ata_phys_link_online(slave));
}
5666
5667
5668
5669
5670
5671
5672
5673
5674
5675
5676
5677
5678
5679
5680
5681
5682
/*
 * ata_link_offline - test whether @link (and any M/S slave) is offline
 *
 * For master/slave configurations the pair counts as offline only
 * when both physical links are down.  Must not be called on a slave
 * link itself.
 */
bool ata_link_offline(struct ata_link *link)
{
	struct ata_link *slave = link->ap->slave_link;

	WARN_ON(link == slave); /* shouldn't be called on slave link */

	return ata_phys_link_offline(link) &&
	       (!slave || ata_phys_link_offline(slave));
}
5692
5693#ifdef CONFIG_PM
/*
 * ata_port_request_pm - schedule a PM operation via EH
 * @ap: port to operate on
 * @mesg: PM message (suspend/resume variant)
 * @action: EH actions to request on every link
 * @ehi_flags: EH info flags to set on every link
 * @async: when false, wait for EH to finish the operation
 *
 * The actual suspend/resume work is done by the error handler; this
 * records the request and kicks EH.
 */
static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
				unsigned int action, unsigned int ehi_flags,
				bool async)
{
	struct ata_link *link;
	unsigned long flags;

	/*
	 * A previous PM operation may still be in flight; wait for it
	 * so two requests never overlap.
	 */
	if (ap->pflags & ATA_PFLAG_PM_PENDING) {
		ata_port_wait_eh(ap);
		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
	}

	/* request PM operation to EH */
	spin_lock_irqsave(ap->lock, flags);

	ap->pm_mesg = mesg;
	ap->pflags |= ATA_PFLAG_PM_PENDING;
	ata_for_each_link(link, ap, HOST_FIRST) {
		link->eh_info.action |= action;
		link->eh_info.flags |= ehi_flags;
	}

	ata_port_schedule_eh(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!async) {
		ata_port_wait_eh(ap);
		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
	}
}
5728
5729
5730
5731
5732
5733
5734
5735
/* EH info flags used for all suspend requests: quiet, no autopsy, no
 * recovery attempts while going down. */
static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
						 | ATA_EHI_NO_AUTOPSY
						 | ATA_EHI_NO_RECOVERY;
5739
/* Synchronous port suspend: request PM via EH and wait for completion. */
static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
}
5744
/* Asynchronous port suspend: request PM via EH and return immediately. */
static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
}
5749
/* dev_pm_ops .suspend: no-op when the port is already runtime-suspended. */
static int ata_port_pm_suspend(struct device *dev)
{
	struct ata_port *ap = to_ata_port(dev);

	if (pm_runtime_suspended(dev))
		return 0;

	ata_port_suspend(ap, PMSG_SUSPEND);
	return 0;
}
5760
/* dev_pm_ops .freeze (hibernation): same runtime-suspended short-circuit
 * as .suspend, but with PMSG_FREEZE semantics. */
static int ata_port_pm_freeze(struct device *dev)
{
	struct ata_port *ap = to_ata_port(dev);

	if (pm_runtime_suspended(dev))
		return 0;

	ata_port_suspend(ap, PMSG_FREEZE);
	return 0;
}
5771
/* dev_pm_ops .poweroff: always suspend, even if runtime-suspended. */
static int ata_port_pm_poweroff(struct device *dev)
{
	ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE);
	return 0;
}
5777
/* EH info flags for resume requests: quiet, no autopsy. */
static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY
						| ATA_EHI_QUIET;
5780
/* Synchronous port resume: reset the link via EH and wait. */
static void ata_port_resume(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false);
}
5785
/* Asynchronous port resume: reset the link via EH, don't wait. */
static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true);
}
5790
/* dev_pm_ops .resume/.thaw/.restore: kick async resume, then bring the
 * runtime-PM state machine back to "active". */
static int ata_port_pm_resume(struct device *dev)
{
	ata_port_resume_async(to_ata_port(dev), PMSG_RESUME);
	pm_runtime_disable(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	return 0;
}
5799
5800
5801
5802
5803
5804
5805
5806
5807
/*
 * ata_port_runtime_idle - runtime-PM idle callback
 *
 * Refuse runtime suspend (-EBUSY) while an enabled ATAPI device
 * without ZPODD support is attached; such devices cannot report
 * media events while the port is suspended.
 */
static int ata_port_runtime_idle(struct device *dev)
{
	struct ata_port *ap = to_ata_port(dev);
	struct ata_link *link;
	struct ata_device *adev;

	ata_for_each_link(link, ap, HOST_FIRST) {
		ata_for_each_dev(adev, link, ENABLED)
			if (adev->class == ATA_DEV_ATAPI &&
			    !zpodd_dev_enabled(adev))
				return -EBUSY;
	}

	return 0;
}
5823
/* Runtime-PM suspend callback: synchronous auto-suspend via EH. */
static int ata_port_runtime_suspend(struct device *dev)
{
	ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND);
	return 0;
}
5829
/* Runtime-PM resume callback: synchronous auto-resume via EH. */
static int ata_port_runtime_resume(struct device *dev)
{
	ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME);
	return 0;
}
5835
/* dev_pm_ops for ata_port devices; thaw and restore share the resume path. */
static const struct dev_pm_ops ata_port_pm_ops = {
	.suspend = ata_port_pm_suspend,
	.resume = ata_port_pm_resume,
	.freeze = ata_port_pm_freeze,
	.thaw = ata_port_pm_resume,
	.poweroff = ata_port_pm_poweroff,
	.restore = ata_port_pm_resume,

	.runtime_suspend = ata_port_runtime_suspend,
	.runtime_resume = ata_port_runtime_resume,
	.runtime_idle = ata_port_runtime_idle,
};
5848
5849
5850
5851
5852
5853
/* Asynchronous suspend entry point for SAS-attached ATA ports. */
void ata_sas_port_suspend(struct ata_port *ap)
{
	ata_port_suspend_async(ap, PMSG_SUSPEND);
}
EXPORT_SYMBOL_GPL(ata_sas_port_suspend);
5859
/* Asynchronous resume entry point for SAS-attached ATA ports. */
void ata_sas_port_resume(struct ata_port *ap)
{
	ata_port_resume_async(ap, PMSG_RESUME);
}
EXPORT_SYMBOL_GPL(ata_sas_port_resume);
5865
5866
5867
5868
5869
5870
5871
5872
/*
 * ata_host_suspend - record host suspend state
 * @host: host being suspended
 * @mesg: PM message
 *
 * Per-port suspend work is done through EH; this merely records the
 * power state on the host's struct device.  Always returns 0.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	host->dev->power.power_state = mesg;
	return 0;
}
5878
5879
5880
5881
5882
5883
5884
/* Counterpart of ata_host_suspend(): mark the host powered on again. */
void ata_host_resume(struct ata_host *host)
{
	host->dev->power.power_state = PMSG_ON;
}
5889#endif
5890
/* Device type for ata_port devices; wires in PM ops when CONFIG_PM. */
const struct device_type ata_port_type = {
	.name = "ata_port",
#ifdef CONFIG_PM
	.pm = &ata_port_pm_ops,
#endif
};
5897
5898
5899
5900
5901
5902
5903
5904
5905
5906
/*
 * ata_dev_init - (re)initialize an ata_device for probing
 * @dev: device to initialize
 *
 * Resets the link speed limit, clears probe-relevant device state
 * and opens all transfer-mode masks.  The first part of the device
 * structure (up to ATA_DEVICE_CLEAR_BEGIN) is shared with EH and
 * must be cleared under ap->lock.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = ata_dev_phys_link(dev);
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the attached device, reset together */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/*
	 * High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	/* bulk-clear the region between the CLEAR markers */
	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
5932
5933
5934
5935
5936
5937
5938
5939
5940
5941
5942
5943
/*
 * ata_link_init - initialize an ata_link structure
 * @ap: port the link belongs to
 * @link: link to initialize
 * @pmp: port-multiplier port number for the link
 *
 * Clears the link's reinitializable region, wires it to @ap, and
 * initializes each of its devices.
 */
void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
{
	int i;

	/* clear everything except for devices (kept across re-init) */
	memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
	       ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);

	link->ap = ap;
	link->pmp = pmp;
	link->active_tag = ATA_TAG_POISON;
	link->hw_sata_spd_limit = UINT_MAX;

	/* can't use iterator, link not registered yet */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &link->device[i];

		dev->link = link;
		dev->devno = dev - link->device;
#ifdef CONFIG_ATA_ACPI
		dev->gtf_filter = ata_acpi_gtf_filter;
#endif
		ata_dev_init(dev);
	}
}
5969
5970
5971
5972
5973
5974
5975
5976
5977
5978
5979
5980
5981
5982
/*
 * sata_link_init_spd - initialize a link's SATA speed limits
 * @link: link whose limits are initialized
 *
 * Reads SControl, derives the hardware speed limit from its SPD
 * field, applies any forced limits from the kernel command line and
 * seeds the working limit.  Returns 0 on success, negative errno
 * when SControl cannot be read.
 */
int sata_link_init_spd(struct ata_link *link)
{
	u8 spd;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
	if (rc)
		return rc;

	/* SPD field (bits 7:4) caps the allowed generations */
	spd = (link->saved_scontrol >> 4) & 0xf;
	if (spd)
		link->hw_sata_spd_limit &= (1 << spd) - 1;

	ata_force_link_limits(link);

	link->sata_spd_limit = link->hw_sata_spd_limit;

	return 0;
}
6002
6003
6004
6005
6006
6007
6008
6009
6010
6011
6012
6013
6014
/*
 * ata_port_alloc - allocate and initialize a basic ata_port
 * @host: host the port will belong to
 *
 * Returns the new port (starting life frozen and marked as
 * initializing) or NULL on allocation failure.  The caller owns the
 * allocation; it is normally freed by ata_host_release().
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	/* start frozen; EH thaws the port once probing begins */
	ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
	ap->lock = &host->lock;
	ap->print_id = -1;
	ap->local_port_no = -1;
	ap->host = host;
	ap->dev = host->dev;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	mutex_init(&ap->scsi_scan_mutex);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);
	init_completion(&ap->park_req_pending);
	timer_setup(&ap->fastdrain_timer, ata_eh_fastdrain_timerfn,
		    TIMER_DEFERRABLE);

	ap->cbl = ATA_CBL_NONE;

	ata_link_init(ap, &ap->link, 0);

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	ata_sff_port_init(ap);

	return ap;
}
6062
/*
 * ata_devres_release - devres destructor for an ATA host
 * @gendev: device the host was registered against
 * @res: unused devres payload
 *
 * Drops the per-port SCSI host references, detaches drvdata and
 * releases the host reference taken at allocation time.  Ports may
 * be NULL if host allocation failed partway.
 */
static void ata_devres_release(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if (ap->scsi_host)
			scsi_host_put(ap->scsi_host);

	}

	dev_set_drvdata(gendev, NULL);
	ata_host_put(host);
}
6082
6083static void ata_host_release(struct kref *kref)
6084{
6085 struct ata_host *host = container_of(kref, struct ata_host, kref);
6086 int i;
6087
6088 for (i = 0; i < host->n_ports; i++) {
6089 struct ata_port *ap = host->ports[i];
6090
6091 kfree(ap->pmp_link);
6092 kfree(ap->slave_link);
6093 kfree(ap);
6094 host->ports[i] = NULL;
6095 }
6096 kfree(host);
6097}
6098
/* Take a reference on @host. */
void ata_host_get(struct ata_host *host)
{
	kref_get(&host->kref);
}
6103
/* Drop a reference on @host; frees it via ata_host_release() at zero. */
void ata_host_put(struct ata_host *host)
{
	kref_put(&host->kref, ata_host_release);
}
6108
6109
6110
6111
6112
6113
6114
6115
6116
6117
6118
6119
6120
6121
6122
6123
6124
6125
6126
6127
6128
/*
 * ata_host_alloc - allocate an ata_host with @max_ports ports
 * @dev: device the host will be attached to (devres owner)
 * @max_ports: number of ports to allocate
 *
 * The host's lifetime is tied to @dev through a devres entry whose
 * destructor drops the host reference.  Ports are allocated eagerly;
 * on any failure all partial state is rolled back via the devres
 * group.  Returns the host or NULL.
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;
	void *dr;

	DPRINTK("ENTER\n");

	/* alloc a container for our list of ATA ports (buses) */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
	host = kzalloc(sz, GFP_KERNEL);
	if (!host)
		return NULL;

	/* group all devres entries so a failure rolls everything back */
	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		goto err_free;

	dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
	if (!dr)
		goto err_out;

	devres_add(dev, dr);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	mutex_init(&host->eh_mutex);
	host->dev = dev;
	host->n_ports = max_ports;
	kref_init(&host->kref);

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

 err_out:
	devres_release_group(dev, NULL);
 err_free:
	kfree(host);
	return NULL;
}
6181
6182
6183
6184
6185
6186
6187
6188
6189
6190
6191
6192
6193
6194
6195
6196
6197
/*
 * ata_host_alloc_pinfo - allocate a host and seed ports from port_info
 * @dev: device the host will be attached to
 * @ppi: NULL-terminated-ish array of port_info pointers
 * @n_ports: number of ports
 *
 * Each port takes the next non-NULL entry of @ppi; once @ppi runs
 * out of entries the last one seen is reused for remaining ports.
 * Returns the host or NULL on allocation failure.
 */
struct ata_host *ata_host_alloc_pinfo(struct device *dev,
				      const struct ata_port_info * const * ppi,
				      int n_ports)
{
	const struct ata_port_info *pi;
	struct ata_host *host;
	int i, j;

	host = ata_host_alloc(dev, n_ports);
	if (!host)
		return NULL;

	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* advance through ppi until it is exhausted, then reuse pi */
		if (ppi[j])
			pi = ppi[j++];

		ap->pio_mask = pi->pio_mask;
		ap->mwdma_mask = pi->mwdma_mask;
		ap->udma_mask = pi->udma_mask;
		ap->flags |= pi->flags;
		ap->link.flags |= pi->link_flags;
		ap->ops = pi->port_ops;

		/* first non-dummy port_ops becomes the host's ops */
		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
			host->ops = pi->port_ops;
	}

	return host;
}
6229
6230
6231
6232
6233
6234
6235
6236
6237
6238
6239
6240
6241
6242
6243
6244
6245
6246
6247
6248
6249
6250
6251
6252
6253
6254
6255
6256
6257
6258
6259
6260
6261
6262
6263
6264
6265
6266
6267
6268
6269
6270
6271
6272
6273
6274
6275
/*
 * ata_slave_link_init - create a slave link for a master/slave port
 * @ap: port to attach the slave link to
 *
 * Allocates and initializes ap->slave_link (pmp number 1).  Must not
 * be called twice, and is incompatible with PMP-capable ports.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int ata_slave_link_init(struct ata_port *ap)
{
	struct ata_link *link;

	WARN_ON(ap->slave_link);
	WARN_ON(ap->flags & ATA_FLAG_PMP);

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	ata_link_init(ap, link, 1);
	ap->slave_link = link;
	return 0;
}
6291
/*
 * ata_host_stop - devres destructor that stops a started host
 * @gendev: device the host is attached to
 * @res: unused devres payload
 *
 * Calls each port's port_stop hook, then the host_stop hook.  Only
 * registered as a devres entry when at least one stop hook exists
 * (see ata_host_start()).
 */
static void ata_host_stop(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	WARN_ON(!(host->flags & ATA_HOST_STARTED));

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}

	if (host->ops->host_stop)
		host->ops->host_stop(host);
}
6309
6310
6311
6312
6313
6314
6315
6316
6317
6318
6319
6320
6321
6322
6323
6324
6325
6326
6327
6328
6329
/*
 * ata_finalize_port_ops - flatten a port_operations inheritance chain
 * @ops: operations table to finalize
 *
 * Walks the ->inherits chain, copying each still-NULL slot from the
 * ancestor (the ops struct is treated as an array of pointers up to,
 * but not including, the ->inherits field — so ->inherits must stay
 * the last member).  Slots deliberately disabled with an ERR_PTR
 * sentinel are then reset to NULL.  Finalization happens once;
 * afterwards ->inherits is NULL.  Serialized by a local spinlock
 * since multiple hosts may share an ops table.
 */
static void ata_finalize_port_ops(struct ata_port_operations *ops)
{
	static DEFINE_SPINLOCK(lock);
	const struct ata_port_operations *cur;
	void **begin = (void **)ops;
	void **end = (void **)&ops->inherits;
	void **pp;

	if (!ops || !ops->inherits)
		return;

	spin_lock(&lock);

	/* fill empty slots from each ancestor, nearest first */
	for (cur = ops->inherits; cur; cur = cur->inherits) {
		void **inherit = (void **)cur;

		for (pp = begin; pp < end; pp++, inherit++)
			if (!*pp)
				*pp = *inherit;
	}

	/* ERR_PTR marks "explicitly not implemented": clear to NULL */
	for (pp = begin; pp < end; pp++)
		if (IS_ERR(*pp))
			*pp = NULL;

	ops->inherits = NULL;

	spin_unlock(&lock);
}
6359
6360
6361
6362
6363
6364
6365
6366
6367
6368
6369
6370
6371
6372
6373
6374
6375
/*
 * ata_host_start - start all ports of a host
 * @host: host to start
 *
 * Finalizes all port_ops, picks host->ops from the first non-dummy
 * port when unset, registers an ata_host_stop devres entry if any
 * stop hook exists, then runs each port's port_start hook and
 * freezes the port for EH-driven probing.  Idempotent: returns 0
 * immediately when the host is already started.  On failure all
 * already-started ports are stopped and a negative errno returned.
 */
int ata_host_start(struct ata_host *host)
{
	int have_stop = 0;
	void *start_dr = NULL;
	int i, rc;

	if (host->flags & ATA_HOST_STARTED)
		return 0;

	ata_finalize_port_ops(host->ops);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_finalize_port_ops(ap->ops);

		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_stop)
			have_stop = 1;
	}

	if (host->ops->host_stop)
		have_stop = 1;

	/* allocate the stop devres BEFORE starting anything so we
	 * never end up started-but-unstoppable on OOM */
	if (have_stop) {
		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
		if (!start_dr)
			return -ENOMEM;
	}

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				if (rc != -ENODEV)
					dev_err(host->dev,
						"failed to start port %d (errno=%d)\n",
						i, rc);
				goto err_out;
			}
		}
		ata_eh_freeze_port(ap);
	}

	if (start_dr)
		devres_add(host->dev, start_dr);
	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	/* unwind: stop every port that was started before the failure */
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	devres_free(start_dr);
	return rc;
}
6439
6440
6441
6442
6443
6444
6445
6446
6447void ata_host_init(struct ata_host *host, struct device *dev,
6448 struct ata_port_operations *ops)
6449{
6450 spin_lock_init(&host->lock);
6451 mutex_init(&host->eh_mutex);
6452 host->n_tags = ATA_MAX_QUEUE;
6453 host->dev = dev;
6454 host->ops = ops;
6455 kref_init(&host->kref);
6456}
6457
6458void __ata_port_probe(struct ata_port *ap)
6459{
6460 struct ata_eh_info *ehi = &ap->link.eh_info;
6461 unsigned long flags;
6462
6463
6464 spin_lock_irqsave(ap->lock, flags);
6465
6466 ehi->probe_mask |= ATA_ALL_DEVICES;
6467 ehi->action |= ATA_EH_RESET;
6468 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6469
6470 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6471 ap->pflags |= ATA_PFLAG_LOADING;
6472 ata_port_schedule_eh(ap);
6473
6474 spin_unlock_irqrestore(ap->lock, flags);
6475}
6476
6477int ata_port_probe(struct ata_port *ap)
6478{
6479 int rc = 0;
6480
6481 if (ap->ops->error_handler) {
6482 __ata_port_probe(ap);
6483 ata_port_wait_eh(ap);
6484 } else {
6485 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6486 rc = ata_bus_probe(ap);
6487 DPRINTK("ata%u: bus probe end\n", ap->print_id);
6488 }
6489 return rc;
6490}
6491
6492
/* async callback that probes one port and then scans its SCSI host */
static void async_port_probe(void *data, async_cookie_t cookie)
{
	struct ata_port *ap = data;

	/*
	 * Unless the host allows parallel scanning, every port other
	 * than port 0 waits for all earlier async work (i.e. the
	 * preceding ports' probes) to finish before probing itself.
	 */
	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
		async_synchronize_cookie(cookie);

	/* probe result is intentionally ignored; scan proceeds anyway */
	(void)ata_port_probe(ap);

	/* synchronize with earlier cookies to keep device order stable
	 * before registering SCSI devices */
	async_synchronize_cookie(cookie);

	ata_scsi_scan_host(ap, 1);
}
6514
6515
6516
6517
6518
6519
6520
6521
6522
6523
6524
6525
6526
6527
6528
6529
6530
/**
 *	ata_host_register - register an initialized ATA host
 *	@host: ATA host to register
 *	@sht: template for the SCSI host
 *
 *	Register an initialized (and started) ATA host.  Assigns port
 *	ids, creates transport objects, adds SCSI hosts and kicks off
 *	asynchronous probing of every port.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE);

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_err(host->dev, "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Free unused trailing ports.  This happens when the LLD could
	 * not determine the exact number of ports at allocation time
	 * and over-allocated; the array is NULL-terminated past
	 * n_ports.
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* assign global and per-host port ids */
	for (i = 0; i < host->n_ports; i++) {
		host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
		host->ports[i]->local_port_no = i + 1;
	}

	/* create associated sysfs transport objects */
	for (i = 0; i < host->n_ports; i++) {
		rc = ata_tport_add(host->dev,host->ports[i]);
		if (rc) {
			goto err_tadd;
		}
	}

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		goto err_tadd;

	/* set cable type, init link speed limits and report each port */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned long xfer_mask;

		/* SATA ports without an explicit cable type get SATA */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);
		if (ap->slave_link)
			sata_link_init_spd(ap->slave_link);

		/* report per-port transfer mode capabilities */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap)) {
			ata_port_info(ap, "%cATA max %s %s\n",
				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
				      ata_mode_string(xfer_mask),
				      ap->link.eh_info.desc);
			ata_ehi_clear_desc(&ap->link.eh_info);
		} else
			ata_port_info(ap, "DUMMY\n");
	}

	/* perform each probe asynchronously */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		async_schedule(async_port_probe, ap);
	}

	return 0;

 err_tadd:
	/* tear down the transport objects created so far */
	while (--i >= 0) {
		ata_tport_delete(host->ports[i]);
	}
	return rc;

}
6612
6613
6614
6615
6616
6617
6618
6619
6620
6621
6622
6623
6624
6625
6626
6627
6628
6629
6630
6631
6632
6633
6634
6635
/**
 *	ata_host_activate - start host, request IRQ and register it
 *	@host: target ATA host
 *	@irq: IRQ to request (0 means polling mode, no IRQ)
 *	@irq_handler: handler used when requesting the IRQ
 *	@irq_flags: flags used when requesting the IRQ
 *	@sht: scsi_host_template used when registering the host
 *
 *	Convenience helper performing the usual activation sequence:
 *	ata_host_start(), devm_request_irq() and ata_host_register().
 *	With @irq == 0 the IRQ step is skipped entirely (and
 *	@irq_handler must be NULL).
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_activate(struct ata_host *host, int irq,
		      irq_handler_t irq_handler, unsigned long irq_flags,
		      struct scsi_host_template *sht)
{
	int i, rc;
	char *irq_desc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	/* special case for polling mode: no IRQ to request */
	if (!irq) {
		WARN_ON(irq_handler);
		return ata_host_register(host, sht);
	}

	/* device-managed "driver[device]" description for /proc/interrupts */
	irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]",
				  dev_driver_string(host->dev),
				  dev_name(host->dev));
	if (!irq_desc)
		return -ENOMEM;

	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
			      irq_desc, host);
	if (rc)
		return rc;

	for (i = 0; i < host->n_ports; i++)
		ata_port_desc(host->ports[i], "irq %d", irq);

	rc = ata_host_register(host, sht);
	/* if registration failed, release the IRQ right away */
	if (rc)
		devm_free_irq(host->dev, irq, host);

	return rc;
}
6674
6675
6676
6677
6678
6679
6680
6681
6682
6683
6684
6685
/**
 *	ata_port_detach - detach an ATA port in preparation of removal
 *	@ap: ATA port to be detached
 *
 *	Shut down EH for @ap (marking it UNLOADING and waiting for EH
 *	to finish), clean up ZPODD state and PMP transport links, then
 *	remove the associated SCSI host and transport port object.
 *	@ap is quiescent on return.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	struct ata_link *link;
	struct ata_device *dev;

	/* legacy ports have no EH to shut down */
	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	ata_port_schedule_eh(ap);
	spin_unlock_irqrestore(ap->lock, flags);

	/* wait till EH commits suicide */
	ata_port_wait_eh(ap);

	/* it better be dead now */
	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));

	cancel_delayed_work_sync(&ap->hotplug_task);

 skip_eh:
	/* tear down ZPODD state on every device of every link */
	ata_for_each_link(link, ap, HOST_FIRST) {
		ata_for_each_dev(dev, link, ALL) {
			if (zpodd_dev_enabled(dev))
				zpodd_exit(dev);
		}
	}
	/* remove transport objects for PMP links, if any */
	if (ap->pmp_link) {
		int i;
		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
			ata_tlink_delete(&ap->pmp_link[i]);
	}

	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
	ata_tport_delete(ap);
}
6726
6727
6728
6729
6730
6731
6732
6733
6734
6735
6736void ata_host_detach(struct ata_host *host)
6737{
6738 int i;
6739
6740
6741 async_synchronize_full();
6742
6743 for (i = 0; i < host->n_ports; i++)
6744 ata_port_detach(host->ports[i]);
6745
6746
6747 ata_acpi_dissociate(host);
6748}
6749
6750#ifdef CONFIG_PCI
6751
6752
6753
6754
6755
6756
6757
6758
6759
6760
6761
6762
/*
 * PCI driver ->remove callback: detach the ata_host stored in the PCI
 * device's drvdata.  Resources are released via devres.
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	ata_host_detach(pci_get_drvdata(pdev));
}
6769
6770
6771int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6772{
6773 unsigned long tmp = 0;
6774
6775 switch (bits->width) {
6776 case 1: {
6777 u8 tmp8 = 0;
6778 pci_read_config_byte(pdev, bits->reg, &tmp8);
6779 tmp = tmp8;
6780 break;
6781 }
6782 case 2: {
6783 u16 tmp16 = 0;
6784 pci_read_config_word(pdev, bits->reg, &tmp16);
6785 tmp = tmp16;
6786 break;
6787 }
6788 case 4: {
6789 u32 tmp32 = 0;
6790 pci_read_config_dword(pdev, bits->reg, &tmp32);
6791 tmp = tmp32;
6792 break;
6793 }
6794
6795 default:
6796 return -EINVAL;
6797 }
6798
6799 tmp &= bits->mask;
6800
6801 return (tmp == bits->val) ? 1 : 0;
6802}
6803
6804#ifdef CONFIG_PM
/**
 *	ata_pci_device_do_suspend - PCI-layer half of libata suspend
 *	@pdev: PCI device to suspend
 *	@mesg: PM message; only PM_EVENT_SLEEP transitions to D3hot
 *
 *	Save config space and disable the device; for a real sleep
 *	event also drop the device into D3hot.
 */
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event & PM_EVENT_SLEEP)
		pci_set_power_state(pdev, PCI_D3hot);
}
6813
/**
 *	ata_pci_device_do_resume - PCI-layer half of libata resume
 *	@pdev: PCI device to resume
 *
 *	Bring the device back to D0, restore config space, re-enable
 *	it (device-managed) and re-assert bus mastering.
 *
 *	RETURNS:
 *	0 on success, -errno if the device could not be re-enabled.
 */
int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev,
			"failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}
6831
6832int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6833{
6834 struct ata_host *host = pci_get_drvdata(pdev);
6835 int rc = 0;
6836
6837 rc = ata_host_suspend(host, mesg);
6838 if (rc)
6839 return rc;
6840
6841 ata_pci_device_do_suspend(pdev, mesg);
6842
6843 return 0;
6844}
6845
/*
 * Combined PCI + libata resume: power up the PCI device first and,
 * only if that succeeded, resume the ata_host.
 *
 * Returns 0 on success or the PCI resume error.
 */
int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	ata_host_resume(host);
	return 0;
}
6856#endif
6857
6858#endif
6859
6860
6861
6862
6863
6864
6865
6866
6867
6868
6869
6870
/*
 * Platform driver ->remove callback: detach the ata_host stored in
 * the platform device's drvdata.  Always succeeds.
 */
int ata_platform_remove_one(struct platform_device *pdev)
{
	ata_host_detach(platform_get_drvdata(pdev));

	return 0;
}
6879
/**
 *	ata_parse_force_one - parse one libata.force entry
 *	@cur: option string to parse; advanced past this entry on return
 *	@force_ent: parsed result is stored here
 *	@reason: set to a human readable failure reason on error
 *
 *	Parse a single comma-separated libata.force entry of the form
 *	"[PORT[.DEVICE]:]VALUE".  VALUE may be abbreviated as long as
 *	the abbreviation matches exactly one name in force_tbl (an
 *	exact match always wins over prefix matches).
 *
 *	RETURNS:
 *	0 on success, -EINVAL otherwise (with *@reason set).
 */
static int __init ata_parse_force_one(char **cur,
				      struct ata_force_ent *force_ent,
				      const char **reason)
{
	/* table of recognized VALUE names and their force effects */
	static const struct ata_force_param force_tbl[] __initconst = {
		{ "40c",	.cbl		= ATA_CBL_PATA40 },
		{ "80c",	.cbl		= ATA_CBL_PATA80 },
		{ "short40c",	.cbl		= ATA_CBL_PATA40_SHORT },
		{ "unk",	.cbl		= ATA_CBL_PATA_UNK },
		{ "ign",	.cbl		= ATA_CBL_PATA_IGN },
		{ "sata",	.cbl		= ATA_CBL_SATA },
		{ "1.5Gbps",	.spd_limit	= 1 },
		{ "3.0Gbps",	.spd_limit	= 2 },
		{ "noncq",	.horkage_on	= ATA_HORKAGE_NONCQ },
		{ "ncq",	.horkage_off	= ATA_HORKAGE_NONCQ },
		{ "noncqtrim",	.horkage_on	= ATA_HORKAGE_NO_NCQ_TRIM },
		{ "ncqtrim",	.horkage_off	= ATA_HORKAGE_NO_NCQ_TRIM },
		{ "dump_id",	.horkage_on	= ATA_HORKAGE_DUMP_ID },
		{ "pio0",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 0) },
		{ "pio1",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 1) },
		{ "pio2",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 2) },
		{ "pio3",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 3) },
		{ "pio4",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 4) },
		{ "pio5",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 5) },
		{ "pio6",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 6) },
		{ "mwdma0",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 0) },
		{ "mwdma1",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 1) },
		{ "mwdma2",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 2) },
		{ "mwdma3",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 3) },
		{ "mwdma4",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 4) },
		{ "udma0",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
		{ "udma16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
		{ "udma/16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
		{ "udma1",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
		{ "udma25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
		{ "udma/25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
		{ "udma2",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
		{ "udma33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
		{ "udma/33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
		{ "udma3",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
		{ "udma44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
		{ "udma/44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
		{ "udma4",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
		{ "udma66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
		{ "udma/66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
		{ "udma5",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
		{ "udma100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
		{ "udma/100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
		{ "udma6",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
		{ "udma133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
		{ "udma/133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
		{ "udma7",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 7) },
		{ "nohrst",	.lflags		= ATA_LFLAG_NO_HRST },
		{ "nosrst",	.lflags		= ATA_LFLAG_NO_SRST },
		{ "norst",	.lflags		= ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
		{ "rstonce",	.lflags		= ATA_LFLAG_RST_ONCE },
		{ "atapi_dmadir", .horkage_on	= ATA_HORKAGE_ATAPI_DMADIR },
		{ "disable",	.horkage_on	= ATA_HORKAGE_DISABLE },
	};
	char *start = *cur, *p = *cur;
	char *id, *val, *endp;
	const struct ata_force_param *match_fp = NULL;
	int nr_matches = 0, i;

	/* find where this param ends and update *cur */
	while (*p != '\0' && *p != ',')
		p++;

	if (*p == '\0')
		*cur = p;
	else
		*cur = p + 1;

	*p = '\0';

	/* parse the optional "PORT[.DEVICE]:" prefix */
	p = strchr(start, ':');
	if (!p) {
		val = strstrip(start);
		goto parse_val;
	}
	*p = '\0';

	id = strstrip(start);
	val = strstrip(p + 1);

	/* parse the optional ".DEVICE" part */
	p = strchr(id, '.');
	if (p) {
		*p++ = '\0';
		force_ent->device = simple_strtoul(p, &endp, 10);
		if (p == endp || *endp != '\0') {
			*reason = "invalid device";
			return -EINVAL;
		}
	}

	force_ent->port = simple_strtoul(id, &endp, 10);
	if (id == endp || *endp != '\0') {
		*reason = "invalid port/link";
		return -EINVAL;
	}

 parse_val:
	/* match VALUE against the table; abbreviations are allowed */
	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
		const struct ata_force_param *fp = &force_tbl[i];

		if (strncasecmp(val, fp->name, strlen(val)))
			continue;

		nr_matches++;
		match_fp = fp;

		/* an exact match always wins over prefix matches */
		if (strcasecmp(val, fp->name) == 0) {
			nr_matches = 1;
			break;
		}
	}

	if (!nr_matches) {
		*reason = "unknown value";
		return -EINVAL;
	}
	if (nr_matches > 1) {
		*reason = "ambiguous value";
		return -EINVAL;
	}

	force_ent->param = *match_fp;

	return 0;
}
7013
/*
 * Parse the whole libata.force module parameter: split
 * ata_force_param_buf on ',' and run each entry through
 * ata_parse_force_one() into the global ata_force_tbl.  Entries
 * without an explicit port inherit port/device from the previous
 * entry; malformed entries are warned about and skipped.
 */
static void __init ata_parse_force_param(void)
{
	int idx = 0, size = 1;
	int last_port = -1, last_device = -1;
	char *p, *cur, *next;

	/* upper bound on entry count: number of commas + 1 */
	for (p = ata_force_param_buf; *p; p++)
		if (*p == ',')
			size++;

	ata_force_tbl = kcalloc(size, sizeof(ata_force_tbl[0]), GFP_KERNEL);
	if (!ata_force_tbl) {
		printk(KERN_WARNING "ata: failed to extend force table, "
		       "libata.force ignored\n");
		return;
	}

	/* parse each entry into the table */
	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
		const char *reason = "";
		struct ata_force_ent te = { .port = -1, .device = -1 };

		next = cur;
		if (ata_parse_force_one(&next, &te, &reason)) {
			printk(KERN_WARNING "ata: failed to parse force "
			       "parameter \"%s\" (%s)\n",
			       cur, reason);
			continue;
		}

		/* entries with no port inherit the previous entry's target */
		if (te.port == -1) {
			te.port = last_port;
			te.device = last_device;
		}

		ata_force_tbl[idx++] = te;

		last_port = te.port;
		last_device = te.device;
	}

	ata_force_tbl_size = idx;
}
7058
7059static int __init ata_init(void)
7060{
7061 int rc;
7062
7063 ata_parse_force_param();
7064
7065 rc = ata_sff_init();
7066 if (rc) {
7067 kfree(ata_force_tbl);
7068 return rc;
7069 }
7070
7071 libata_transport_init();
7072 ata_scsi_transport_template = ata_attach_transport();
7073 if (!ata_scsi_transport_template) {
7074 ata_sff_exit();
7075 rc = -ENOMEM;
7076 goto err_out;
7077 }
7078
7079 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
7080 return 0;
7081
7082err_out:
7083 return rc;
7084}
7085
/* Module unload: tear down in reverse order of ata_init() */
static void __exit ata_exit(void)
{
	ata_release_transport(ata_scsi_transport_template);
	libata_transport_exit();
	ata_sff_exit();
	kfree(ata_force_tbl);
}
7093
/* init early at subsystem time so LLD drivers find libata ready */
subsys_initcall(ata_init);
module_exit(ata_exit);

/* shared ratelimit state used by ata_ratelimit(): 1 burst per HZ/5 */
static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
7098
/*
 * ata_ratelimit - rate limiting gate for libata messages
 *
 * Returns nonzero when the caller may print, 0 when the shared
 * ratelimit state says to suppress the message.
 */
int ata_ratelimit(void)
{
	return __ratelimit(&ratelimit);
}
7103
7104
7105
7106
7107
7108
7109
7110
7111
7112
7113
7114
7115
7116
7117
/**
 *	ata_msleep - EH-ownership-aware msleep
 *	@ap: ATA port to attribute the sleep to (may be NULL)
 *	@msecs: duration to sleep in milliseconds
 *
 *	Sleeps @msecs.  If the current task owns @ap's host EH, the
 *	ownership is released before sleeping and reacquired after, so
 *	other ports sharing the host can run their EH meanwhile.
 *
 *	LOCKING:
 *	Might sleep.
 */
void ata_msleep(struct ata_port *ap, unsigned int msecs)
{
	bool owns_eh = ap && ap->host->eh_owner == current;

	if (owns_eh)
		ata_eh_release(ap);

	if (msecs < 20) {
		/* short sleeps: usleep_range for better timer accuracy */
		unsigned long usecs = msecs * USEC_PER_MSEC;
		usleep_range(usecs, usecs + 50);
	} else {
		msleep(msecs);
	}

	if (owns_eh)
		ata_eh_acquire(ap);
}
7135
7136
7137
7138
7139
7140
7141
7142
7143
7144
7145
7146
7147
7148
7149
7150
7151
7152
7153
7154
7155
7156
7157
7158
7159
7160u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
7161 unsigned long interval, unsigned long timeout)
7162{
7163 unsigned long deadline;
7164 u32 tmp;
7165
7166 tmp = ioread32(reg);
7167
7168
7169
7170
7171
7172 deadline = ata_deadline(jiffies, timeout);
7173
7174 while ((tmp & mask) == val && time_before(jiffies, deadline)) {
7175 ata_msleep(ap, interval);
7176 tmp = ioread32(reg);
7177 }
7178
7179 return tmp;
7180}
7181
7182
7183
7184
7185
7186
7187
7188
7189
7190
7191
7192
7193
/**
 *	sata_lpm_ignore_phy_events - test if a PHY event should be ignored
 *	@link: link receiving the PHY event
 *
 *	RETURNS:
 *	true if the event must be ignored: either because LPM is
 *	enabled (PHYRDY changes are expected then) or because the LPM
 *	policy changed recently and the event is likely spurious.
 */
bool sata_lpm_ignore_phy_events(struct ata_link *link)
{
	/* window after an LPM policy change during which events are noise */
	unsigned long lpm_timeout = link->last_lpm_change +
				    msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);

	/* if LPM is enabled, PHYRDY doesn't mean anything */
	if (link->lpm_policy > ATA_LPM_MAX_POWER)
		return true;

	/* ignore the first PHY event after the LPM policy changed
	 * as it may be spurious */
	if ((link->flags & ATA_LFLAG_CHANGED) &&
	    time_before(jiffies, lpm_timeout))
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
7213
7214
7215
7216
/* dummy port qc_issue: every command fails immediately with a
 * system error so nothing is ever actually issued */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}
7221
/* dummy port error handler: nothing to recover on a dummy port */
static void ata_dummy_error_handler(struct ata_port *ap)
{
	/* truly nothing to do */
}
7226
/* port_ops for dummy ports: commands fail at issue, EH is a no-op */
struct ata_port_operations ata_dummy_port_ops = {
	.qc_prep = ata_noop_qc_prep,
	.qc_issue = ata_dummy_qc_issue,
	.error_handler = ata_dummy_error_handler,
	.sched_eh = ata_std_sched_eh,
	.end_eh = ata_std_end_eh,
};
7234
/* matching port_info so LLDs can mark an unused slot as a dummy port */
const struct ata_port_info ata_dummy_port_info = {
	.port_ops = &ata_dummy_port_ops,
};
7238
7239
7240
7241
/*
 * ata_port_printk - printk prefixed with "ataN:" for a port
 * @ap: target port (only print_id is read)
 * @level: printk level string, e.g. KERN_ERR
 * @fmt: printf-style format, followed by its arguments
 */
void ata_port_printk(const struct ata_port *ap, const char *level,
		     const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	/* %pV expands the nested va_format */
	printk("%sata%u: %pV", level, ap->print_id, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(ata_port_printk);
7258
/*
 * ata_link_printk - printk prefixed with the link's name
 * @link: target link
 * @level: printk level string, e.g. KERN_ERR
 * @fmt: printf-style format, followed by its arguments
 *
 * PMP-attached ports and ports with a slave link print "ataN.MM:"
 * (MM = PMP number); plain ports print just "ataN:".
 */
void ata_link_printk(const struct ata_link *link, const char *level,
		     const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	if (sata_pmp_attached(link->ap) || link->ap->slave_link)
		printk("%sata%u.%02u: %pV",
		       level, link->ap->print_id, link->pmp, &vaf);
	else
		printk("%sata%u: %pV",
		       level, link->ap->print_id, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(ata_link_printk);
7280
/*
 * ata_dev_printk - printk prefixed with the device's name
 * @dev: target device
 * @level: printk level string, e.g. KERN_ERR
 * @fmt: printf-style format, followed by its arguments
 *
 * Prints "ataN.MM:" where MM combines the link's PMP number and the
 * device number on the link.
 */
void ata_dev_printk(const struct ata_device *dev, const char *level,
		    const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	printk("%sata%u.%02u: %pV",
	       level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
	       &vaf);

	va_end(args);
}
EXPORT_SYMBOL(ata_dev_printk);
7299
/* log a driver version string at KERN_DEBUG for device @dev */
void ata_print_version(const struct device *dev, const char *version)
{
	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
}
EXPORT_SYMBOL(ata_print_version);
7305
7306
7307
7308
7309
7310
7311
7312EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
7313EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
7314EXPORT_SYMBOL_GPL(sata_deb_timing_long);
7315EXPORT_SYMBOL_GPL(ata_base_port_ops);
7316EXPORT_SYMBOL_GPL(sata_port_ops);
7317EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
7318EXPORT_SYMBOL_GPL(ata_dummy_port_info);
7319EXPORT_SYMBOL_GPL(ata_link_next);
7320EXPORT_SYMBOL_GPL(ata_dev_next);
7321EXPORT_SYMBOL_GPL(ata_std_bios_param);
7322EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
7323EXPORT_SYMBOL_GPL(ata_host_init);
7324EXPORT_SYMBOL_GPL(ata_host_alloc);
7325EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
7326EXPORT_SYMBOL_GPL(ata_slave_link_init);
7327EXPORT_SYMBOL_GPL(ata_host_start);
7328EXPORT_SYMBOL_GPL(ata_host_register);
7329EXPORT_SYMBOL_GPL(ata_host_activate);
7330EXPORT_SYMBOL_GPL(ata_host_detach);
7331EXPORT_SYMBOL_GPL(ata_sg_init);
7332EXPORT_SYMBOL_GPL(ata_qc_complete);
7333EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
7334EXPORT_SYMBOL_GPL(atapi_cmd_type);
7335EXPORT_SYMBOL_GPL(ata_tf_to_fis);
7336EXPORT_SYMBOL_GPL(ata_tf_from_fis);
7337EXPORT_SYMBOL_GPL(ata_pack_xfermask);
7338EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
7339EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
7340EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
7341EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
7342EXPORT_SYMBOL_GPL(ata_mode_string);
7343EXPORT_SYMBOL_GPL(ata_id_xfermask);
7344EXPORT_SYMBOL_GPL(ata_do_set_mode);
7345EXPORT_SYMBOL_GPL(ata_std_qc_defer);
7346EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
7347EXPORT_SYMBOL_GPL(ata_dev_disable);
7348EXPORT_SYMBOL_GPL(sata_set_spd);
7349EXPORT_SYMBOL_GPL(ata_wait_after_reset);
7350EXPORT_SYMBOL_GPL(sata_link_debounce);
7351EXPORT_SYMBOL_GPL(sata_link_resume);
7352EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
7353EXPORT_SYMBOL_GPL(ata_std_prereset);
7354EXPORT_SYMBOL_GPL(sata_link_hardreset);
7355EXPORT_SYMBOL_GPL(sata_std_hardreset);
7356EXPORT_SYMBOL_GPL(ata_std_postreset);
7357EXPORT_SYMBOL_GPL(ata_dev_classify);
7358EXPORT_SYMBOL_GPL(ata_dev_pair);
7359EXPORT_SYMBOL_GPL(ata_ratelimit);
7360EXPORT_SYMBOL_GPL(ata_msleep);
7361EXPORT_SYMBOL_GPL(ata_wait_register);
7362EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
7363EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
7364EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
7365EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
7366EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
7367EXPORT_SYMBOL_GPL(sata_scr_valid);
7368EXPORT_SYMBOL_GPL(sata_scr_read);
7369EXPORT_SYMBOL_GPL(sata_scr_write);
7370EXPORT_SYMBOL_GPL(sata_scr_write_flush);
7371EXPORT_SYMBOL_GPL(ata_link_online);
7372EXPORT_SYMBOL_GPL(ata_link_offline);
7373#ifdef CONFIG_PM
7374EXPORT_SYMBOL_GPL(ata_host_suspend);
7375EXPORT_SYMBOL_GPL(ata_host_resume);
7376#endif
7377EXPORT_SYMBOL_GPL(ata_id_string);
7378EXPORT_SYMBOL_GPL(ata_id_c_string);
7379EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
7380EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7381
7382EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
7383EXPORT_SYMBOL_GPL(ata_timing_find_mode);
7384EXPORT_SYMBOL_GPL(ata_timing_compute);
7385EXPORT_SYMBOL_GPL(ata_timing_merge);
7386EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
7387
7388#ifdef CONFIG_PCI
7389EXPORT_SYMBOL_GPL(pci_test_config_bits);
7390EXPORT_SYMBOL_GPL(ata_pci_remove_one);
7391#ifdef CONFIG_PM
7392EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7393EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
7394EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7395EXPORT_SYMBOL_GPL(ata_pci_device_resume);
7396#endif
7397#endif
7398
7399EXPORT_SYMBOL_GPL(ata_platform_remove_one);
7400
7401EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7402EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7403EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
7404EXPORT_SYMBOL_GPL(ata_port_desc);
7405#ifdef CONFIG_PCI
7406EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
7407#endif
7408EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
7409EXPORT_SYMBOL_GPL(ata_link_abort);
7410EXPORT_SYMBOL_GPL(ata_port_abort);
7411EXPORT_SYMBOL_GPL(ata_port_freeze);
7412EXPORT_SYMBOL_GPL(sata_async_notification);
7413EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7414EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
7415EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7416EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
7417EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
7418EXPORT_SYMBOL_GPL(ata_do_eh);
7419EXPORT_SYMBOL_GPL(ata_std_error_handler);
7420
7421EXPORT_SYMBOL_GPL(ata_cable_40wire);
7422EXPORT_SYMBOL_GPL(ata_cable_80wire);
7423EXPORT_SYMBOL_GPL(ata_cable_unknown);
7424EXPORT_SYMBOL_GPL(ata_cable_ignore);
7425EXPORT_SYMBOL_GPL(ata_cable_sata);
7426EXPORT_SYMBOL_GPL(ata_host_get);
7427EXPORT_SYMBOL_GPL(ata_host_put);
7428