1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35#include <linux/kernel.h>
36#include <linux/blkdev.h>
37#include <linux/pci.h>
38#include <scsi/scsi.h>
39#include <scsi/scsi_host.h>
40#include <scsi/scsi_eh.h>
41#include <scsi/scsi_device.h>
42#include <scsi/scsi_cmnd.h>
43#include <scsi/scsi_dbg.h>
44#include "../scsi/scsi_transport_api.h"
45
46#include <linux/libata.h>
47
48#include "libata.h"
49
enum {
	/* speed down verdicts returned by ata_eh_speed_down_verdict() */
	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
	ATA_EH_SPDN_KEEP_ERRORS		= (1 << 3),

	/* error flags recorded in the per-device error ring */
	ATA_EFLAG_IS_IO			= (1 << 0),
	ATA_EFLAG_DUBIOUS_XFER		= (1 << 1),
	ATA_EFLAG_OLD_ER		= (1 << 31),	/* marks stale entries */

	/* error categories used by ata_eh_categorize_error() */
	ATA_ECAT_NONE			= 0,
	ATA_ECAT_ATA_BUS		= 1,
	ATA_ECAT_TOUT_HSM		= 2,
	ATA_ECAT_UNK_DEV		= 3,
	ATA_ECAT_DUBIOUS_NONE		= 4,
	ATA_ECAT_DUBIOUS_ATA_BUS	= 5,
	ATA_ECAT_DUBIOUS_TOUT_HSM	= 6,
	ATA_ECAT_DUBIOUS_UNK_DEV	= 7,
	ATA_ECAT_NR			= 8,

	/* default timeout (ms) for internal commands not in the table */
	ATA_EH_CMD_DFL_TIMEOUT		= 5000,

	/* cool-down period (ms) after a reset */
	ATA_EH_RESET_COOL_DOWN		= 5000,

	/* timeout (ms) waiting for prereset to complete */
	ATA_EH_PRERESET_TIMEOUT		= 10000,
	/* interval (ms) between fast-drain timer checks */
	ATA_EH_FASTDRAIN_INTERVAL	= 3000,

	/* retries when clearing UNIT ATTENTION on ATAPI devices */
	ATA_EH_UA_TRIES			= 5,

	/* probe-trial accounting window (ms) and allowed trials within it */
	ATA_EH_PROBE_TRIAL_INTERVAL	= 60000,
	ATA_EH_PROBE_TRIALS		= 2,
};
92
93
94
95
96
97
98
/* Escalating reset timeouts (ms); the last entry caps retries. */
static const unsigned long ata_eh_reset_timeouts[] = {
	10000,		/* most drives spin up by 10sec */
	10000,		/* > 99% working drives spin up before 20sec */
	35000,		/* give misbehaving devices a chance */
	 5000,		/* and sweet one last chance */
	ULONG_MAX,	/* > 1 min has elapsed, give up */
};
106
/* Escalating timeouts (ms) for IDENTIFY (DEVICE/PACKET DEVICE). */
static const unsigned long ata_eh_identify_timeouts[] = {
	 5000,		/* covers > 99% of successes and not too boring on failures */
	10000,		/* combined time till here is enough even for media access */
	30000,		/* for true idiots */
	ULONG_MAX,
};
113
/* Escalating timeouts (ms) for FLUSH CACHE (EXT); flushes can be slow. */
static const unsigned long ata_eh_flush_timeouts[] = {
	15000,		/* be generous with flush */
	15000,		/* ditto */
	30000,		/* and even more generous */
	ULONG_MAX,
};
120
/* Escalating timeouts (ms) for other table-listed internal commands. */
static const unsigned long ata_eh_other_timeouts[] = {
	 5000,		/* same rationale as identify timeout */
	10000,		/* ditto */
	/* but no merciful 30sec for other commands, it just isn't worth it */
	ULONG_MAX,
};
127
/*
 * Maps a zero-terminated list of ATA commands to an escalating
 * timeout sequence (terminated with ULONG_MAX).
 */
struct ata_eh_cmd_timeout_ent {
	const u8 *commands;		/* zero-terminated command list */
	const unsigned long *timeouts;	/* ULONG_MAX-terminated, in ms */
};
132
133
134
135
136
137
138
139
140
141
142
143
144
/*
 * The following table determines timeouts to use for internal EH
 * commands.  Each entry maps a set of commands to a timeout sequence;
 * on timeout, the next (longer) timeout in the sequence is tried.
 * CMDS() builds a zero-terminated command list literal.
 */
#define CMDS(cmds...)	(const u8 []){ cmds, 0 }
static const struct ata_eh_cmd_timeout_ent
ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
	{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
	  .timeouts = ata_eh_identify_timeouts, },
	{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_FEATURES),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
	  .timeouts = ata_eh_flush_timeouts },
};
#undef CMDS
162
/* forward declarations for EH internals */
static void __ata_port_freeze(struct ata_port *ap);
#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
#else /* CONFIG_PM */
/* without power management, suspend/resume handling is a no-op */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{ }

static void ata_eh_handle_port_resume(struct ata_port *ap)
{ }
#endif /* CONFIG_PM */
174
175static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
176 va_list args)
177{
178 ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
179 ATA_EH_DESC_LEN - ehi->desc_len,
180 fmt, args);
181}
182
183
184
185
186
187
188
189
190
191
192
/**
 *	__ata_ehi_push_desc - push error description without adding separator
 *	@ehi: target EHI
 *	@fmt: printf format string
 *
 *	Format string is appended to @ehi->desc as-is (no ", " separator).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}
201
202
203
204
205
206
207
208
209
210
211
212
/**
 *	ata_ehi_push_desc - push error description with separator
 *	@ehi: target EHI
 *	@fmt: printf format string
 *
 *	Format string is appended to @ehi->desc.  If @ehi->desc is not
 *	empty, ", " is added in-between.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	if (ehi->desc_len)
		__ata_ehi_push_desc(ehi, ", ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}
224
225
226
227
228
229
230
231
232
233
234void ata_ehi_clear_desc(struct ata_eh_info *ehi)
235{
236 ehi->desc[0] = '\0';
237 ehi->desc_len = 0;
238}
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
254{
255 va_list args;
256
257 WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));
258
259 if (ap->link.eh_info.desc_len)
260 __ata_ehi_push_desc(&ap->link.eh_info, " ");
261
262 va_start(args, fmt);
263 __ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
264 va_end(args);
265}
266
267#ifdef CONFIG_PCI
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
285 const char *name)
286{
287 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
288 char *type = "";
289 unsigned long long start, len;
290
291 if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
292 type = "m";
293 else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
294 type = "i";
295
296 start = (unsigned long long)pci_resource_start(pdev, bar);
297 len = (unsigned long long)pci_resource_len(pdev, bar);
298
299 if (offset < 0)
300 ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
301 else
302 ata_port_desc(ap, "%s 0x%llx", name,
303 start + (unsigned long long)offset);
304}
305
306#endif
307
308static int ata_lookup_timeout_table(u8 cmd)
309{
310 int i;
311
312 for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
313 const u8 *cur;
314
315 for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
316 if (*cur == cmd)
317 return i;
318 }
319
320 return -1;
321}
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
337{
338 struct ata_eh_context *ehc = &dev->link->eh_context;
339 int ent = ata_lookup_timeout_table(cmd);
340 int idx;
341
342 if (ent < 0)
343 return ATA_EH_CMD_DFL_TIMEOUT;
344
345 idx = ehc->cmd_timeout_idx[dev->devno][ent];
346 return ata_eh_cmd_timeout_table[ent].timeouts[idx];
347}
348
349
350
351
352
353
354
355
356
357
358
359
360
361void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
362{
363 struct ata_eh_context *ehc = &dev->link->eh_context;
364 int ent = ata_lookup_timeout_table(cmd);
365 int idx;
366
367 if (ent < 0)
368 return;
369
370 idx = ehc->cmd_timeout_idx[dev->devno][ent];
371 if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
372 ehc->cmd_timeout_idx[dev->devno][ent]++;
373}
374
375static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
376 unsigned int err_mask)
377{
378 struct ata_ering_entry *ent;
379
380 WARN_ON(!err_mask);
381
382 ering->cursor++;
383 ering->cursor %= ATA_ERING_SIZE;
384
385 ent = &ering->ring[ering->cursor];
386 ent->eflags = eflags;
387 ent->err_mask = err_mask;
388 ent->timestamp = get_jiffies_64();
389}
390
391static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
392{
393 struct ata_ering_entry *ent = &ering->ring[ering->cursor];
394
395 if (ent->err_mask)
396 return ent;
397 return NULL;
398}
399
400int ata_ering_map(struct ata_ering *ering,
401 int (*map_fn)(struct ata_ering_entry *, void *),
402 void *arg)
403{
404 int idx, rc = 0;
405 struct ata_ering_entry *ent;
406
407 idx = ering->cursor;
408 do {
409 ent = &ering->ring[idx];
410 if (!ent->err_mask)
411 break;
412 rc = map_fn(ent, arg);
413 if (rc)
414 break;
415 idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
416 } while (idx != ering->cursor);
417
418 return rc;
419}
420
/* ata_ering_map() callback: mark an entry as from a previous error cycle. */
int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
{
	ent->eflags |= ATA_EFLAG_OLD_ER;
	return 0;
}
426
/* Mark all error ring entries stale; speed-down accounting ignores them. */
static void ata_ering_clear(struct ata_ering *ering)
{
	ata_ering_map(ering, ata_ering_clear_cb, NULL);
}
431
432static unsigned int ata_eh_dev_action(struct ata_device *dev)
433{
434 struct ata_eh_context *ehc = &dev->link->eh_context;
435
436 return ehc->i.action | ehc->i.dev_action[dev->devno];
437}
438
/*
 * Clear @action from @ehi for @dev (or for the whole link if @dev is
 * NULL).  When clearing for one device, any link-wide bits in @action
 * are first distributed to every device's per-dev mask so the other
 * devices keep them.
 */
static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
				struct ata_eh_info *ehi, unsigned int action)
{
	struct ata_device *tdev;

	if (!dev) {
		/* link-wide clear: drop from both link and all devices */
		ehi->action &= ~action;
		ata_for_each_dev(tdev, link, ALL)
			ehi->dev_action[tdev->devno] &= ~action;
	} else {
		/* doesn't make sense for port-wide EH actions */
		WARN_ON(!(action & ATA_EH_PERDEV_MASK));

		/* break ehi->action into ehi->dev_action */
		if (ehi->action & action) {
			ata_for_each_dev(tdev, link, ALL)
				ehi->dev_action[tdev->devno] |=
					ehi->action & action;
			ehi->action &= ~action;
		}

		/* turn off the specified per-dev action */
		ehi->dev_action[dev->devno] &= ~action;
	}
}
464
465
466
467
468
469
470
471
472
473
474
475
/**
 *	ata_eh_acquire - acquire EH ownership
 *	@ap: ATA port to acquire EH ownership for
 *
 *	Acquire EH ownership for @ap.  This is the basic exclusion
 *	mechanism for ports sharing a host.  Only one EH instance per
 *	host may run at a time.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_eh_acquire(struct ata_port *ap)
{
	mutex_lock(&ap->host->eh_mutex);
	/* ownership must have been released before reacquisition */
	WARN_ON_ONCE(ap->host->eh_owner);
	ap->host->eh_owner = current;
}
482
483
484
485
486
487
488
489
490
491
492
/**
 *	ata_eh_release - release EH ownership
 *	@ap: ATA port to release EH ownership for
 *
 *	Release EH ownership for @ap.  Must be called by the same task
 *	that acquired it.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_eh_release(struct ata_port *ap)
{
	WARN_ON_ONCE(ap->host->eh_owner != current);
	ap->host->eh_owner = NULL;
	mutex_unlock(&ap->host->eh_mutex);
}
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
/**
 *	ata_scsi_timed_out - SCSI layer time out callback
 *	@cmd: timed out SCSI command
 *
 *	Handles SCSI layer timeout.  For new-style EH, nothing is done
 *	here (BLK_EH_NOT_HANDLED is returned) and the command is handed
 *	over to the proper EH path via the SCSI midlayer.  For old-style
 *	EH, the qc (if any) is marked for EH with AC_ERR_TIMEOUT so that
 *	->eng_timeout can deal with it.
 *
 *	LOCKING:
 *	Called from timer context.
 *
 *	RETURNS:
 *	BLK_EH_HANDLED or BLK_EH_NOT_HANDLED.
 */
enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	struct ata_queued_cmd *qc;
	enum blk_eh_timer_return ret;

	DPRINTK("ENTER\n");

	/* new-style EH: let the midlayer route the command to ata_scsi_error */
	if (ap->ops->error_handler) {
		ret = BLK_EH_NOT_HANDLED;
		goto out;
	}

	/* old-style EH below */
	ret = BLK_EH_HANDLED;
	spin_lock_irqsave(ap->lock, flags);
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc) {
		WARN_ON(qc->scsicmd != cmd);
		/* mark for EH and let eng_timeout handle it */
		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
		qc->err_mask |= AC_ERR_TIMEOUT;
		ret = BLK_EH_NOT_HANDLED;
	}
	spin_unlock_irqrestore(ap->lock, flags);

 out:
	DPRINTK("EXIT, ret=%d\n", ret);
	return ret;
}
549
/*
 * Called when the driver is being unloaded: restore SControl IPM/SPD
 * bits to their saved values, disable all devices, then freeze the port
 * and mark it unloaded so no further EH runs are attempted.
 */
static void ata_eh_unload(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/* Restore SControl IPM and SPD for the next driver and
	 * disable attached devices.
	 */
	ata_for_each_link(link, ap, PMP_FIRST) {
		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
		ata_for_each_dev(dev, link, ALL)
			ata_dev_disable(dev);
	}

	/* freeze and set UNLOADED */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_freeze(ap);			/* won't be thawed */
	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
	ap->pflags |= ATA_PFLAG_UNLOADED;

	spin_unlock_irqrestore(ap->lock, flags);
}
574
575
576
577
578
579
580
581
582
583
584
585
586
/**
 *	ata_scsi_error - SCSI layer error handler callback
 *	@host: SCSI host on which error occurred
 *
 *	Handles SCSI-layer-thrown error events: sorts timed-out commands,
 *	invokes the port's error handler (repeating while new exceptions
 *	arrive), and finishes up.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 *
 *	RETURNS:
 *	Zero.
 */
void ata_scsi_error(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);
	int i;
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* make sure sff pio task is not running */
	ata_sff_flush_pio_task(ap);

	/* synchronize with host lock and sort out timeouts */

	/* For new EH, all qcs are finished in one of three ways -
	 * normal completion, error completion, and SCSI timeout.
	 * Both completions can race against SCSI timeout.  When normal
	 * completion wins, the qc never reaches EH.  When error
	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
	 *
	 * When SCSI timeout wins, things are a bit more complex.
	 * Normal or error completion can occur after the timeout but
	 * before this point.  In such cases, both types of
	 * completions are honored.  A scmd is determined to have
	 * timed out iff its associated qc is active and not failed.
	 */
	if (ap->ops->error_handler) {
		struct scsi_cmnd *scmd, *tmp;
		int nr_timedout = 0;

		spin_lock_irqsave(ap->lock, flags);

		/* This must occur under the ap->lock as we don't want
		 * a polled recovery to race the real interrupt handler
		 *
		 * The lost_interrupt handler checks for any completed but
		 * non-notified command and completes much like an IRQ handler.
		 */
		if (ap->ops->lost_interrupt)
			ap->ops->lost_interrupt(ap);

		/* find the qc (if any) associated with each failed scmd */
		list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
			struct ata_queued_cmd *qc;

			for (i = 0; i < ATA_MAX_QUEUE; i++) {
				qc = __ata_qc_from_tag(ap, i);
				if (qc->flags & ATA_QCFLAG_ACTIVE &&
				    qc->scsicmd == scmd)
					break;
			}

			if (i < ATA_MAX_QUEUE) {
				/* the scmd has an associated qc */
				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
					/* which hasn't failed yet, timeout */
					qc->err_mask |= AC_ERR_TIMEOUT;
					qc->flags |= ATA_QCFLAG_FAILED;
					nr_timedout++;
				}
			} else {
				/* Normal completion occurred after
				 * SCSI timeout but before this point.
				 * Successfully complete it.
				 */
				scmd->retries = scmd->allowed;
				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
			}
		}

		/* If we have timed out qcs, they belong to EH from
		 * this point onwards.  Freezing also makes sure no new
		 * completion races against the timed-out commands.
		 */
		if (nr_timedout)
			__ata_port_freeze(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* initialize eh_tries */
		ap->eh_tries = ATA_EH_MAX_TRIES;
	} else
		spin_unlock_wait(ap->lock);

	/* If we timed raced normal completion and there is nothing to
	 * recover nr_timedout == 0 but we still need to invoke EH.
	 */

	/* invoke error handler */
	if (ap->ops->error_handler) {
		struct ata_link *link;

		/* acquire EH ownership */
		ata_eh_acquire(ap);
 repeat:
		/* kill fast drain timer */
		del_timer_sync(&ap->fastdrain_timer);

		/* process port resume request */
		ata_eh_handle_port_resume(ap);

		/* fetch & clear EH info */
		spin_lock_irqsave(ap->lock, flags);

		ata_for_each_link(link, ap, HOST_FIRST) {
			struct ata_eh_context *ehc = &link->eh_context;
			struct ata_device *dev;

			/* move eh_info into eh_context for this EH round */
			memset(&link->eh_context, 0, sizeof(link->eh_context));
			link->eh_context.i = link->eh_info;
			memset(&link->eh_info, 0, sizeof(link->eh_info));

			ata_for_each_dev(dev, link, ENABLED) {
				int devno = dev->devno;

				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
				if (ata_ncq_enabled(dev))
					ehc->saved_ncq_enabled |= 1 << devno;
			}
		}

		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		ap->excl_link = NULL;	/* don't maintain exclusion over EH */

		spin_unlock_irqrestore(ap->lock, flags);

		/* invoke EH, skip if unloading or suspended */
		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
			ap->ops->error_handler(ap);
		else {
			/* if unloading, commence suicide */
			if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
			    !(ap->pflags & ATA_PFLAG_UNLOADED))
				ata_eh_unload(ap);
			ata_eh_finish(ap);
		}

		/* process port suspend request */
		ata_eh_handle_port_suspend(ap);

		/* Exception might have happened after ->error_handler
		 * recovered the port but before this point.  Repeat
		 * EH in such case.
		 */
		spin_lock_irqsave(ap->lock, flags);

		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
			if (--ap->eh_tries) {
				spin_unlock_irqrestore(ap->lock, flags);
				goto repeat;
			}
			ata_port_printk(ap, KERN_ERR, "EH pending after %d "
					"tries, giving up\n", ATA_EH_MAX_TRIES);
			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		}

		/* this run is complete, make sure EH info is clear */
		ata_for_each_link(link, ap, HOST_FIRST)
			memset(&link->eh_info, 0, sizeof(link->eh_info));

		/* Clear host_eh_scheduled while holding ap->lock such
		 * that if exception occurs after this point but
		 * before EH completion, SCSI midlayer will
		 * re-initiate EH.
		 */
		host->host_eh_scheduled = 0;

		spin_unlock_irqrestore(ap->lock, flags);
		ata_eh_release(ap);
	} else {
		/* old-style EH: there must be an active qc to time out */
		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
		ap->ops->eng_timeout(ap);
	}

	/* finish or retry handled scmd's and clean up */
	WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));

	scsi_eh_flush_done_q(&ap->eh_done_q);

	/* clean up */
	spin_lock_irqsave(ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_LOADING)
		ap->pflags &= ~ATA_PFLAG_LOADING;
	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
		schedule_delayed_work(&ap->hotplug_task, 0);

	if (ap->pflags & ATA_PFLAG_RECOVERED)
		ata_port_printk(ap, KERN_INFO, "EH complete\n");

	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

	/* tell wait_eh that we're done */
	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
	wake_up_all(&ap->eh_wait_q);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("EXIT\n");
}
790
791
792
793
794
795
796
797
798
799
/**
 *	ata_port_wait_eh - wait until EH is complete
 *	@ap: Port to wait EH for
 *
 *	Wait until the currently pending EH is complete.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_wait_eh(struct ata_port *ap)
{
	unsigned long flags;
	DEFINE_WAIT(wait);

 retry:
	spin_lock_irqsave(ap->lock, flags);

	/* sleep until neither pending nor in-progress flags are set */
	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(ap->lock, flags);
		schedule();
		spin_lock_irqsave(ap->lock, flags);
	}
	finish_wait(&ap->eh_wait_q, &wait);

	spin_unlock_irqrestore(ap->lock, flags);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ap->scsi_host)) {
		ata_msleep(ap, 10);
		goto retry;
	}
}
824
825static int ata_eh_nr_in_flight(struct ata_port *ap)
826{
827 unsigned int tag;
828 int nr = 0;
829
830
831 for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
832 if (ata_qc_from_tag(ap, tag))
833 nr++;
834
835 return nr;
836}
837
/*
 * Fast-drain timer callback.  If no command completed since the timer
 * was armed (in-flight count unchanged), the port is assumed wedged:
 * all outstanding qcs are marked timed out and the port is frozen.
 * If some progress was made, re-arm the timer for another interval.
 */
void ata_eh_fastdrain_timerfn(unsigned long arg)
{
	struct ata_port *ap = (void *)arg;
	unsigned long flags;
	int cnt;

	spin_lock_irqsave(ap->lock, flags);

	cnt = ata_eh_nr_in_flight(ap);

	/* are we done? */
	if (!cnt)
		goto out_unlock;

	if (cnt == ap->fastdrain_cnt) {
		unsigned int tag;

		/* No progress during the last interval, tag all
		 * in-flight qcs as timed out and freeze the port.
		 */
		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
			if (qc)
				qc->err_mask |= AC_ERR_TIMEOUT;
		}

		ata_port_freeze(ap);
	} else {
		/* some qcs have finished, give it another chance */
		ap->fastdrain_cnt = cnt;
		ap->fastdrain_timer.expires =
			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
		add_timer(&ap->fastdrain_timer);
	}

 out_unlock:
	spin_unlock_irqrestore(ap->lock, flags);
}
876
877
878
879
880
881
882
883
884
885
886
887
888
889static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
890{
891 int cnt;
892
893
894 if (ap->pflags & ATA_PFLAG_EH_PENDING)
895 return;
896
897 ap->pflags |= ATA_PFLAG_EH_PENDING;
898
899 if (!fastdrain)
900 return;
901
902
903 cnt = ata_eh_nr_in_flight(ap);
904 if (!cnt)
905 return;
906
907
908 ap->fastdrain_cnt = cnt;
909 ap->fastdrain_timer.expires =
910 ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
911 add_timer(&ap->fastdrain_timer);
912}
913
914
915
916
917
918
919
920
921
922
923
/**
 *	ata_qc_schedule_eh - schedule qc for error handling
 *	@qc: command to schedule error handling for
 *
 *	Schedule error handling for @qc.  EH will kick in as soon as
 *	other commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct request_queue *q = qc->scsicmd->device->request_queue;
	unsigned long flags;

	WARN_ON(!ap->ops->error_handler);

	qc->flags |= ATA_QCFLAG_FAILED;
	ata_eh_set_pending(ap, 1);

	/* The following will fail if timeout has already expired.
	 * ata_scsi_error() takes care of such scmds on EH entry.
	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
	 * this function completes.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_abort_request(qc->scsicmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
944
945
946
947
948
949
950
951
952
953
954
/**
 *	ata_port_schedule_eh - schedule error handling without a qc
 *	@ap: ATA port to schedule EH for
 *
 *	Schedule error handling for @ap.  EH will kick in as soon as
 *	all commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	/* during initialization, EH requests are handled elsewhere */
	if (ap->pflags & ATA_PFLAG_INITIALIZING)
		return;

	ata_eh_set_pending(ap, 1);
	scsi_schedule_eh(ap->scsi_host);

	DPRINTK("port EH scheduled\n");
}
967
968static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
969{
970 int tag, nr_aborted = 0;
971
972 WARN_ON(!ap->ops->error_handler);
973
974
975 ata_eh_set_pending(ap, 0);
976
977 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
978 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
979
980 if (qc && (!link || qc->dev->link == link)) {
981 qc->flags |= ATA_QCFLAG_FAILED;
982 ata_qc_complete(qc);
983 nr_aborted++;
984 }
985 }
986
987 if (!nr_aborted)
988 ata_port_schedule_eh(ap);
989
990 return nr_aborted;
991}
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
/**
 *	ata_link_abort - abort all qc's on link
 *	@link: ATA link to abort qc's for
 *
 *	Abort all active qc's active on @link and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_link_abort(struct ata_link *link)
{
	return ata_do_link_abort(link->ap, link);
}
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
/**
 *	ata_port_abort - abort all qc's on port
 *	@ap: ATA port to abort qc's for
 *
 *	Abort all active qc's of @ap and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_port_abort(struct ata_port *ap)
{
	return ata_do_link_abort(ap, NULL);
}
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
/**
 *	__ata_port_freeze - freeze port
 *	@ap: ATA port to freeze
 *
 *	This function is called when HSM violation or some other
 *	condition disrupts normal operation of the port.  Frozen port
 *	is not allowed to perform any operation until the port is
 *	thawed, which usually follows a successful reset.
 *
 *	ap->ops->freeze() callback can be used for freezing the port
 *	hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
 *	port cannot be frozen hardware-wise, the interrupt handler
 *	must ack and clear interrupts unconditionally while the port
 *	is frozen.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->ops->freeze)
		ap->ops->freeze(ap);

	ap->pflags |= ATA_PFLAG_FROZEN;

	DPRINTK("ata%u port frozen\n", ap->print_id);
}
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071int ata_port_freeze(struct ata_port *ap)
1072{
1073 int nr_aborted;
1074
1075 WARN_ON(!ap->ops->error_handler);
1076
1077 __ata_port_freeze(ap);
1078 nr_aborted = ata_port_abort(ap);
1079
1080 return nr_aborted;
1081}
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
/**
 *	sata_async_notification - SATA async notification handler
 *	@ap: ATA port where async notification occurred
 *
 *	Handler to be called when async notification via SDB FIS is
 *	received.  This function schedules EH if necessary.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	1 if EH is scheduled, 0 otherwise.
 */
int sata_async_notification(struct ata_port *ap)
{
	u32 sntf;
	int rc;

	if (!(ap->flags & ATA_FLAG_AN))
		return 0;

	/* read and clear SNotification */
	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
	if (rc == 0)
		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);

	if (!sata_pmp_attached(ap) || rc) {
		/* PMP is not attached or SNTF is not available */
		if (!sata_pmp_attached(ap)) {
			/* PMP is not attached.  Check whether ATAPI
			 * AN is configured.  If so, notify media
			 * change.
			 */
			struct ata_device *dev = ap->link.device;

			if ((dev->class == ATA_DEV_ATAPI) &&
			    (dev->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(dev);
			return 0;
		} else {
			/* PMP is attached but SNTF is not available.
			 * ATAPI async media change notification is
			 * not used.  The PMP must be reporting PHY
			 * status change, schedule EH.
			 */
			ata_port_schedule_eh(ap);
			return 1;
		}
	} else {
		/* PMP is attached and SNTF is available */
		struct ata_link *link;

		/* check and notify ATAPI AN */
		ata_for_each_link(link, ap, EDGE) {
			if (!(sntf & (1 << link->pmp)))
				continue;

			if ((link->device->class == ATA_DEV_ATAPI) &&
			    (link->device->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(link->device);
		}

		/* If PMP is reporting that PHY status of some
		 * downstream ports has changed, schedule EH.
		 */
		if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
			ata_port_schedule_eh(ap);
			return 1;
		}

		return 0;
	}
}
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
/**
 *	ata_eh_freeze_port - EH helper to freeze port
 *	@ap: ATA port to freeze
 *
 *	Freeze @ap.  No-op for old-style EH ports.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_freeze_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);
	__ata_port_freeze(ap);
	spin_unlock_irqrestore(ap->lock, flags);
}
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
/**
 *	ata_eh_thaw_port - EH helper to thaw port
 *	@ap: ATA port to thaw
 *
 *	Thaw frozen port @ap, re-enabling normal operation.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_thaw_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_FROZEN;

	if (ap->ops->thaw)
		ap->ops->thaw(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("ata%u port thawed\n", ap->print_id);
}
1204
/* scsi completion callback for EH-completed commands - nothing to do,
 * scsi_eh_finish_cmd() handles the actual completion.
 */
static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
	/* nada */
}
1209
/*
 * Complete @qc from EH context: swap in the no-op scsi done callback,
 * complete the qc under the host lock, then hand the scmd to the SCSI
 * EH done queue for finishing.
 */
static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *scmd = qc->scsicmd;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	qc->scsidone = ata_eh_scsidone;
	__ata_qc_complete(qc);
	/* the qc's tag must have been released by completion */
	WARN_ON(ata_tag_valid(qc->tag));
	spin_unlock_irqrestore(ap->lock, flags);

	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}
1224
1225
1226
1227
1228
1229
1230
1231
1232void ata_eh_qc_complete(struct ata_queued_cmd *qc)
1233{
1234 struct scsi_cmnd *scmd = qc->scsicmd;
1235 scmd->retries = scmd->allowed;
1236 __ata_eh_qc_complete(qc);
1237}
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250void ata_eh_qc_retry(struct ata_queued_cmd *qc)
1251{
1252 struct scsi_cmnd *scmd = qc->scsicmd;
1253 if (!qc->err_mask && scmd->retries)
1254 scmd->retries--;
1255 __ata_eh_qc_complete(qc);
1256}
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
/**
 *	ata_dev_disable - disable ATA device
 *	@dev: ATA device to disable
 *
 *	Disable @dev.
 *
 *	Locking:
 *	EH context.
 */
void ata_dev_disable(struct ata_device *dev)
{
	if (!ata_dev_enabled(dev))
		return;

	if (ata_msg_drv(dev->link->ap))
		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
	ata_acpi_on_disable(dev);
	ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
	/* bump class to its _UNSUP variant, marking the device disabled
	 * (relies on the ATA_DEV_*_UNSUP values directly following the
	 * corresponding ATA_DEV_* values)
	 */
	dev->class++;

	/* From now till the next successful probe, ering is used to
	 * track probe failures.  Clear accumulated device error info.
	 */
	ata_ering_clear(&dev->ering);
}
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
/**
 *	ata_eh_detach_dev - detach ATA device
 *	@dev: ATA device to detach
 *
 *	Detach @dev.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_detach_dev(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	ata_dev_disable(dev);

	spin_lock_irqsave(ap->lock, flags);

	dev->flags &= ~ATA_DFLAG_DETACH;

	if (ata_scsi_offline_dev(dev)) {
		dev->flags |= ATA_DFLAG_DETACHED;
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
	}

	/* clear per-dev EH actions */
	ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
	ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
	ehc->saved_xfer_mode[dev->devno] = 0;
	ehc->saved_ncq_enabled &= ~(1 << dev->devno);

	spin_unlock_irqrestore(ap->lock, flags);
}
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
/**
 *	ata_eh_about_to_do - about to perform eh_action
 *	@link: target ATA link
 *	@dev: target ATA dev for per-dev action (can be NULL)
 *	@action: action about to be performed
 *
 *	Called just before performing EH actions to clear related bits
 *	in @link->eh_info such that eh actions are not unnecessarily
 *	repeated.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
			unsigned int action)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_info *ehi = &link->eh_info;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);

	ata_eh_clear_action(link, dev, ehi, action);

	/* About to take EH action, set RECOVERED.  Ignore actions on
	 * slave links as master will do them again.
	 */
	if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
		ap->pflags |= ATA_PFLAG_RECOVERED;

	spin_unlock_irqrestore(ap->lock, flags);
}
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366void ata_eh_done(struct ata_link *link, struct ata_device *dev,
1367 unsigned int action)
1368{
1369 struct ata_eh_context *ehc = &link->eh_context;
1370
1371 ata_eh_clear_action(link, dev, &ehc->i, action);
1372}
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388static const char *ata_err_string(unsigned int err_mask)
1389{
1390 if (err_mask & AC_ERR_HOST_BUS)
1391 return "host bus error";
1392 if (err_mask & AC_ERR_ATA_BUS)
1393 return "ATA bus error";
1394 if (err_mask & AC_ERR_TIMEOUT)
1395 return "timeout";
1396 if (err_mask & AC_ERR_HSM)
1397 return "HSM violation";
1398 if (err_mask & AC_ERR_SYSTEM)
1399 return "internal error";
1400 if (err_mask & AC_ERR_MEDIA)
1401 return "media error";
1402 if (err_mask & AC_ERR_INVALID)
1403 return "invalid argument";
1404 if (err_mask & AC_ERR_DEV)
1405 return "device error";
1406 return "unknown error";
1407}
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
/**
 *	ata_read_log_page - read a specific log page
 *	@dev: target device
 *	@page: page to read
 *	@buf: buffer to store read page
 *	@sectors: number of sectors to read
 *
 *	Read log page using READ_LOG_EXT command.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_read_log_page(struct ata_device *dev,
				      u8 page, void *buf, unsigned int sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	DPRINTK("read log page - page %d\n", page);

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_READ_LOG_EXT;
	tf.lbal = page;
	tf.nsect = sectors;
	tf.hob_nsect = sectors >> 8;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_PIO;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     buf, sectors * ATA_SECT_SIZE, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
/**
 *	ata_eh_read_log_10h - Read log page 10h for NCQ error details
 *	@dev: target device
 *	@tag: transfer tag of the failed command (output)
 *	@tf: result taskfile of the failed command (output)
 *
 *	Read log page 10h to obtain NCQ error details and clear error
 *	condition.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
static int ata_eh_read_log_10h(struct ata_device *dev,
			       int *tag, struct ata_taskfile *tf)
{
	u8 *buf = dev->link->ap->sector_buf;
	unsigned int err_mask;
	u8 csum;
	int i;

	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
	if (err_mask)
		return -EIO;

	/* page contents should sum to zero; only warn on mismatch */
	csum = 0;
	for (i = 0; i < ATA_SECT_SIZE; i++)
		csum += buf[i];
	if (csum)
		ata_dev_printk(dev, KERN_WARNING,
			"invalid checksum 0x%x on log page 10h\n", csum);

	/* NQ bit set means the error is not queued - nothing to report */
	if (buf[0] & 0x80)
		return -ENOENT;

	*tag = buf[0] & 0x1f;

	/* decode the error taskfile from the log page layout */
	tf->command = buf[2];
	tf->feature = buf[3];
	tf->lbal = buf[4];
	tf->lbam = buf[5];
	tf->lbah = buf[6];
	tf->device = buf[7];
	tf->hob_lbal = buf[8];
	tf->hob_lbam = buf[9];
	tf->hob_lbah = buf[10];
	tf->nsect = buf[12];
	tf->hob_nsect = buf[13];

	return 0;
}
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
/**
 *	atapi_eh_tur - perform ATAPI TEST_UNIT_READY
 *	@dev: target ATAPI device
 *	@r_sense_key: out parameter for sense_key
 *
 *	Perform ATAPI TEST_UNIT_READY.
 *
 *	LOCKING:
 *	EH context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask on failure.
 */
static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
{
	u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	struct ata_taskfile tf;
	unsigned int err_mask;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;
	tf.protocol = ATAPI_PROT_NODATA;

	err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
	/* on device error the sense key is in the upper nibble of feature */
	if (err_mask == AC_ERR_DEV)
		*r_sense_key = tf.feature >> 4;
	return err_mask;
}
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
/**
 *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
 *	@dev: device to perform REQUEST_SENSE to
 *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
 *	@dfl_sense_key: default sense key to use
 *
 *	Perform ATAPI REQUEST_SENSE after the device reported CHECK
 *	SENSE.  This function is an EH helper.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask on failure
 */
static unsigned int atapi_eh_request_sense(struct ata_device *dev,
					   u8 *sense_buf, u8 dfl_sense_key)
{
	u8 cdb[ATAPI_CDB_LEN] =
		{ REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;

	DPRINTK("ATAPI request sense\n");

	/* FIXME: is this needed? */
	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	/* initialize sense_buf with the error register,
	 * for the case where they are -not- overwritten
	 */
	sense_buf[0] = 0x70;
	sense_buf[2] = dfl_sense_key;

	/* some devices time out if garbage left in tf */
	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;

	/* is it pointless to prefer PIO for "safety reasons"? */
	if (ap->flags & ATA_FLAG_PIO_DMA) {
		tf.protocol = ATAPI_PROT_DMA;
		tf.feature |= ATAPI_PKT_DMA;
	} else {
		tf.protocol = ATAPI_PROT_PIO;
		tf.lbam = SCSI_SENSE_BUFFERSIZE;
		tf.lbah = 0;
	}

	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
				 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
}
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
/**
 *	ata_eh_analyze_serror - analyze SError for a failed port
 *	@link: ATA link to analyze SError for
 *
 *	Analyze SError if available and further determine cause of
 *	failure.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_analyze_serror(struct ata_link *link)
{
	struct ata_eh_context *ehc = &link->eh_context;
	u32 serror = ehc->i.serror;
	unsigned int err_mask = 0, action = 0;
	u32 hotplug_mask;

	if (serror & (SERR_PERSISTENT | SERR_DATA)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
	}
	if (serror & SERR_PROTOCOL) {
		err_mask |= AC_ERR_HSM;
		action |= ATA_EH_RESET;
	}
	if (serror & SERR_INTERNAL) {
		err_mask |= AC_ERR_SYSTEM;
		action |= ATA_EH_RESET;
	}

	/* Determine whether a hotplug event has occurred.  Both
	 * PHYRDY_CHG and DEV_XCHG are taken as hotplug events for
	 * enabled host links.  For disabled PMP links, only N bit is
	 * considered as X bit is left at 1 for link plugging.
	 * Spurious events are expected when the link is in a
	 * low-power state, so ignore them entirely then.
	 */
	if (link->lpm_policy != ATA_LPM_MAX_POWER)
		hotplug_mask = 0;	/* hotplug doesn't work w/ LPM */
	else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
		hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
	else
		hotplug_mask = SERR_PHYRDY_CHG;

	if (serror & hotplug_mask)
		ata_ehi_hotplugged(&ehc->i);

	ehc->i.err_mask |= err_mask;
	ehc->i.action |= action;
}
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
/**
 *	ata_eh_analyze_ncq_error - analyze NCQ error
 *	@link: ATA link to analyze NCQ error for
 *
 *	Read log page 10h, determine the offending qc and acquire
 *	error status TF.  For NCQ device errors, all LLDDs have to do
 *	is setting AC_ERR_DEV in ehi->err_mask.  This function takes
 *	care of the rest.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_eh_analyze_ncq_error(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev = link->device;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	int tag, rc;

	/* if frozen, we can't do much */
	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	/* is it NCQ device error? */
	if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
		return;

	/* has LLDD analyzed already? */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask)
			return;
	}

	/* okay, this error is ours */
	memset(&tf, 0, sizeof(tf));
	rc = ata_eh_read_log_10h(dev, &tag, &tf);
	if (rc) {
		ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
				"(errno=%d)\n", rc);
		return;
	}

	if (!(link->sactive & (1 << tag))) {
		ata_link_printk(link, KERN_ERR, "log page 10h reported "
				"inactive tag %d\n", tag);
		return;
	}

	/* we've got the perpetrator, condemn it */
	qc = __ata_qc_from_tag(ap, tag);
	memcpy(&qc->result_tf, &tf, sizeof(tf));
	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
	ehc->i.err_mask &= ~AC_ERR_DEV;
}
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
/**
 *	ata_eh_analyze_tf - analyze taskfile of a failed qc
 *	@qc: qc to analyze
 *	@tf: Taskfile registers to analyze
 *
 *	Analyze taskfile of @qc and further determine cause of
 *	failure.  This function also requests ATAPI sense data if
 *	available.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Determined recovery action
 */
static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
				      const struct ata_taskfile *tf)
{
	unsigned int tmp, action = 0;
	u8 stat = tf->command, err = tf->feature;

	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
		qc->err_mask |= AC_ERR_HSM;
		return ATA_EH_RESET;
	}

	if (stat & (ATA_ERR | ATA_DF))
		qc->err_mask |= AC_ERR_DEV;
	else
		return 0;

	switch (qc->dev->class) {
	case ATA_DEV_ATA:
		if (err & ATA_ICRC)
			qc->err_mask |= AC_ERR_ATA_BUS;
		if (err & ATA_UNC)
			qc->err_mask |= AC_ERR_MEDIA;
		if (err & ATA_IDNF)
			qc->err_mask |= AC_ERR_INVALID;
		break;

	case ATA_DEV_ATAPI:
		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
			tmp = atapi_eh_request_sense(qc->dev,
						qc->scsicmd->sense_buffer,
						qc->result_tf.feature >> 4);
			if (!tmp) {
				/* ATA_QCFLAG_SENSE_VALID is used to
				 * tell atapi_qc_complete() that sense
				 * data is already valid.
				 *
				 * TODO: interpret sense data and set
				 * appropriate err_mask.
				 */
				qc->flags |= ATA_QCFLAG_SENSE_VALID;
			} else
				qc->err_mask |= tmp;
		}
	}

	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
		action |= ATA_EH_RESET;

	return action;
}
1763
1764static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
1765 int *xfer_ok)
1766{
1767 int base = 0;
1768
1769 if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
1770 *xfer_ok = 1;
1771
1772 if (!*xfer_ok)
1773 base = ATA_ECAT_DUBIOUS_NONE;
1774
1775 if (err_mask & AC_ERR_ATA_BUS)
1776 return base + ATA_ECAT_ATA_BUS;
1777
1778 if (err_mask & AC_ERR_TIMEOUT)
1779 return base + ATA_ECAT_TOUT_HSM;
1780
1781 if (eflags & ATA_EFLAG_IS_IO) {
1782 if (err_mask & AC_ERR_HSM)
1783 return base + ATA_ECAT_TOUT_HSM;
1784 if ((err_mask &
1785 (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
1786 return base + ATA_ECAT_UNK_DEV;
1787 }
1788
1789 return 0;
1790}
1791
/* Accumulator passed through ata_ering_map() while counting recent
 * errors for the speed-down verdict. */
struct speed_down_verdict_arg {
	u64 since;			/* ignore entries older than this timestamp */
	int xfer_ok;			/* categorization state, see ata_eh_categorize_error() */
	int nr_errors[ATA_ECAT_NR];	/* per-category error counts */
};
1797
1798static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
1799{
1800 struct speed_down_verdict_arg *arg = void_arg;
1801 int cat;
1802
1803 if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since))
1804 return -1;
1805
1806 cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
1807 &arg->xfer_ok);
1808 arg->nr_errors[cat]++;
1809
1810 return 0;
1811}
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
/**
 *	ata_eh_speed_down_verdict - determine speed-down verdict
 *	@dev: device of interest
 *
 *	Count errors recorded in @dev's error ring over the last five
 *	and ten minutes and decide which ATA_EH_SPDN_* actions are
 *	warranted.  Dubious-category errors (those occurring before a
 *	transfer mode was validated) use tighter thresholds because
 *	they suggest the negotiated mode itself is broken.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	OR of ATA_EH_SPDN_* flags.
 */
static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
{
	/* jiffies values for the 5- and 10-minute windows */
	const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
	u64 j64 = get_jiffies_64();
	struct speed_down_verdict_arg arg;
	unsigned int verdict = 0;

	/* scan past 5 mins */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j5mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	/* >1 dubious bus/timeout errors: mode is suspect - slow down,
	 * allow falling back to PIO, and keep the evidence around */
	if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
		verdict |= ATA_EH_SPDN_SPEED_DOWN |
			ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;

	/* >1 dubious timeout/unknown-device errors: turn off NCQ */
	if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
		verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;

	/* >6 hard errors in 5 minutes: fall back to PIO */
	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;

	/* scan past 10 mins */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j10mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	/* >3 timeout/unknown-device errors in 10 minutes: NCQ off */
	if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
		verdict |= ATA_EH_SPDN_NCQ_OFF;

	/* sustained error rate: lower the transfer speed */
	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
		verdict |= ATA_EH_SPDN_SPEED_DOWN;

	return verdict;
}
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
/**
 *	ata_eh_speed_down - record error and speed down if necessary
 *	@dev: failed device
 *	@eflags: ATA_EFLAG_* describing the failure context
 *	@err_mask: err_mask of the error
 *
 *	Record the error in @dev's error ring and, based on the
 *	resulting verdict, apply remedies in escalating order:
 *	disable NCQ, lower interface / transfer speed, and finally
 *	force PIO (PATA and ATAPI only).
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Determined recovery action (0 or ATA_EH_RESET when a mode
 *	change requires a reset to take effect).
 */
static unsigned int ata_eh_speed_down(struct ata_device *dev,
				unsigned int eflags, unsigned int err_mask)
{
	struct ata_link *link = ata_dev_phys_link(dev);
	int xfer_ok = 0;
	unsigned int verdict;
	unsigned int action = 0;

	/* don't bother if Cat-0 error (uncategorized, not interesting) */
	if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
		return 0;

	/* record error and determine whether speed down is necessary */
	ata_ering_record(&dev->ering, eflags, err_mask);
	verdict = ata_eh_speed_down_verdict(dev);

	/* turn off NCQ? (only if currently on and not forced off/PIO) */
	if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
	    (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
			   ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
		dev->flags |= ATA_DFLAG_NCQ_OFF;
		ata_dev_printk(dev, KERN_WARNING,
			       "NCQ disabled due to excessive errors\n");
		goto done;
	}

	/* speed down? */
	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
		/* speed down SATA link speed if possible */
		if (sata_down_spd_limit(link, 0) == 0) {
			action |= ATA_EH_RESET;
			goto done;
		}

		/* lower transfer mode; first step trims within the
		 * current class, second forces the lowest setting */
		if (dev->spdn_cnt < 2) {
			static const int dma_dnxfer_sel[] =
				{ ATA_DNXFER_DMA, ATA_DNXFER_40C };
			static const int pio_dnxfer_sel[] =
				{ ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
			int sel;

			if (dev->xfer_shift != ATA_SHIFT_PIO)
				sel = dma_dnxfer_sel[dev->spdn_cnt];
			else
				sel = pio_dnxfer_sel[dev->spdn_cnt];

			dev->spdn_cnt++;

			if (ata_down_xfermask_limit(dev, sel) == 0) {
				action |= ATA_EH_RESET;
				goto done;
			}
		}
	}

	/* Fall back to PIO?  Slowing down + reducing NCQ is not enough
	 * for devices on SATA-PATA bridges and ATAPI devices. */
	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
	    (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
			dev->spdn_cnt = 0;
			action |= ATA_EH_RESET;
			goto done;
		}
	}

	return 0;
 done:
	/* device has been slowed down, blow error history unless the
	 * verdict says evidence should be preserved */
	if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
		ata_ering_clear(&dev->ering);
	return action;
}
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
/**
 *	ata_eh_link_autopsy - analyze error and determine recovery action
 *	@link: host link to perform autopsy on
 *
 *	Analyze why @link failed and determine which recovery actions
 *	are needed.  This function also sets more detailed AC_ERR_*
 *	values and fills sense data for ATAPI CHECK SENSE.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_link_autopsy(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;
	unsigned int all_err_mask = 0, eflags = 0;
	int tag;
	u32 serror;
	int rc;

	DPRINTK("ENTER\n");

	if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
		return;

	/* obtain and analyze SError */
	rc = sata_scr_read(link, SCR_ERROR, &serror);
	if (rc == 0) {
		ehc->i.serror |= serror;
		ata_eh_analyze_serror(link);
	} else if (rc != -EOPNOTSUPP) {
		/* SError read failed: reset and reprobe everything */
		ehc->i.probe_mask |= ATA_ALL_DEVICES;
		ehc->i.action |= ATA_EH_RESET;
		ehc->i.err_mask |= AC_ERR_OTHER;
	}

	/* analyze NCQ failure */
	ata_eh_analyze_ncq_error(link);

	/* any real error trumps AC_ERR_OTHER */
	if (ehc->i.err_mask & ~AC_ERR_OTHER)
		ehc->i.err_mask &= ~AC_ERR_OTHER;

	all_err_mask |= ehc->i.err_mask;

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link)
			continue;

		/* inherit upper level err_mask */
		qc->err_mask |= ehc->i.err_mask;

		/* analyze tf */
		ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);

		/* DEV errors are probably spurious in case of ATA_BUS error */
		if (qc->err_mask & AC_ERR_ATA_BUS)
			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
					  AC_ERR_INVALID);

		/* any real error trumps unknown error */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;

		/* SENSE_VALID trumps dev/unknown error and revalidation */
		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);

		/* determine whether the command is worth retrying */
		if (qc->flags & ATA_QCFLAG_IO ||
		    (!(qc->err_mask & AC_ERR_INVALID) &&
		     qc->err_mask != AC_ERR_DEV))
			qc->flags |= ATA_QCFLAG_RETRY;

		/* accumulate error info */
		ehc->i.dev = qc->dev;
		all_err_mask |= qc->err_mask;
		if (qc->flags & ATA_QCFLAG_IO)
			eflags |= ATA_EFLAG_IS_IO;
	}

	/* enforce default EH actions */
	if (ap->pflags & ATA_PFLAG_FROZEN ||
	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
		ehc->i.action |= ATA_EH_RESET;
	else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
		 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
		ehc->i.action |= ATA_EH_REVALIDATE;

	/* If we have offending qcs and the associated failed device,
	 * perform per-dev EH action only on the offending device.
	 */
	if (ehc->i.dev) {
		ehc->i.dev_action[ehc->i.dev->devno] |=
			ehc->i.action & ATA_EH_PERDEV_MASK;
		ehc->i.action &= ~ATA_EH_PERDEV_MASK;
	}

	/* propagate timeout to host link (PMP links can't own a timeout) */
	if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
		ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;

	/* Make sure timeout is accounted for related to speed down.
	 * With no offending device, a single-device link implicates
	 * its only device. */
	dev = ehc->i.dev;
	if (!dev && ((ata_link_max_devices(link) == 1 &&
		      ata_dev_enabled(link->device))))
		dev = link->device;

	if (dev) {
		if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
			eflags |= ATA_EFLAG_DUBIOUS_XFER;
		ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
	}

	DPRINTK("EXIT\n");
}
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
/**
 *	ata_eh_autopsy - analyze error and determine recovery action
 *	@ap: host port to perform autopsy on
 *
 *	Analyze all links of @ap and determine why they failed and
 *	which recovery actions are needed.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_eh_autopsy(struct ata_port *ap)
{
	struct ata_link *link;

	ata_for_each_link(link, ap, EDGE)
		ata_eh_link_autopsy(link);

	/* Handle the frigging slave link.  Autopsy is done similarly
	 * but actions and flags are transferred over to the master
	 * link and handled from there.
	 */
	if (ap->slave_link) {
		struct ata_eh_context *mehc = &ap->link.eh_context;
		struct ata_eh_context *sehc = &ap->slave_link->eh_context;

		/* transfer control flags from master to slave */
		sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;

		/* perform autopsy on the slave link */
		ata_eh_link_autopsy(ap->slave_link);

		/* transfer actions from slave to master and clear slave */
		ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
		mehc->i.action |= sehc->i.action;
		mehc->i.dev_action[1] |= sehc->i.dev_action[1];
		mehc->i.flags |= sehc->i.flags;
		ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
	}

	/* Autopsy of fanout ports can affect host link autopsy.
	 * Perform host link autopsy last.
	 */
	if (sata_pmp_attached(ap))
		ata_eh_link_autopsy(&ap->link);
}
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
/**
 *	ata_get_cmd_descript - get description for ATA command
 *	@command: ATA command code to get description for
 *
 *	Return a textual description of the given command, or %NULL if
 *	the command is not known (or CONFIG_ATA_VERBOSE_ERROR is off).
 *
 *	LOCKING:
 *	None
 */
const char *ata_get_cmd_descript(u8 command)
{
#ifdef CONFIG_ATA_VERBOSE_ERROR
	/* command-code -> name table, terminated by a NULL text entry */
	static const struct
	{
		u8 command;
		const char *text;
	} cmd_descr[] = {
		{ ATA_CMD_DEV_RESET, "DEVICE RESET" },
		{ ATA_CMD_CHK_POWER, "CHECK POWER MODE" },
		{ ATA_CMD_STANDBY, "STANDBY" },
		{ ATA_CMD_IDLE, "IDLE" },
		{ ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" },
		{ ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" },
		{ ATA_CMD_NOP, "NOP" },
		{ ATA_CMD_FLUSH, "FLUSH CACHE" },
		{ ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" },
		{ ATA_CMD_ID_ATA, "IDENTIFY DEVICE" },
		{ ATA_CMD_ID_ATAPI, "IDENTIFY PACKET DEVICE" },
		{ ATA_CMD_SERVICE, "SERVICE" },
		{ ATA_CMD_READ, "READ DMA" },
		{ ATA_CMD_READ_EXT, "READ DMA EXT" },
		{ ATA_CMD_READ_QUEUED, "READ DMA QUEUED" },
		{ ATA_CMD_READ_STREAM_EXT, "READ STREAM EXT" },
		{ ATA_CMD_READ_STREAM_DMA_EXT, "READ STREAM DMA EXT" },
		{ ATA_CMD_WRITE, "WRITE DMA" },
		{ ATA_CMD_WRITE_EXT, "WRITE DMA EXT" },
		{ ATA_CMD_WRITE_QUEUED, "WRITE DMA QUEUED EXT" },
		{ ATA_CMD_WRITE_STREAM_EXT, "WRITE STREAM EXT" },
		{ ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" },
		{ ATA_CMD_WRITE_FUA_EXT, "WRITE DMA FUA EXT" },
		{ ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
		{ ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" },
		{ ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" },
		{ ATA_CMD_PIO_READ, "READ SECTOR(S)" },
		{ ATA_CMD_PIO_READ_EXT, "READ SECTOR(S) EXT" },
		{ ATA_CMD_PIO_WRITE, "WRITE SECTOR(S)" },
		{ ATA_CMD_PIO_WRITE_EXT, "WRITE SECTOR(S) EXT" },
		{ ATA_CMD_READ_MULTI, "READ MULTIPLE" },
		{ ATA_CMD_READ_MULTI_EXT, "READ MULTIPLE EXT" },
		{ ATA_CMD_WRITE_MULTI, "WRITE MULTIPLE" },
		{ ATA_CMD_WRITE_MULTI_EXT, "WRITE MULTIPLE EXT" },
		{ ATA_CMD_WRITE_MULTI_FUA_EXT, "WRITE MULTIPLE FUA EXT" },
		{ ATA_CMD_SET_FEATURES, "SET FEATURES" },
		{ ATA_CMD_SET_MULTI, "SET MULTIPLE MODE" },
		{ ATA_CMD_VERIFY, "READ VERIFY SECTOR(S)" },
		{ ATA_CMD_VERIFY_EXT, "READ VERIFY SECTOR(S) EXT" },
		{ ATA_CMD_WRITE_UNCORR_EXT, "WRITE UNCORRECTABLE EXT" },
		{ ATA_CMD_STANDBYNOW1, "STANDBY IMMEDIATE" },
		{ ATA_CMD_IDLEIMMEDIATE, "IDLE IMMEDIATE" },
		{ ATA_CMD_SLEEP, "SLEEP" },
		{ ATA_CMD_INIT_DEV_PARAMS, "INITIALIZE DEVICE PARAMETERS" },
		{ ATA_CMD_READ_NATIVE_MAX, "READ NATIVE MAX ADDRESS" },
		{ ATA_CMD_READ_NATIVE_MAX_EXT, "READ NATIVE MAX ADDRESS EXT" },
		{ ATA_CMD_SET_MAX, "SET MAX ADDRESS" },
		{ ATA_CMD_SET_MAX_EXT, "SET MAX ADDRESS EXT" },
		{ ATA_CMD_READ_LOG_EXT, "READ LOG EXT" },
		{ ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" },
		{ ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" },
		{ ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" },
		{ ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" },
		{ ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" },
		{ ATA_CMD_TRUSTED_SND, "TRUSTED SEND" },
		{ ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" },
		{ ATA_CMD_PMP_READ, "READ BUFFER" },
		{ ATA_CMD_PMP_WRITE, "WRITE BUFFER" },
		{ ATA_CMD_CONF_OVERLAY, "DEVICE CONFIGURATION OVERLAY" },
		{ ATA_CMD_SEC_SET_PASS, "SECURITY SET PASSWORD" },
		{ ATA_CMD_SEC_UNLOCK, "SECURITY UNLOCK" },
		{ ATA_CMD_SEC_ERASE_PREP, "SECURITY ERASE PREPARE" },
		{ ATA_CMD_SEC_ERASE_UNIT, "SECURITY ERASE UNIT" },
		{ ATA_CMD_SEC_FREEZE_LOCK, "SECURITY FREEZE LOCK" },
		{ ATA_CMD_SEC_DISABLE_PASS, "SECURITY DISABLE PASSWORD" },
		{ ATA_CMD_CONFIG_STREAM, "CONFIGURE STREAM" },
		{ ATA_CMD_SMART, "SMART" },
		{ ATA_CMD_MEDIA_LOCK, "DOOR LOCK" },
		{ ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" },
		{ ATA_CMD_DSM, "DATA SET MANAGEMENT" },
		{ ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" },
		{ ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" },
		{ ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" },
		{ ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" },
		{ ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" },
		{ ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" },
		{ ATA_CMD_READ_LONG, "READ LONG (with retries)" },
		{ ATA_CMD_READ_LONG_ONCE, "READ LONG (without retries)" },
		{ ATA_CMD_WRITE_LONG, "WRITE LONG (with retries)" },
		{ ATA_CMD_WRITE_LONG_ONCE, "WRITE LONG (without retries)" },
		{ ATA_CMD_RESTORE, "RECALIBRATE" },
		{ 0, NULL }
	};

	unsigned int i;
	for (i = 0; cmd_descr[i].text; i++)
		if (cmd_descr[i].command == command)
			return cmd_descr[i].text;
#endif

	return NULL;
}
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
/**
 *	ata_eh_link_report - report error handling to user
 *	@link: ATA link EH is going on
 *
 *	Report EH state (failed commands, SError decode, taskfile dump)
 *	to the user.  Nothing is printed when ATA_EHI_QUIET is set or
 *	when no interesting failure is pending.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_link_report(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const char *frozen, *desc;
	char tries_buf[6];
	int tag, nr_failed = 0;

	if (ehc->i.flags & ATA_EHI_QUIET)
		return;

	desc = NULL;
	if (ehc->i.desc[0] != '\0')
		desc = ehc->i.desc;

	/* count failed commands worth reporting */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		/* skip commands not on this link, quiet device errors,
		 * and commands already resolved via valid sense data */
		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link ||
		    ((qc->flags & ATA_QCFLAG_QUIET) &&
		     qc->err_mask == AC_ERR_DEV))
			continue;
		if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
			continue;

		nr_failed++;
	}

	if (!nr_failed && !ehc->i.err_mask)
		return;

	frozen = "";
	if (ap->pflags & ATA_PFLAG_FROZEN)
		frozen = " frozen";

	/* show remaining tries only after the first retry */
	memset(tries_buf, 0, sizeof(tries_buf));
	if (ap->eh_tries < ATA_EH_MAX_TRIES)
		snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d",
			 ap->eh_tries);

	if (ehc->i.dev) {
		ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
			       "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
			       ehc->i.err_mask, link->sactive, ehc->i.serror,
			       ehc->i.action, frozen, tries_buf);
		if (desc)
			ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc);
	} else {
		ata_link_printk(link, KERN_ERR, "exception Emask 0x%x "
				"SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
				ehc->i.err_mask, link->sactive, ehc->i.serror,
				ehc->i.action, frozen, tries_buf);
		if (desc)
			ata_link_printk(link, KERN_ERR, "%s\n", desc);
	}

#ifdef CONFIG_ATA_VERBOSE_ERROR
	/* decode SError bits into symbolic names */
	if (ehc->i.serror)
		ata_link_printk(link, KERN_ERR,
		  "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
		  ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
		  ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
		  ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
		  ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
		  ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
		  ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
		  ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
		  ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
		  ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
		  ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
		  ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
		  ehc->i.serror & SERR_CRC ? "BadCRC " : "",
		  ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
		  ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
		  ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
		  ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
		  ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
#endif

	/* dump each failed command's taskfile, CDB and result */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
		struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
		const u8 *cdb = qc->cdb;
		char data_buf[20] = "";
		char cdb_buf[70] = "";

		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
			continue;

		if (qc->dma_dir != DMA_NONE) {
			static const char *dma_str[] = {
				[DMA_BIDIRECTIONAL]	= "bidi",
				[DMA_TO_DEVICE]		= "out",
				[DMA_FROM_DEVICE]	= "in",
			};
			static const char *prot_str[] = {
				[ATA_PROT_PIO]		= "pio",
				[ATA_PROT_DMA]		= "dma",
				[ATA_PROT_NCQ]		= "ncq",
				[ATAPI_PROT_PIO]	= "pio",
				[ATAPI_PROT_DMA]	= "dma",
			};

			snprintf(data_buf, sizeof(data_buf), " %s %u %s",
				 prot_str[qc->tf.protocol], qc->nbytes,
				 dma_str[qc->dma_dir]);
		}

		if (ata_is_atapi(qc->tf.protocol)) {
			if (qc->scsicmd)
				scsi_print_command(qc->scsicmd);
			else
				snprintf(cdb_buf, sizeof(cdb_buf),
				 "cdb %02x %02x %02x %02x %02x %02x %02x %02x "
				 "%02x %02x %02x %02x %02x %02x %02x %02x\n         ",
				 cdb[0], cdb[1], cdb[2], cdb[3],
				 cdb[4], cdb[5], cdb[6], cdb[7],
				 cdb[8], cdb[9], cdb[10], cdb[11],
				 cdb[12], cdb[13], cdb[14], cdb[15]);
		} else {
			const char *descr = ata_get_cmd_descript(cmd->command);
			if (descr)
				ata_dev_printk(qc->dev, KERN_ERR,
					"failed command: %s\n", descr);
		}

		ata_dev_printk(qc->dev, KERN_ERR,
			"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
			"tag %d%s\n         %s"
			"res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
			"Emask 0x%x (%s)%s\n",
			cmd->command, cmd->feature, cmd->nsect,
			cmd->lbal, cmd->lbam, cmd->lbah,
			cmd->hob_feature, cmd->hob_nsect,
			cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
			cmd->device, qc->tag, data_buf, cdb_buf,
			res->command, res->feature, res->nsect,
			res->lbal, res->lbam, res->lbah,
			res->hob_feature, res->hob_nsect,
			res->hob_lbal, res->hob_lbam, res->hob_lbah,
			res->device, qc->err_mask, ata_err_string(qc->err_mask),
			qc->err_mask & AC_ERR_NCQ ? " <F>" : "");

#ifdef CONFIG_ATA_VERBOSE_ERROR
		/* decode status and error registers symbolically */
		if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
				    ATA_ERR)) {
			if (res->command & ATA_BUSY)
				ata_dev_printk(qc->dev, KERN_ERR,
				  "status: { Busy }\n");
			else
				ata_dev_printk(qc->dev, KERN_ERR,
				  "status: { %s%s%s%s}\n",
				  res->command & ATA_DRDY ? "DRDY " : "",
				  res->command & ATA_DF ? "DF " : "",
				  res->command & ATA_DRQ ? "DRQ " : "",
				  res->command & ATA_ERR ? "ERR " : "");
		}

		if (cmd->command != ATA_CMD_PACKET &&
		    (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
				     ATA_ABORTED)))
			ata_dev_printk(qc->dev, KERN_ERR,
			  "error: { %s%s%s%s}\n",
			  res->feature & ATA_ICRC ? "ICRC " : "",
			  res->feature & ATA_UNC ? "UNC " : "",
			  res->feature & ATA_IDNF ? "IDNF " : "",
			  res->feature & ATA_ABORTED ? "ABRT " : "");
#endif
	}
}
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476
2477void ata_eh_report(struct ata_port *ap)
2478{
2479 struct ata_link *link;
2480
2481 ata_for_each_link(link, ap, HOST_FIRST)
2482 ata_eh_link_report(link);
2483}
2484
2485static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
2486 unsigned int *classes, unsigned long deadline,
2487 bool clear_classes)
2488{
2489 struct ata_device *dev;
2490
2491 if (clear_classes)
2492 ata_for_each_dev(dev, link, ALL)
2493 classes[dev->devno] = ATA_DEV_UNKNOWN;
2494
2495 return reset(link, classes, deadline);
2496}
2497
2498static int ata_eh_followup_srst_needed(struct ata_link *link,
2499 int rc, const unsigned int *classes)
2500{
2501 if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
2502 return 0;
2503 if (rc == -EAGAIN)
2504 return 1;
2505 if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
2506 return 1;
2507 return 0;
2508}
2509
2510int ata_eh_reset(struct ata_link *link, int classify,
2511 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
2512 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
2513{
2514 struct ata_port *ap = link->ap;
2515 struct ata_link *slave = ap->slave_link;
2516 struct ata_eh_context *ehc = &link->eh_context;
2517 struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
2518 unsigned int *classes = ehc->classes;
2519 unsigned int lflags = link->flags;
2520 int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
2521 int max_tries = 0, try = 0;
2522 struct ata_link *failed_link;
2523 struct ata_device *dev;
2524 unsigned long deadline, now;
2525 ata_reset_fn_t reset;
2526 unsigned long flags;
2527 u32 sstatus;
2528 int nr_unknown, rc;
2529
2530
2531
2532
2533 while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
2534 max_tries++;
2535 if (link->flags & ATA_LFLAG_NO_HRST)
2536 hardreset = NULL;
2537 if (link->flags & ATA_LFLAG_NO_SRST)
2538 softreset = NULL;
2539
2540
2541 if (ehc->i.flags & ATA_EHI_DID_RESET) {
2542 now = jiffies;
2543 WARN_ON(time_after(ehc->last_reset, now));
2544 deadline = ata_deadline(ehc->last_reset,
2545 ATA_EH_RESET_COOL_DOWN);
2546 if (time_before(now, deadline))
2547 schedule_timeout_uninterruptible(deadline - now);
2548 }
2549
2550 spin_lock_irqsave(ap->lock, flags);
2551 ap->pflags |= ATA_PFLAG_RESETTING;
2552 spin_unlock_irqrestore(ap->lock, flags);
2553
2554 ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2555
2556 ata_for_each_dev(dev, link, ALL) {
2557
2558
2559
2560
2561
2562
2563
2564 dev->pio_mode = XFER_PIO_0;
2565
2566
2567
2568
2569
2570
2571 if (ap->ops->set_piomode)
2572 ap->ops->set_piomode(ap, dev);
2573 }
2574
2575
2576 reset = NULL;
2577 ehc->i.action &= ~ATA_EH_RESET;
2578 if (hardreset) {
2579 reset = hardreset;
2580 ehc->i.action |= ATA_EH_HARDRESET;
2581 } else if (softreset) {
2582 reset = softreset;
2583 ehc->i.action |= ATA_EH_SOFTRESET;
2584 }
2585
2586 if (prereset) {
2587 unsigned long deadline = ata_deadline(jiffies,
2588 ATA_EH_PRERESET_TIMEOUT);
2589
2590 if (slave) {
2591 sehc->i.action &= ~ATA_EH_RESET;
2592 sehc->i.action |= ehc->i.action;
2593 }
2594
2595 rc = prereset(link, deadline);
2596
2597
2598
2599
2600
2601 if (slave && (rc == 0 || rc == -ENOENT)) {
2602 int tmp;
2603
2604 tmp = prereset(slave, deadline);
2605 if (tmp != -ENOENT)
2606 rc = tmp;
2607
2608 ehc->i.action |= sehc->i.action;
2609 }
2610
2611 if (rc) {
2612 if (rc == -ENOENT) {
2613 ata_link_printk(link, KERN_DEBUG,
2614 "port disabled. ignoring.\n");
2615 ehc->i.action &= ~ATA_EH_RESET;
2616
2617 ata_for_each_dev(dev, link, ALL)
2618 classes[dev->devno] = ATA_DEV_NONE;
2619
2620 rc = 0;
2621 } else
2622 ata_link_printk(link, KERN_ERR,
2623 "prereset failed (errno=%d)\n", rc);
2624 goto out;
2625 }
2626
2627
2628
2629
2630 if (reset && !(ehc->i.action & ATA_EH_RESET)) {
2631 ata_for_each_dev(dev, link, ALL)
2632 classes[dev->devno] = ATA_DEV_NONE;
2633 if ((ap->pflags & ATA_PFLAG_FROZEN) &&
2634 ata_is_host_link(link))
2635 ata_eh_thaw_port(ap);
2636 rc = 0;
2637 goto out;
2638 }
2639 }
2640
2641 retry:
2642
2643
2644
2645 if (ata_is_host_link(link))
2646 ata_eh_freeze_port(ap);
2647
2648 deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);
2649
2650 if (reset) {
2651 if (verbose)
2652 ata_link_printk(link, KERN_INFO, "%s resetting link\n",
2653 reset == softreset ? "soft" : "hard");
2654
2655
2656 ehc->last_reset = jiffies;
2657 if (reset == hardreset)
2658 ehc->i.flags |= ATA_EHI_DID_HARDRESET;
2659 else
2660 ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
2661
2662 rc = ata_do_reset(link, reset, classes, deadline, true);
2663 if (rc && rc != -EAGAIN) {
2664 failed_link = link;
2665 goto fail;
2666 }
2667
2668
2669 if (slave && reset == hardreset) {
2670 int tmp;
2671
2672 if (verbose)
2673 ata_link_printk(slave, KERN_INFO,
2674 "hard resetting link\n");
2675
2676 ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
2677 tmp = ata_do_reset(slave, reset, classes, deadline,
2678 false);
2679 switch (tmp) {
2680 case -EAGAIN:
2681 rc = -EAGAIN;
2682 case 0:
2683 break;
2684 default:
2685 failed_link = slave;
2686 rc = tmp;
2687 goto fail;
2688 }
2689 }
2690
2691
2692 if (reset == hardreset &&
2693 ata_eh_followup_srst_needed(link, rc, classes)) {
2694 reset = softreset;
2695
2696 if (!reset) {
2697 ata_link_printk(link, KERN_ERR,
2698 "follow-up softreset required "
2699 "but no softreset avaliable\n");
2700 failed_link = link;
2701 rc = -EINVAL;
2702 goto fail;
2703 }
2704
2705 ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2706 rc = ata_do_reset(link, reset, classes, deadline, true);
2707 if (rc) {
2708 failed_link = link;
2709 goto fail;
2710 }
2711 }
2712 } else {
2713 if (verbose)
2714 ata_link_printk(link, KERN_INFO, "no reset method "
2715 "available, skipping reset\n");
2716 if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
2717 lflags |= ATA_LFLAG_ASSUME_ATA;
2718 }
2719
2720
2721
2722
2723 ata_for_each_dev(dev, link, ALL) {
2724
2725
2726
2727
2728 dev->pio_mode = XFER_PIO_0;
2729 dev->flags &= ~ATA_DFLAG_SLEEPING;
2730
2731 if (ata_phys_link_offline(ata_dev_phys_link(dev)))
2732 continue;
2733
2734
2735 if (lflags & ATA_LFLAG_ASSUME_ATA)
2736 classes[dev->devno] = ATA_DEV_ATA;
2737 else if (lflags & ATA_LFLAG_ASSUME_SEMB)
2738 classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
2739 }
2740
2741
2742 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
2743 link->sata_spd = (sstatus >> 4) & 0xf;
2744 if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
2745 slave->sata_spd = (sstatus >> 4) & 0xf;
2746
2747
2748 if (ata_is_host_link(link))
2749 ata_eh_thaw_port(ap);
2750
2751
2752
2753
2754
2755
2756
2757
2758 if (postreset) {
2759 postreset(link, classes);
2760 if (slave)
2761 postreset(slave, classes);
2762 }
2763
2764
2765
2766
2767
2768
2769
2770 spin_lock_irqsave(link->ap->lock, flags);
2771 memset(&link->eh_info, 0, sizeof(link->eh_info));
2772 if (slave)
2773 memset(&slave->eh_info, 0, sizeof(link->eh_info));
2774 ap->pflags &= ~ATA_PFLAG_EH_PENDING;
2775 spin_unlock_irqrestore(link->ap->lock, flags);
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785 nr_unknown = 0;
2786 ata_for_each_dev(dev, link, ALL) {
2787 if (ata_phys_link_online(ata_dev_phys_link(dev))) {
2788 if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2789 ata_dev_printk(dev, KERN_DEBUG, "link online "
2790 "but device misclassifed\n");
2791 classes[dev->devno] = ATA_DEV_NONE;
2792 nr_unknown++;
2793 }
2794 } else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
2795 if (ata_class_enabled(classes[dev->devno]))
2796 ata_dev_printk(dev, KERN_DEBUG, "link offline, "
2797 "clearing class %d to NONE\n",
2798 classes[dev->devno]);
2799 classes[dev->devno] = ATA_DEV_NONE;
2800 } else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2801 ata_dev_printk(dev, KERN_DEBUG, "link status unknown, "
2802 "clearing UNKNOWN to NONE\n");
2803 classes[dev->devno] = ATA_DEV_NONE;
2804 }
2805 }
2806
2807 if (classify && nr_unknown) {
2808 if (try < max_tries) {
2809 ata_link_printk(link, KERN_WARNING, "link online but "
2810 "%d devices misclassified, retrying\n",
2811 nr_unknown);
2812 failed_link = link;
2813 rc = -EAGAIN;
2814 goto fail;
2815 }
2816 ata_link_printk(link, KERN_WARNING,
2817 "link online but %d devices misclassified, "
2818 "device detection might fail\n", nr_unknown);
2819 }
2820
2821
2822 ata_eh_done(link, NULL, ATA_EH_RESET);
2823 if (slave)
2824 ata_eh_done(slave, NULL, ATA_EH_RESET);
2825 ehc->last_reset = jiffies;
2826 ehc->i.action |= ATA_EH_REVALIDATE;
2827 link->lpm_policy = ATA_LPM_UNKNOWN;
2828
2829 rc = 0;
2830 out:
2831
2832 ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2833 if (slave)
2834 sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2835
2836 spin_lock_irqsave(ap->lock, flags);
2837 ap->pflags &= ~ATA_PFLAG_RESETTING;
2838 spin_unlock_irqrestore(ap->lock, flags);
2839
2840 return rc;
2841
2842 fail:
2843
2844 if (!ata_is_host_link(link) &&
2845 sata_scr_read(link, SCR_STATUS, &sstatus))
2846 rc = -ERESTART;
2847
2848 if (rc == -ERESTART || try >= max_tries)
2849 goto out;
2850
2851 now = jiffies;
2852 if (time_before(now, deadline)) {
2853 unsigned long delta = deadline - now;
2854
2855 ata_link_printk(failed_link, KERN_WARNING,
2856 "reset failed (errno=%d), retrying in %u secs\n",
2857 rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
2858
2859 ata_eh_release(ap);
2860 while (delta)
2861 delta = schedule_timeout_uninterruptible(delta);
2862 ata_eh_acquire(ap);
2863 }
2864
2865 if (try == max_tries - 1) {
2866 sata_down_spd_limit(link, 0);
2867 if (slave)
2868 sata_down_spd_limit(slave, 0);
2869 } else if (rc == -EPIPE)
2870 sata_down_spd_limit(failed_link, 0);
2871
2872 if (hardreset)
2873 reset = hardreset;
2874 goto retry;
2875}
2876
/* Pull pending ATA_EH_PARK actions from eh_info into eh_context for
 * every device on @ap, clearing them from eh_info so a later
 * ata_eh_done() can't mistakenly complete a park request that was
 * issued after EH started looking. */
static inline void ata_eh_pull_park_action(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/* The park_req_pending completion must be (re)initialized
	 * under ap->lock, before the PARK bits are transferred, so
	 * that a concurrent park request signalled between the
	 * transfer and the EH wait is not lost.
	 * NOTE(review): the exact ordering guarantee relies on the
	 * completion/lock semantics documented at the original call
	 * site - verify against ata_scsi_park_store() when changing.
	 */
	spin_lock_irqsave(ap->lock, flags);
	INIT_COMPLETION(ap->park_req_pending);
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ALL) {
			struct ata_eh_info *ehi = &link->eh_info;

			/* move PARK action from eh_info to eh_context */
			link->eh_context.i.dev_action[dev->devno] |=
				ehi->dev_action[dev->devno] & ATA_EH_PARK;
			ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
		}
	}
	spin_unlock_irqrestore(ap->lock, flags);
}
2922
/* Issue a head-park (park != 0) or head-unpark command to @dev and
 * track the result in ehc->unloaded_mask. */
static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	struct ata_taskfile tf;
	unsigned int err_mask;

	ata_tf_init(dev, &tf);
	if (park) {
		ehc->unloaded_mask |= 1 << dev->devno;
		/* IDLE IMMEDIATE with UNLOAD FEATURE: the feature/LBA
		 * signature below selects the head-unload variant
		 * (on success the device returns lbal == 0xc4). */
		tf.command = ATA_CMD_IDLEIMMEDIATE;
		tf.feature = 0x44;
		tf.lbal = 0x4c;
		tf.lbam = 0x4e;
		tf.lbah = 0x55;
	} else {
		/* any benign command terminates the park state;
		 * CHECK POWER MODE is used here */
		ehc->unloaded_mask &= ~(1 << dev->devno);
		tf.command = ATA_CMD_CHK_POWER;
	}

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	tf.protocol |= ATA_PROT_NODATA;
	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (park && (err_mask || tf.lbal != 0xc4)) {
		ata_dev_printk(dev, KERN_ERR, "head unload failed!\n");
		ehc->unloaded_mask &= ~(1 << dev->devno);
	}
}
2950
/* Revalidate devices needing ATA_EH_REVALIDATE and probe/attach newly
 * found devices on @link.  On failure, *r_failed_dev points at the
 * offending device and -errno is returned; 0 on success. */
static int ata_eh_revalidate_and_attach(struct ata_link *link,
					struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;
	unsigned int new_mask = 0;
	unsigned long flags;
	int rc = 0;

	DPRINTK("ENTER\n");

	/* For PATA drive side cable detection to work, IDENTIFY must
	 * be done backwards such that PDIAG- is released by the slave
	 * device before the master device is identified.
	 */
	ata_for_each_dev(dev, link, ALL_REVERSE) {
		unsigned int action = ata_eh_dev_action(dev);
		unsigned int readid_flags = 0;

		if (ehc->i.flags & ATA_EHI_DID_RESET)
			readid_flags |= ATA_READID_POSTRESET;

		if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
			WARN_ON(dev->class == ATA_DEV_PMP);

			/* can't revalidate a device behind an offline link */
			if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
				rc = -EIO;
				goto err;
			}

			ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
			rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
						readid_flags);
			if (rc)
				goto err;

			ata_eh_done(link, dev, ATA_EH_REVALIDATE);

			/* Configuration may have changed, reconfigure
			 * transfer mode.
			 */
			ehc->i.flags |= ATA_EHI_SETMODE;

			/* schedule the scsi_rescan() here */
			schedule_work(&(ap->scsi_rescan_task));
		} else if (dev->class == ATA_DEV_UNKNOWN &&
			   ehc->tries[dev->devno] &&
			   ata_class_enabled(ehc->classes[dev->devno])) {
			/* Temporarily set dev->class, it will be
			 * permanently set once all configurations are
			 * complete.  This is necessary because new
			 * device configuration is done in two
			 * separate loops.
			 */
			dev->class = ehc->classes[dev->devno];

			if (dev->class == ATA_DEV_PMP)
				rc = sata_pmp_attach(dev);
			else
				rc = ata_dev_read_id(dev, &dev->class,
						     readid_flags, dev->id);

			/* read_id might have changed class, store and reset */
			ehc->classes[dev->devno] = dev->class;
			dev->class = ATA_DEV_UNKNOWN;

			switch (rc) {
			case 0:
				/* clear error history for new devices */
				ata_ering_clear(&dev->ering);
				new_mask |= 1 << dev->devno;
				break;
			case -ENOENT:
				/* IDENTIFY was aborted; the port may
				 * have been frozen by the interrupt -
				 * thaw it and keep going. */
				ata_eh_thaw_port(ap);
				break;
			default:
				goto err;
			}
		}
	}

	/* PDIAG- should have been released, ask cable type if post-reset */
	if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
		if (ap->ops->cable_detect)
			ap->cbl = ap->ops->cable_detect(ap);
		ata_force_cbl(ap);
	}

	/* Configure new devices forward such that user doesn't see
	 * device detection messages backwards.
	 */
	ata_for_each_dev(dev, link, ALL) {
		if (!(new_mask & (1 << dev->devno)))
			continue;

		dev->class = ehc->classes[dev->devno];

		if (dev->class == ATA_DEV_PMP)
			continue;

		ehc->i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ehc->i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc) {
			dev->class = ATA_DEV_UNKNOWN;
			goto err;
		}

		spin_lock_irqsave(ap->lock, flags);
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
		spin_unlock_irqrestore(ap->lock, flags);

		/* new device discovered, configure transfer mode */
		ehc->i.flags |= ATA_EHI_SETMODE;
	}

	return 0;

 err:
	*r_failed_dev = dev;
	DPRINTK("EXIT rc=%d\n", rc);
	return rc;
}
3079
3080
3081
3082
3083
3084
3085
3086
3087
3088
3089
3090
3091
3092
3093
3094
3095int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3096{
3097 struct ata_port *ap = link->ap;
3098 struct ata_device *dev;
3099 int rc;
3100
3101
3102 ata_for_each_dev(dev, link, ENABLED) {
3103 if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
3104 struct ata_ering_entry *ent;
3105
3106 ent = ata_ering_top(&dev->ering);
3107 if (ent)
3108 ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
3109 }
3110 }
3111
3112
3113 if (ap->ops->set_mode)
3114 rc = ap->ops->set_mode(link, r_failed_dev);
3115 else
3116 rc = ata_do_set_mode(link, r_failed_dev);
3117
3118
3119 ata_for_each_dev(dev, link, ENABLED) {
3120 struct ata_eh_context *ehc = &link->eh_context;
3121 u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
3122 u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
3123
3124 if (dev->xfer_mode != saved_xfer_mode ||
3125 ata_ncq_enabled(dev) != saved_ncq)
3126 dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
3127 }
3128
3129 return rc;
3130}
3131
3132
3133
3134
3135
3136
3137
3138
3139
3140
3141
3142
3143
3144
3145
3146static int atapi_eh_clear_ua(struct ata_device *dev)
3147{
3148 int i;
3149
3150 for (i = 0; i < ATA_EH_UA_TRIES; i++) {
3151 u8 *sense_buffer = dev->link->ap->sector_buf;
3152 u8 sense_key = 0;
3153 unsigned int err_mask;
3154
3155 err_mask = atapi_eh_tur(dev, &sense_key);
3156 if (err_mask != 0 && err_mask != AC_ERR_DEV) {
3157 ata_dev_printk(dev, KERN_WARNING, "TEST_UNIT_READY "
3158 "failed (err_mask=0x%x)\n", err_mask);
3159 return -EIO;
3160 }
3161
3162 if (!err_mask || sense_key != UNIT_ATTENTION)
3163 return 0;
3164
3165 err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
3166 if (err_mask) {
3167 ata_dev_printk(dev, KERN_WARNING, "failed to clear "
3168 "UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
3169 return -EIO;
3170 }
3171 }
3172
3173 ata_dev_printk(dev, KERN_WARNING,
3174 "UNIT ATTENTION persists after %d tries\n", ATA_EH_UA_TRIES);
3175
3176 return 0;
3177}
3178
3179
3180
3181
3182
3183
3184
3185
3186
3187
3188
3189
3190
3191
3192
3193
3194
3195
/**
 *	ata_eh_maybe_retry_flush - Retry FLUSH if necessary
 *	@dev: ATA device which may need FLUSH retry
 *
 *	If @dev failed FLUSH, it needs to be reported to the upper
 *	layer immediately as it means that @dev failed to remap and
 *	already lost at least a sector and further FLUSH retrials
 *	won't make any difference to the lost sector.  However, if
 *	FLUSH failed for other reasons, for example transmission
 *	error, FLUSH needs to be retried.
 *
 *	This function determines whether FLUSH failure retry is
 *	necessary and performs it if so.
 *
 *	RETURNS:
 *	0 if EH can continue, -errno if EH needs to be repeated.
 */
static int ata_eh_maybe_retry_flush(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	unsigned int err_mask;
	int rc = 0;

	/* did flush fail for this device? */
	if (!ata_tag_valid(link->active_tag))
		return 0;

	qc = __ata_qc_from_tag(ap, link->active_tag);
	if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
			       qc->tf.command != ATA_CMD_FLUSH))
		return 0;

	/* if the device failed it, it should be reported to upper layers */
	if (qc->err_mask & AC_ERR_DEV)
		return 0;

	/* flush failed for some other reason, give it another shot */
	ata_tf_init(dev, &tf);

	tf.command = qc->tf.command;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	ata_dev_printk(dev, KERN_WARNING, "retrying FLUSH 0x%x Emask 0x%x\n",
		       tf.command, qc->err_mask);

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (!err_mask) {
		/*
		 * FLUSH is complete but there's no way to
		 * successfully complete a failed command from EH.
		 * Making sure retry is allowed at least once and
		 * retrying it should do the trick - whatever was in
		 * the cache is already on the platter and this won't
		 * cause infinite loop.
		 */
		qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
	} else {
		ata_dev_printk(dev, KERN_WARNING, "FLUSH failed Emask 0x%x\n",
			       err_mask);
		rc = -EIO;

		/* if device failed it, report it to upper layers */
		if (err_mask & AC_ERR_DEV) {
			qc->err_mask |= AC_ERR_DEV;
			qc->result_tf = tf;
			if (!(ap->pflags & ATA_PFLAG_FROZEN))
				rc = 0;
		}
	}
	return rc;
}
3254
3255
3256
3257
3258
3259
3260
3261
3262
3263
3264
3265
3266
3267
3268
3269
3270
3271
/**
 *	ata_eh_set_lpm - configure SATA interface power management
 *	@link: link to configure power management
 *	@policy: the link power management policy
 *	@r_failed_dev: out parameter for failed device
 *
 *	Enable SATA Interface power management.  This will enable
 *	Device Interface Power Management (DIPM) for min_power policy
 *	and then call driver specific callbacks for enabling Host
 *	Initiated Power management.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
			  struct ata_device **r_failed_dev)
{
	struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
	enum ata_lpm_policy old_policy = link->lpm_policy;
	unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
	unsigned int err_mask;
	int rc;

	/* if the link or host doesn't do LPM, noop */
	if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
		return 0;

	/*
	 * DIPM is enabled only for MIN_POWER as some devices
	 * misbehave when the host NACKs transition to SLUMBER.  Order
	 * device and link configurations such that the host always
	 * allows DIPM requests.
	 */
	ata_for_each_dev(dev, link, ENABLED) {
		bool hipm = ata_id_has_hipm(dev->id);
		bool dipm = ata_id_has_dipm(dev->id);

		/* find the first enabled and LPM enabled devices */
		if (!link_dev)
			link_dev = dev;

		if (!lpm_dev && (hipm || dipm))
			lpm_dev = dev;

		hints &= ~ATA_LPM_EMPTY;
		if (!hipm)
			hints &= ~ATA_LPM_HIPM;

		/* disable DIPM before changing link config */
		if (policy != ATA_LPM_MIN_POWER && dipm) {
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_DISABLE, SATA_DIPM);
			if (err_mask && err_mask != AC_ERR_DEV) {
				ata_dev_printk(dev, KERN_WARNING,
					"failed to disable DIPM, Emask 0x%x\n",
					err_mask);
				rc = -EIO;
				goto fail;
			}
		}
	}

	if (ap) {
		rc = ap->ops->set_lpm(link, policy, hints);
		if (!rc && ap->slave_link)
			rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
	} else
		rc = sata_pmp_set_lpm(link, policy, hints);

	/*
	 * Attribute link config failure to the first (LPM) enabled
	 * device on the link.
	 */
	if (rc) {
		if (rc == -EOPNOTSUPP) {
			link->flags |= ATA_LFLAG_NO_LPM;
			return 0;
		}
		dev = lpm_dev ? lpm_dev : link_dev;
		goto fail;
	}

	/*
	 * Low level driver acked the transition.  Record the new
	 * policy before issuing DIPM commands.
	 */
	link->lpm_policy = policy;
	if (ap && ap->slave_link)
		ap->slave_link->lpm_policy = policy;

	/* host config updated, enable DIPM if transitioning to MIN_POWER */
	ata_for_each_dev(dev, link, ENABLED) {
		if (policy == ATA_LPM_MIN_POWER && ata_id_has_dipm(dev->id)) {
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_DIPM);
			if (err_mask && err_mask != AC_ERR_DEV) {
				ata_dev_printk(dev, KERN_WARNING,
					"failed to enable DIPM, Emask 0x%x\n",
					err_mask);
				rc = -EIO;
				goto fail;
			}
		}
	}

	return 0;

fail:
	/* restore the old policy */
	link->lpm_policy = old_policy;
	if (ap && ap->slave_link)
		ap->slave_link->lpm_policy = old_policy;

	/* if no device or only one more chance is left, disable LPM */
	if (!dev || ehc->tries[dev->devno] <= 2) {
		ata_link_printk(link, KERN_WARNING,
				"disabling LPM on the link\n");
		link->flags |= ATA_LFLAG_NO_LPM;
	}
	if (r_failed_dev)
		*r_failed_dev = dev;
	return rc;
}
3383
3384static int ata_link_nr_enabled(struct ata_link *link)
3385{
3386 struct ata_device *dev;
3387 int cnt = 0;
3388
3389 ata_for_each_dev(dev, link, ENABLED)
3390 cnt++;
3391 return cnt;
3392}
3393
3394static int ata_link_nr_vacant(struct ata_link *link)
3395{
3396 struct ata_device *dev;
3397 int cnt = 0;
3398
3399 ata_for_each_dev(dev, link, ALL)
3400 if (dev->class == ATA_DEV_UNKNOWN)
3401 cnt++;
3402 return cnt;
3403}
3404
3405static int ata_eh_skip_recovery(struct ata_link *link)
3406{
3407 struct ata_port *ap = link->ap;
3408 struct ata_eh_context *ehc = &link->eh_context;
3409 struct ata_device *dev;
3410
3411
3412 if (link->flags & ATA_LFLAG_DISABLED)
3413 return 1;
3414
3415
3416 if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
3417 return 1;
3418
3419
3420 if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
3421 return 0;
3422
3423
3424 if ((ehc->i.action & ATA_EH_RESET) &&
3425 !(ehc->i.flags & ATA_EHI_DID_RESET))
3426 return 0;
3427
3428
3429 ata_for_each_dev(dev, link, ALL) {
3430 if (dev->class == ATA_DEV_UNKNOWN &&
3431 ehc->classes[dev->devno] != ATA_DEV_NONE)
3432 return 0;
3433 }
3434
3435 return 1;
3436}
3437
3438static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
3439{
3440 u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
3441 u64 now = get_jiffies_64();
3442 int *trials = void_arg;
3443
3444 if (ent->timestamp < now - min(now, interval))
3445 return -1;
3446
3447 (*trials)++;
3448 return 0;
3449}
3450
/* Schedule probing of @dev if it was requested via ehc->i.probe_mask
 * and hasn't been done yet this EH round.  Returns 1 if probing was
 * scheduled, 0 otherwise.
 */
static int ata_eh_schedule_probe(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	struct ata_link *link = ata_dev_phys_link(dev);
	int trials = 0;

	if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
	    (ehc->did_probe_mask & (1 << dev->devno)))
		return 0;

	/* reinit device state and request a reset for probing */
	ata_eh_detach_dev(dev);
	ata_dev_init(dev);
	ehc->did_probe_mask |= (1 << dev->devno);
	ehc->i.action |= ATA_EH_RESET;
	ehc->saved_xfer_mode[dev->devno] = 0;
	ehc->saved_ncq_enabled &= ~(1 << dev->devno);

	/* the link maybe in a deep sleep, wake it up */
	if (link->lpm_policy > ATA_LPM_MAX_POWER) {
		if (ata_is_host_link(link))
			link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER,
					       ATA_LPM_EMPTY);
		else
			sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER,
					 ATA_LPM_EMPTY);
	}

	/* Record and count probe trials on the ering.  The specific
	 * error mask used is irrelevant.  Because a successful device
	 * detection clears the ering, this count accumulates only if
	 * there are consecutive failed probes.
	 *
	 * If the count is equal to or higher than ATA_EH_PROBE_TRIALS
	 * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is
	 * forced down.
	 *
	 * This is to work around cases where failed link speed
	 * negotiation results in device misdetection leading to
	 * infinite DEVXCHG or PHRDY CHG events.
	 */
	ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
	ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);

	if (trials > ATA_EH_PROBE_TRIALS)
		sata_down_spd_limit(link, 1);

	return 1;
}
3499
/* Handle recovery failure @err on @dev.  Returns 1 if @dev has used up
 * all its chances and has been disabled, 0 if recovery should be
 * retried (with ATA_EH_RESET scheduled).
 */
static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	/* -EAGAIN from EH routine indicates retry without prejudice.
	 * The requester is responsible for ensuring forward progress.
	 */
	if (err != -EAGAIN)
		ehc->tries[dev->devno]--;

	switch (err) {
	case -ENODEV:
		/* device missing or wrong IDENTIFY data, schedule probing */
		ehc->i.probe_mask |= (1 << dev->devno);
		/* fall through */
	case -EINVAL:
		/* give it just one more chance */
		ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
		/* fall through */
	case -EIO:
		if (ehc->tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(ata_dev_phys_link(dev), 0);
			if (dev->pio_mode > XFER_PIO_0)
				ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
		/* disable device if it has used up all its chances */
		ata_dev_disable(dev);

		/* detach if offline */
		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
			ata_eh_detach_dev(dev);

		/* schedule probe if necessary; reset tries and command
		 * timeout indices for the fresh probe round
		 */
		if (ata_eh_schedule_probe(dev)) {
			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
			memset(ehc->cmd_timeout_idx[dev->devno], 0,
			       sizeof(ehc->cmd_timeout_idx[dev->devno]));
		}

		return 1;
	} else {
		ehc->i.action |= ATA_EH_RESET;
		return 0;
	}
}
3549
3550
3551
3552
3553
3554
3555
3556
3557
3558
3559
3560
3561
3562
3563
3564
3565
3566
3567
3568
3569
3570
3571
/**
 *	ata_eh_recover - recover host port after error
 *	@ap: host port to recover
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *	@r_failed_link: out parameter for failed link
 *
 *	This is the alpha and omega, eum and yang, heart and soul of
 *	libata exception handling.  On entry, actions required to
 *	recover each link and hotplug requests are recorded in the
 *	link's eh_context.  This function executes all the operations
 *	with appropriate retrials and fallbacks to resurrect failed
 *	devices, detach goners and greet newcomers.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
		   ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
		   ata_postreset_fn_t postreset,
		   struct ata_link **r_failed_link)
{
	struct ata_link *link;
	struct ata_device *dev;
	int rc, nr_fails;
	unsigned long flags, deadline;

	DPRINTK("ENTER\n");

	/* prep for recovery */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* re-enable link? */
		if (ehc->i.action & ATA_EH_ENABLE_LINK) {
			ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
			spin_lock_irqsave(ap->lock, flags);
			link->flags &= ~ATA_LFLAG_DISABLED;
			spin_unlock_irqrestore(ap->lock, flags);
			ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
		}

		ata_for_each_dev(dev, link, ALL) {
			if (link->flags & ATA_LFLAG_NO_RETRY)
				ehc->tries[dev->devno] = 1;
			else
				ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;

			/* collect port action mask recorded in dev actions */
			ehc->i.action |= ehc->i.dev_action[dev->devno] &
					 ~ATA_EH_PERDEV_MASK;
			ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;

			/* process hotplug request */
			if (dev->flags & ATA_DFLAG_DETACH)
				ata_eh_detach_dev(dev);

			/* schedule probe if necessary */
			if (!ata_dev_enabled(dev))
				ata_eh_schedule_probe(dev);
		}
	}

 retry:
	rc = 0;

	/* if UNLOADING, finish immediately */
	if (ap->pflags & ATA_PFLAG_UNLOADING)
		goto out;

	/* prep for EH */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* skip EH if possible. */
		if (ata_eh_skip_recovery(link))
			ehc->i.action = 0;

		ata_for_each_dev(dev, link, ALL)
			ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
	}

	/* reset */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		if (!(ehc->i.action & ATA_EH_RESET))
			continue;

		rc = ata_eh_reset(link, ata_link_nr_vacant(link),
				  prereset, softreset, hardreset, postreset);
		if (rc) {
			ata_link_printk(link, KERN_ERR,
					"reset failed, giving up\n");
			goto out;
		}
	}

	do {
		unsigned long now;

		/*
		 * clears ATA_EH_PARK in eh_info and resets
		 * ap->park_req_pending
		 */
		ata_eh_pull_park_action(ap);

		deadline = jiffies;
		ata_for_each_link(link, ap, EDGE) {
			ata_for_each_dev(dev, link, ALL) {
				struct ata_eh_context *ehc = &link->eh_context;
				unsigned long tmp;

				if (dev->class != ATA_DEV_ATA)
					continue;
				if (!(ehc->i.dev_action[dev->devno] &
				      ATA_EH_PARK))
					continue;
				tmp = dev->unpark_deadline;
				if (time_before(deadline, tmp))
					deadline = tmp;
				else if (time_before_eq(tmp, jiffies))
					continue;
				if (ehc->unloaded_mask & (1 << dev->devno))
					continue;

				ata_eh_park_issue_cmd(dev, 1);
			}
		}

		now = jiffies;
		if (time_before_eq(deadline, now))
			break;

		ata_eh_release(ap);
		deadline = wait_for_completion_timeout(&ap->park_req_pending,
						       deadline - now);
		ata_eh_acquire(ap);
	} while (deadline);
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ALL) {
			if (!(link->eh_context.unloaded_mask &
			      (1 << dev->devno)))
				continue;

			ata_eh_park_issue_cmd(dev, 0);
			ata_eh_done(link, dev, ATA_EH_PARK);
		}
	}

	/* the rest */
	nr_fails = 0;
	ata_for_each_link(link, ap, PMP_FIRST) {
		struct ata_eh_context *ehc = &link->eh_context;

		if (sata_pmp_attached(ap) && ata_is_host_link(link))
			goto config_lpm;

		/* revalidate existing devices and attach new ones */
		rc = ata_eh_revalidate_and_attach(link, &dev);
		if (rc)
			goto rest_fail;

		/* if PMP got attached, return, pmp EH will take care of it */
		if (link->device->class == ATA_DEV_PMP) {
			ehc->i.action = 0;
			return 0;
		}

		/* configure transfer mode if necessary */
		if (ehc->i.flags & ATA_EHI_SETMODE) {
			rc = ata_set_mode(link, &dev);
			if (rc)
				goto rest_fail;
			ehc->i.flags &= ~ATA_EHI_SETMODE;
		}

		/* If reset has been issued, clear UA to avoid
		 * disrupting the current users of the device.
		 */
		if (ehc->i.flags & ATA_EHI_DID_RESET) {
			ata_for_each_dev(dev, link, ALL) {
				if (dev->class != ATA_DEV_ATAPI)
					continue;
				rc = atapi_eh_clear_ua(dev);
				if (rc)
					goto rest_fail;
			}
		}

		/* retry flush if necessary */
		ata_for_each_dev(dev, link, ALL) {
			if (dev->class != ATA_DEV_ATA)
				continue;
			rc = ata_eh_maybe_retry_flush(dev);
			if (rc)
				goto rest_fail;
		}

	config_lpm:
		/* configure link power saving */
		if (link->lpm_policy != ap->target_lpm_policy) {
			rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev);
			if (rc)
				goto rest_fail;
		}

		/* this link is okay now */
		ehc->i.flags = 0;
		continue;

	rest_fail:
		nr_fails++;
		if (dev)
			ata_eh_handle_dev_fail(dev, rc);

		if (ap->pflags & ATA_PFLAG_FROZEN) {
			/* PMP reset requires working host port.
			 * Can't retry if it's frozen.
			 */
			if (sata_pmp_attached(ap))
				goto out;
			break;
		}
	}

	if (nr_fails)
		goto retry;

 out:
	if (rc && r_failed_link)
		*r_failed_link = link;

	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
3791
3792
3793
3794
3795
3796
3797
3798
3799
3800
3801
/**
 *	ata_eh_finish - finish up EH
 *	@ap: host port to finish EH for
 *
 *	Recovery is complete.  Clean up EH states and retry or finish
 *	failed qcs.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_finish(struct ata_port *ap)
{
	int tag;

	/* retry or finish qcs */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask) {
			/* FIXME: Once EH migration is complete,
			 * generate sense data in this function,
			 * considering both err_mask and tf.
			 */
			if (qc->flags & ATA_QCFLAG_RETRY)
				ata_eh_qc_retry(qc);
			else
				ata_eh_qc_complete(qc);
		} else {
			if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
				ata_eh_qc_complete(qc);
			} else {
				/* feed zero TF to sense generation */
				memset(&qc->result_tf, 0, sizeof(qc->result_tf));
				ata_eh_qc_retry(qc);
			}
		}
	}

	/* make sure nr_active_links is zero after EH */
	WARN_ON(ap->nr_active_links);
	ap->nr_active_links = 0;
}
3837
3838
3839
3840
3841
3842
3843
3844
3845
3846
3847
3848
3849
3850
3851
3852void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
3853 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
3854 ata_postreset_fn_t postreset)
3855{
3856 struct ata_device *dev;
3857 int rc;
3858
3859 ata_eh_autopsy(ap);
3860 ata_eh_report(ap);
3861
3862 rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
3863 NULL);
3864 if (rc) {
3865 ata_for_each_dev(dev, &ap->link, ALL)
3866 ata_dev_disable(dev);
3867 }
3868
3869 ata_eh_finish(ap);
3870}
3871
3872
3873
3874
3875
3876
3877
3878
3879
3880
3881void ata_std_error_handler(struct ata_port *ap)
3882{
3883 struct ata_port_operations *ops = ap->ops;
3884 ata_reset_fn_t hardreset = ops->hardreset;
3885
3886
3887 if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
3888 hardreset = NULL;
3889
3890 ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
3891}
3892
3893#ifdef CONFIG_PM
3894
3895
3896
3897
3898
3899
3900
3901
3902
/**
 *	ata_eh_handle_port_suspend - perform port suspend operation
 *	@ap: port to suspend
 *
 *	Suspend @ap.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{
	unsigned long flags;
	int rc = 0;

	/* are we suspending? */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    ap->pm_mesg.event == PM_EVENT_ON) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);

	/* tell ACPI we're suspending */
	rc = ata_acpi_on_suspend(ap);
	if (rc)
		goto out;

	/* suspend */
	ata_eh_freeze_port(ap);

	if (ap->ops->port_suspend)
		rc = ap->ops->port_suspend(ap, ap->pm_mesg);

	ata_acpi_set_state(ap, PMSG_SUSPEND);
 out:
	/* report result */
	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
	if (rc == 0)
		ap->pflags |= ATA_PFLAG_SUSPENDED;
	else if (ap->pflags & ATA_PFLAG_FROZEN)
		ata_port_schedule_eh(ap);

	if (ap->pm_result) {
		*ap->pm_result = rc;
		ap->pm_result = NULL;
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return;
}
3950
3951
3952
3953
3954
3955
3956
3957
3958
3959
/**
 *	ata_eh_handle_port_resume - perform port resume operation
 *	@ap: port to resume
 *
 *	Resume @ap.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_resume(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;
	int rc = 0;

	/* are we resuming? */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    ap->pm_mesg.event != PM_EVENT_ON) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));

	/*
	 * Error timestamps are in jiffies which doesn't run while
	 * suspended and PHY events during resume isn't too uncommon.
	 * When the two are combined, it can lead to unnecessary speed
	 * downs if the machine is suspended and resumed repeatedly.
	 * Clear error history.
	 */
	ata_for_each_link(link, ap, HOST_FIRST)
		ata_for_each_dev(dev, link, ALL)
			ata_ering_clear(&dev->ering);

	ata_acpi_set_state(ap, PMSG_ON);

	if (ap->ops->port_resume)
		rc = ap->ops->port_resume(ap);

	/* tell ACPI that we're resuming */
	ata_acpi_on_resume(ap);

	/* report result */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
	if (ap->pm_result) {
		*ap->pm_result = rc;
		ap->pm_result = NULL;
	}
	spin_unlock_irqrestore(ap->lock, flags);
}
4006#endif
4007