1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35#include <linux/kernel.h>
36#include <linux/blkdev.h>
37#include <linux/pci.h>
38#include <scsi/scsi.h>
39#include <scsi/scsi_host.h>
40#include <scsi/scsi_eh.h>
41#include <scsi/scsi_device.h>
42#include <scsi/scsi_cmnd.h>
43#include <scsi/scsi_dbg.h>
44#include "../scsi/scsi_transport_api.h"
45
46#include <linux/libata.h>
47
48#include "libata.h"
49
enum {
	/* speed down verdicts returned by ata_eh_speed_down_verdict() */
	ATA_EH_SPDN_NCQ_OFF = (1 << 0),
	ATA_EH_SPDN_SPEED_DOWN = (1 << 1),
	ATA_EH_SPDN_FALLBACK_TO_PIO = (1 << 2),
	ATA_EH_SPDN_KEEP_ERRORS = (1 << 3),

	/* error flags recorded in the device error ring */
	ATA_EFLAG_IS_IO = (1 << 0),		/* command was regular IO */
	ATA_EFLAG_DUBIOUS_XFER = (1 << 1),	/* xfer might not have completed */

	/* error categories used by ata_eh_categorize_error() */
	ATA_ECAT_NONE = 0,
	ATA_ECAT_ATA_BUS = 1,
	ATA_ECAT_TOUT_HSM = 2,
	ATA_ECAT_UNK_DEV = 3,
	ATA_ECAT_DUBIOUS_NONE = 4,
	ATA_ECAT_DUBIOUS_ATA_BUS = 5,
	ATA_ECAT_DUBIOUS_TOUT_HSM = 6,
	ATA_ECAT_DUBIOUS_UNK_DEV = 7,
	ATA_ECAT_NR = 8,

	/* default timeout (ms) for internal commands not in the timeout table */
	ATA_EH_CMD_DFL_TIMEOUT = 5000,

	/* cool-down (ms) honored between consecutive resets */
	ATA_EH_RESET_COOL_DOWN = 5000,

	/* max wait (ms) for a link to become ready before prereset
	 * gives up (NOTE(review): exact semantics depend on callers
	 * outside this chunk — confirm against reset path) */
	ATA_EH_PRERESET_TIMEOUT = 10000,
	/* interval (ms) for the fast-drain watchdog timer */
	ATA_EH_FASTDRAIN_INTERVAL = 3000,

	/* retries while a device reports UNIT ATTENTION */
	ATA_EH_UA_TRIES = 5,

	/* probe trial window (ms) and allowed trials within it */
	ATA_EH_PROBE_TRIAL_INTERVAL = 60000,
	ATA_EH_PROBE_TRIALS = 2,
};
91
92
93
94
95
96
97
/* Escalating timeouts (ms) for successive reset attempts; terminated by
 * ULONG_MAX which means "wait forever" on the final try. */
static const unsigned long ata_eh_reset_timeouts[] = {
	10000,		/* most drives spin up by 10 sec */
	10000,		/* > 99% of drives spin up by 20 sec */
	35000,		/* give > 30 secs of idleness for spurious retries */
	5000,		/* and sweet one last chance */
	ULONG_MAX,	/* > 1 min has elapsed, give up */
};
105
/* Escalating timeouts (ms) for IDENTIFY (DEVICE/PACKET) retries;
 * ULONG_MAX terminates the table. */
static const unsigned long ata_eh_identify_timeouts[] = {
	5000,		/* covers > 99% of successes and not too boring on failures */
	10000,		/* combined time till here is enough even for media access */
	30000,		/* for true idiots */
	ULONG_MAX,
};
112
/* Escalating timeouts (ms) for the remaining table-driven internal
 * commands; ULONG_MAX terminates the table. */
static const unsigned long ata_eh_other_timeouts[] = {
	5000,		/* same rationale as identify timeout */
	10000,		/* ditto */
	/* but no merciful 30sec for other commands, it just isn't worth it */
	ULONG_MAX,
};
119
/* Maps a NUL-terminated list of ATA commands to their escalating
 * timeout table (see ata_eh_cmd_timeout_table below). */
struct ata_eh_cmd_timeout_ent {
	const u8 *commands;		/* 0-terminated command list */
	const unsigned long *timeouts;	/* ULONG_MAX-terminated timeouts, ms */
};
124
125
126
127
128
129
130
131
132
133
134
135
136
/* Per-device timeout escalation table for internal commands.  Each entry
 * groups related commands sharing one timeout progression; a command's
 * current index is kept in ehc->cmd_timeout_idx and bumped by
 * ata_internal_cmd_timed_out().  CMDS() builds a 0-terminated list. */
#define CMDS(cmds...) (const u8 []){ cmds, 0 }
static const struct ata_eh_cmd_timeout_ent
ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
	{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
	  .timeouts = ata_eh_identify_timeouts, },
	{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_FEATURES),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
	  .timeouts = ata_eh_other_timeouts, },
};
#undef CMDS
152
153static void __ata_port_freeze(struct ata_port *ap);
154#ifdef CONFIG_PM
155static void ata_eh_handle_port_suspend(struct ata_port *ap);
156static void ata_eh_handle_port_resume(struct ata_port *ap);
157#else
158static void ata_eh_handle_port_suspend(struct ata_port *ap)
159{ }
160
161static void ata_eh_handle_port_resume(struct ata_port *ap)
162{ }
163#endif
164
/* Append a vsnprintf-formatted string to @ehi->desc, bounded by
 * ATA_EH_DESC_LEN; desc_len tracks the running length. */
static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
				 va_list args)
{
	ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
				    ATA_EH_DESC_LEN - ehi->desc_len,
				    fmt, args);
}
172
173
174
175
176
177
178
179
180
181
182
/**
 *	__ata_ehi_push_desc - push error description without adding separator
 *	@ehi: target EHI
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to @ehi->desc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}
191
192
193
194
195
196
197
198
199
200
201
202
/**
 *	ata_ehi_push_desc - push error description with separator
 *	@ehi: target EHI
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to @ehi->desc.
 *	If @ehi->desc is not empty, ", " is added in-between.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	if (ehi->desc_len)
		__ata_ehi_push_desc(ehi, ", ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}
214
215
216
217
218
219
220
221
222
223
/**
 *	ata_ehi_clear_desc - clean error description
 *	@ehi: target EHI
 *
 *	Reset @ehi->desc to an empty string.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_ehi_clear_desc(struct ata_eh_info *ehi)
{
	ehi->desc[0] = '\0';
	ehi->desc_len = 0;
}
229
230
231
232
233
234
235
236
237
238
239
240
241
242
/**
 *	ata_port_desc - append port description
 *	@ap: target ATA port
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to port
 *	description.  If port description is not empty, " " is added
 *	in-between.  This function is to be used while initializing
 *	ata_host.  The description is printed on host registration.
 *
 *	LOCKING:
 *	None.
 */
void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
{
	va_list args;

	/* only valid during port initialization */
	WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));

	if (ap->link.eh_info.desc_len)
		__ata_ehi_push_desc(&ap->link.eh_info, " ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
	va_end(args);
}
256
257#ifdef CONFIG_PCI
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
/**
 *	ata_port_pbar_desc - append PCI BAR description
 *	@ap: target ATA port
 *	@bar: target PCI BAR
 *	@offset: offset into PCI BAR (negative means the whole BAR)
 *	@name: name of the area
 *
 *	If @offset is negative, this function formats a string which
 *	contains the name, address, size and type of the BAR and
 *	appends it to the port description.  If @offset is zero or
 *	positive, only name and offsetted address is appended.
 *
 *	LOCKING:
 *	None.
 */
void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
			const char *name)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	char *type = "";
	unsigned long long start, len;

	/* "m" for MMIO BARs, "i" for port IO BARs */
	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		type = "m";
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		type = "i";

	start = (unsigned long long)pci_resource_start(pdev, bar);
	len = (unsigned long long)pci_resource_len(pdev, bar);

	if (offset < 0)
		ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
	else
		ata_port_desc(ap, "%s 0x%llx", name,
				start + (unsigned long long)offset);
}
295
296#endif
297
298static int ata_lookup_timeout_table(u8 cmd)
299{
300 int i;
301
302 for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
303 const u8 *cur;
304
305 for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
306 if (*cur == cmd)
307 return i;
308 }
309
310 return -1;
311}
312
313
314
315
316
317
318
319
320
321
322
323
324
325
/**
 *	ata_internal_cmd_timeout - determine timeout for an internal command
 *	@dev: target device
 *	@cmd: internal command to be issued
 *
 *	Determine timeout for internal command @cmd for @dev using the
 *	escalation table and the device's current escalation index.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	Determined timeout in milliseconds.
 */
unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	/* commands not in the table get the flat default */
	if (ent < 0)
		return ATA_EH_CMD_DFL_TIMEOUT;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	return ata_eh_cmd_timeout_table[ent].timeouts[idx];
}
338
339
340
341
342
343
344
345
346
347
348
349
350
/**
 *	ata_internal_cmd_timed_out - notification of internal command timeout
 *	@dev: target device
 *	@cmd: internal command which timed out
 *
 *	Notify EH that internal command @cmd for @dev timed out.  This
 *	function advances the device's escalation index so the next
 *	attempt uses a longer timeout, stopping at the table terminator.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return;

	/* bump the index only if there is a longer timeout left */
	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
		ehc->cmd_timeout_idx[dev->devno][ent]++;
}
364
/* Record an error (@eflags, @err_mask) in @ering at the next cursor
 * position, timestamped with the current jiffies.  err_mask must be
 * non-zero as zero marks unused ring slots. */
static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
			     unsigned int err_mask)
{
	struct ata_ering_entry *ent;

	WARN_ON(!err_mask);

	/* advance cursor, wrapping around the fixed-size ring */
	ering->cursor++;
	ering->cursor %= ATA_ERING_SIZE;

	ent = &ering->ring[ering->cursor];
	ent->eflags = eflags;
	ent->err_mask = err_mask;
	ent->timestamp = get_jiffies_64();
}
380
/* Return the most recently recorded entry of @ering, or NULL if the
 * slot under the cursor is unused (err_mask == 0). */
static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
{
	struct ata_ering_entry *ent = &ering->ring[ering->cursor];

	if (ent->err_mask)
		return ent;
	return NULL;
}
389
/* Forget all recorded errors in @ering. */
static void ata_ering_clear(struct ata_ering *ering)
{
	memset(ering, 0, sizeof(*ering));
}
394
/* Walk @ering from the newest entry backwards, invoking @map_fn on each
 * used entry.  Stops early when an unused slot is reached or @map_fn
 * returns non-zero; that return value is propagated to the caller. */
static int ata_ering_map(struct ata_ering *ering,
			 int (*map_fn)(struct ata_ering_entry *, void *),
			 void *arg)
{
	int idx, rc = 0;
	struct ata_ering_entry *ent;

	idx = ering->cursor;
	do {
		ent = &ering->ring[idx];
		if (!ent->err_mask)
			break;
		rc = map_fn(ent, arg);
		if (rc)
			break;
		/* step backwards through the ring, with wrap-around */
		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
	} while (idx != ering->cursor);

	return rc;
}
415
/* Return the EH actions pending for @dev: link-wide actions combined
 * with the device-specific ones. */
static unsigned int ata_eh_dev_action(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	return ehc->i.action | ehc->i.dev_action[dev->devno];
}
422
/* Clear @action bits from @ehi for @dev on @link.  With @dev == NULL the
 * action is cleared link-wide and for every device; otherwise the
 * link-wide bits are first distributed to all devices (so the other
 * devices keep them) and then cleared for @dev only. */
static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
				struct ata_eh_info *ehi, unsigned int action)
{
	struct ata_device *tdev;

	if (!dev) {
		ehi->action &= ~action;
		ata_for_each_dev(tdev, link, ALL)
			ehi->dev_action[tdev->devno] &= ~action;
	} else {
		/* doesn't make sense for port-wide EH actions */
		WARN_ON(!(action & ATA_EH_PERDEV_MASK));

		/* break ehi->action into ehi->dev_action */
		if (ehi->action & action) {
			ata_for_each_dev(tdev, link, ALL)
				ehi->dev_action[tdev->devno] |=
					ehi->action & action;
			ehi->action &= ~action;
		}

		/* turn off the specified per-dev action */
		ehi->dev_action[dev->devno] &= ~action;
	}
}
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
/**
 *	ata_scsi_timed_out - SCSI layer time out callback
 *	@cmd: timed out SCSI command
 *
 *	Handles SCSI layer timeout.  For new-style EH (the port has an
 *	->error_handler), return BLK_EH_NOT_HANDLED immediately so the
 *	command is passed on to EH via the normal recovery path.  For
 *	old-style EH, mark the active qc as timed out so eng_timeout
 *	picks it up; commands with no matching qc are reported handled.
 *
 *	LOCKING:
 *	Called from timer context.
 *
 *	RETURNS:
 *	BLK_EH_HANDLED or BLK_EH_NOT_HANDLED.
 */
enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	struct ata_queued_cmd *qc;
	enum blk_eh_timer_return ret;

	DPRINTK("ENTER\n");

	/* new-style EH: let the SCSI EH thread drive recovery */
	if (ap->ops->error_handler) {
		ret = BLK_EH_NOT_HANDLED;
		goto out;
	}

	ret = BLK_EH_HANDLED;
	spin_lock_irqsave(ap->lock, flags);
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc) {
		WARN_ON(qc->scsicmd != cmd);
		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
		qc->err_mask |= AC_ERR_TIMEOUT;
		ret = BLK_EH_NOT_HANDLED;
	}
	spin_unlock_irqrestore(ap->lock, flags);

 out:
	DPRINTK("EXIT, ret=%d\n", ret);
	return ret;
}
498
/* Quiesce the port for driver unload: restore SControl (keeping the
 * low SPD/DET bits cleared) on every link, disable all devices, then
 * freeze the port and mark it unloaded so no further EH runs. */
static void ata_eh_unload(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/* Restore SControl IPM and SPD for the next driver and
	 * disable attached devices.
	 */
	ata_for_each_link(link, ap, PMP_FIRST) {
		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
		ata_for_each_dev(dev, link, ALL)
			ata_dev_disable(dev);
	}

	/* freeze and set UNLOADED */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_freeze(ap);			/* won't be thawed */
	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
	ap->pflags |= ATA_PFLAG_UNLOADED;

	spin_unlock_irqrestore(ap->lock, flags);
}
523
524
525
526
527
528
529
530
531
532
533
534
535
/**
 *	ata_scsi_error - SCSI layer error handler callback
 *	@host: SCSI host on which error occurred
 *
 *	Handles SCSI-layer-thrown error events: sorts timed out
 *	commands onto their qcs, runs the port's error handler
 *	(repeating while new errors arrive, up to ATA_EH_MAX_TRIES),
 *	then completes finished commands and wakes up EH waiters.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 */
void ata_scsi_error(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);
	int i;
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* synchronize with port task */
	ata_port_flush_task(ap);

	/* For new EH, all qcs are finished in one of three ways -
	 * normal completion, error completion, and SCSI timeout.
	 * Both completions can race against SCSI timeout.  When normal
	 * completion wins, the qc never reaches EH.  When error
	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
	 *
	 * When SCSI timeout wins, things are a bit more complex.
	 * Normal or error completion can occur after the timeout but
	 * before this point.  In such cases, both types of
	 * completions are honored.  A scmd is determined to have
	 * timed out iff its associated qc is active and not failed.
	 */
	if (ap->ops->error_handler) {
		struct scsi_cmnd *scmd, *tmp;
		int nr_timedout = 0;

		spin_lock_irqsave(ap->lock, flags);

		/* This must occur under the ap->lock as we don't want
		   a polled recovery to race the real interrupt handler */
		if (ap->ops->lost_interrupt)
			ap->ops->lost_interrupt(ap);

		/* match each timed-out scmd against an active qc */
		list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
			struct ata_queued_cmd *qc;

			for (i = 0; i < ATA_MAX_QUEUE; i++) {
				qc = __ata_qc_from_tag(ap, i);
				if (qc->flags & ATA_QCFLAG_ACTIVE &&
				    qc->scsicmd == scmd)
					break;
			}

			if (i < ATA_MAX_QUEUE) {
				/* the scmd has an associated qc */
				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
					/* which hasn't failed yet, timeout */
					qc->err_mask |= AC_ERR_TIMEOUT;
					qc->flags |= ATA_QCFLAG_FAILED;
					nr_timedout++;
				}
			} else {
				/* Normal completion occurred after
				 * SCSI timeout but before this point.
				 * Successfully complete it.
				 */
				scmd->retries = scmd->allowed;
				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
			}
		}

		/* If we have timed out qcs, freeze the port so no
		 * further commands are issued until EH runs.
		 */
		if (nr_timedout)
			__ata_port_freeze(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* initialize eh_tries */
		ap->eh_tries = ATA_EH_MAX_TRIES;
	} else
		spin_unlock_wait(ap->lock);

	/* invoke error handler, repeating while EH remains pending
	 * (new errors scheduled during recovery) */
 repeat:
	if (ap->ops->error_handler) {
		struct ata_link *link;

		/* kill fast drain timer */
		del_timer_sync(&ap->fastdrain_timer);

		/* process port resume request */
		ata_eh_handle_port_resume(ap);

		/* fetch & clear EH info */
		spin_lock_irqsave(ap->lock, flags);

		ata_for_each_link(link, ap, HOST_FIRST) {
			struct ata_eh_context *ehc = &link->eh_context;
			struct ata_device *dev;

			/* move eh_info into eh_context for this EH run */
			memset(&link->eh_context, 0, sizeof(link->eh_context));
			link->eh_context.i = link->eh_info;
			memset(&link->eh_info, 0, sizeof(link->eh_info));

			/* snapshot transfer mode / NCQ state for recovery */
			ata_for_each_dev(dev, link, ENABLED) {
				int devno = dev->devno;

				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
				if (ata_ncq_enabled(dev))
					ehc->saved_ncq_enabled |= 1 << devno;
			}
		}

		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		ap->excl_link = NULL;	/* don't maintain exclusion over EH */

		spin_unlock_irqrestore(ap->lock, flags);

		/* invoke EH, skip if unloading or suspended */
		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
			ap->ops->error_handler(ap);
		else {
			/* if unloading, commence suicide */
			if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
			    !(ap->pflags & ATA_PFLAG_UNLOADED))
				ata_eh_unload(ap);
			ata_eh_finish(ap);
		}

		/* process port suspend request */
		ata_eh_handle_port_suspend(ap);

		/* Exception might have happend after ->error_handler
		 * recovered the port but before this point.  Repeat
		 * EH in such case.
		 */
		spin_lock_irqsave(ap->lock, flags);

		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
			if (--ap->eh_tries) {
				spin_unlock_irqrestore(ap->lock, flags);
				goto repeat;
			}
			ata_port_printk(ap, KERN_ERR, "EH pending after %d "
					"tries, giving up\n", ATA_EH_MAX_TRIES);
			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		}

		/* this run is complete, make sure EH info is clear */
		ata_for_each_link(link, ap, HOST_FIRST)
			memset(&link->eh_info, 0, sizeof(link->eh_info));

		/* Clear host_eh_scheduled while holding ap->lock such
		 * that if exception occurs after this point but
		 * before EH completion, SCSI midlayer will
		 * re-initiate EH.
		 */
		host->host_eh_scheduled = 0;

		spin_unlock_irqrestore(ap->lock, flags);
	} else {
		/* old-style EH: delegate to the driver's eng_timeout */
		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
		ap->ops->eng_timeout(ap);
	}

	/* finish or retry handled scmd's and clean up */
	WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));

	scsi_eh_flush_done_q(&ap->eh_done_q);

	/* clean up */
	spin_lock_irqsave(ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_LOADING)
		ap->pflags &= ~ATA_PFLAG_LOADING;
	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
		queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0);

	if (ap->pflags & ATA_PFLAG_RECOVERED)
		ata_port_printk(ap, KERN_INFO, "EH complete\n");

	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

	/* tell wait_eh that we're done */
	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
	wake_up_all(&ap->eh_wait_q);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("EXIT\n");
}
736
737
738
739
740
741
742
743
744
745
/**
 *	ata_port_wait_eh - Wait for the currently pending EH to complete
 *	@ap: Port to wait EH for
 *
 *	Wait until the currently pending EH is complete.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_wait_eh(struct ata_port *ap)
{
	unsigned long flags;
	DEFINE_WAIT(wait);

 retry:
	spin_lock_irqsave(ap->lock, flags);

	/* sleep until neither pending nor in-progress flags are set */
	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(ap->lock, flags);
		schedule();
		spin_lock_irqsave(ap->lock, flags);
	}
	finish_wait(&ap->eh_wait_q, &wait);

	spin_unlock_irqrestore(ap->lock, flags);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ap->scsi_host)) {
		msleep(10);
		goto retry;
	}
}
770
/* Count qcs currently in flight on @ap.  The last tag is skipped; it is
 * reserved (presumably for internal commands — confirm against
 * ata_qc_new in libata-core). */
static int ata_eh_nr_in_flight(struct ata_port *ap)
{
	unsigned int tag;
	int nr = 0;

	/* count only non-internal commands */
	for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
		if (ata_qc_from_tag(ap, tag))
			nr++;

	return nr;
}
783
/* Fast-drain watchdog.  If no command completed since the timer was
 * armed (in-flight count unchanged), time out everything in flight and
 * freeze the port; otherwise re-arm for another interval. */
void ata_eh_fastdrain_timerfn(unsigned long arg)
{
	struct ata_port *ap = (void *)arg;
	unsigned long flags;
	int cnt;

	spin_lock_irqsave(ap->lock, flags);

	cnt = ata_eh_nr_in_flight(ap);

	/* are we done? */
	if (!cnt)
		goto out_unlock;

	if (cnt == ap->fastdrain_cnt) {
		unsigned int tag;

		/* No progress during the last interval, tag all
		 * in-flight qcs as timed out and freeze the port.
		 */
		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
			if (qc)
				qc->err_mask |= AC_ERR_TIMEOUT;
		}

		ata_port_freeze(ap);
	} else {
		/* some qcs have finished, give it another chance */
		ap->fastdrain_cnt = cnt;
		ap->fastdrain_timer.expires =
			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
		add_timer(&ap->fastdrain_timer);
	}

 out_unlock:
	spin_unlock_irqrestore(ap->lock, flags);
}
822
823
824
825
826
827
828
829
830
831
832
833
834
/**
 *	ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
 *	@ap: target ATA port
 *	@fastdrain: activate fast drain
 *
 *	Set ATA_PFLAG_EH_PENDING and, if @fastdrain is set and there are
 *	commands in flight, arm the fast drain timer so a stuck port is
 *	force-drained after ATA_EH_FASTDRAIN_INTERVAL.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
{
	int cnt;

	/* already scheduled? */
	if (ap->pflags & ATA_PFLAG_EH_PENDING)
		return;

	ap->pflags |= ATA_PFLAG_EH_PENDING;

	if (!fastdrain)
		return;

	/* do we have in-flight qcs? */
	cnt = ata_eh_nr_in_flight(ap);
	if (!cnt)
		return;

	/* activate fast drain */
	ap->fastdrain_cnt = cnt;
	ap->fastdrain_timer.expires =
		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
	add_timer(&ap->fastdrain_timer);
}
859
860
861
862
863
864
865
866
867
868
869
/**
 *	ata_qc_schedule_eh - schedule qc for error handling
 *	@qc: command to schedule error handling for
 *
 *	Schedule error handling for @qc.  EH will kick in as soon as
 *	other commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	WARN_ON(!ap->ops->error_handler);

	qc->flags |= ATA_QCFLAG_FAILED;
	ata_eh_set_pending(ap, 1);

	/* The following will fail if timeout has already expired.
	 * ata_scsi_error() takes care of such scmds on EH entry.
	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
	 * this function completes.
	 */
	blk_abort_request(qc->scsicmd->request);
}
886
887
888
889
890
891
892
893
894
895
896
/**
 *	ata_port_schedule_eh - schedule error handling without a qc
 *	@ap: ATA port to schedule EH for
 *
 *	Schedule error handling for @ap.  EH will kick in as soon as
 *	all commands are drained.  No-op while the port is still
 *	initializing.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->pflags & ATA_PFLAG_INITIALIZING)
		return;

	ata_eh_set_pending(ap, 1);
	scsi_schedule_eh(ap->scsi_host);

	DPRINTK("port EH scheduled\n");
}
909
/* Abort all in-flight qcs on @ap (or only those belonging to @link when
 * @link is non-NULL) by failing and completing them, which funnels them
 * into EH.  If nothing was aborted, schedule port-wide EH directly.
 * Returns the number of aborted commands. */
static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
{
	int tag, nr_aborted = 0;

	WARN_ON(!ap->ops->error_handler);

	/* we're gonna abort all commands, no need for fast drain */
	ata_eh_set_pending(ap, 0);

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

		if (qc && (!link || qc->dev->link == link)) {
			qc->flags |= ATA_QCFLAG_FAILED;
			ata_qc_complete(qc);
			nr_aborted++;
		}
	}

	/* nothing in flight — still make sure EH runs */
	if (!nr_aborted)
		ata_port_schedule_eh(ap);

	return nr_aborted;
}
934
935
936
937
938
939
940
941
942
943
944
945
946
/**
 *	ata_link_abort - abort all qc's on link
 *	@link: ATA link to abort qc's for
 *
 *	Abort all active qc's active on @link and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_link_abort(struct ata_link *link)
{
	return ata_do_link_abort(link->ap, link);
}
951
952
953
954
955
956
957
958
959
960
961
962
963
/**
 *	ata_port_abort - abort all qc's on port
 *	@ap: ATA port to abort qc's for
 *
 *	Abort all active qc's of @ap and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_port_abort(struct ata_port *ap)
{
	return ata_do_link_abort(ap, NULL);
}
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
/**
 *	__ata_port_freeze - freeze port
 *	@ap: ATA port to freeze
 *
 *	Call the driver's ->freeze callback (if any) and mark the port
 *	frozen with ATA_PFLAG_FROZEN.  A frozen port is expected to
 *	stay quiet until thawed by EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->ops->freeze)
		ap->ops->freeze(ap);

	ap->pflags |= ATA_PFLAG_FROZEN;

	DPRINTK("ata%u port frozen\n", ap->print_id);
}
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
/**
 *	ata_port_freeze - abort &amp; freeze port
 *	@ap: ATA port to freeze
 *
 *	Freeze @ap and abort all in-flight commands.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted commands.
 */
int ata_port_freeze(struct ata_port *ap)
{
	int nr_aborted;

	WARN_ON(!ap->ops->error_handler);

	__ata_port_freeze(ap);
	nr_aborted = ata_port_abort(ap);

	return nr_aborted;
}
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
/**
 *	sata_async_notification - SATA async notification handler
 *	@ap: ATA port where async notification occurred
 *
 *	Handler to be called when async notification via SDB FIS is
 *	received.  Reads and clears SNTF, then notifies ATAPI devices
 *	of media change (directly for host-link devices, per SNTF bit
 *	for PMP fan-out links) or schedules EH when the notification
 *	cannot be attributed (no SNTF, or PMP control-port bit set).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	1 if EH is scheduled, 0 otherwise.
 */
int sata_async_notification(struct ata_port *ap)
{
	u32 sntf;
	int rc;

	if (!(ap->flags & ATA_FLAG_AN))
		return 0;

	/* read and clear the notification register */
	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
	if (rc == 0)
		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);

	if (!sata_pmp_attached(ap) || rc) {
		/* PMP is not attached or SNTF is not available */
		if (!sata_pmp_attached(ap)) {
			/* PMP is not attached.  Check whether ATAPI
			 * AN is configured.  If so, notify media
			 * change.
			 */
			struct ata_device *dev = ap->link.device;

			if ((dev->class == ATA_DEV_ATAPI) &&
			    (dev->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(dev);
			return 0;
		} else {
			/* PMP is attached but SNTF is not available.
			 * ATAPI async media change notification is
			 * not used.  The PMP must be reporting PHY
			 * status change, schedule EH.
			 */
			ata_port_schedule_eh(ap);
			return 1;
		}
	} else {
		/* PMP is attached and SNTF is available */
		struct ata_link *link;

		/* check and notify ATAPI AN per fan-out link */
		ata_for_each_link(link, ap, EDGE) {
			if (!(sntf & (1 << link->pmp)))
				continue;

			if ((link->device->class == ATA_DEV_ATAPI) &&
			    (link->device->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(link->device);
		}

		/* If PMP is reporting that PHY status of some
		 * downstream ports has changed, schedule EH.
		 */
		if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
			ata_port_schedule_eh(ap);
			return 1;
		}

		return 0;
	}
}
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
/**
 *	ata_eh_freeze_port - EH helper to freeze port
 *	@ap: ATA port to freeze
 *
 *	Freeze @ap.  No-op for old-style EH ports.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_freeze_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);
	__ata_port_freeze(ap);
	spin_unlock_irqrestore(ap->lock, flags);
}
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
/**
 *	ata_eh_thaw_port - EH helper to thaw port
 *	@ap: ATA port to thaw
 *
 *	Thaw frozen port @ap: clear ATA_PFLAG_FROZEN and call the
 *	driver's ->thaw callback.  No-op for old-style EH ports.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_thaw_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_FROZEN;

	if (ap->ops->thaw)
		ap->ops->thaw(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("ata%u port thawed\n", ap->print_id);
}
1146
/* Dummy scsidone callback used while completing a qc from EH; the real
 * completion is driven through the EH done queue instead. */
static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
	/* nada */
}
1151
/* Complete @qc from EH context: swap in the dummy scsidone, free the
 * qc under the host lock, then hand the scmd to the SCSI EH done
 * queue for final disposition. */
static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *scmd = qc->scsicmd;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	qc->scsidone = ata_eh_scsidone;
	__ata_qc_complete(qc);
	WARN_ON(ata_tag_valid(qc->tag));	/* qc must be freed now */
	spin_unlock_irqrestore(ap->lock, flags);

	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}
1166
1167
1168
1169
1170
1171
1172
1173
/**
 *	ata_eh_qc_complete - Complete an active ATA command from EH
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA command has
 *	completed.  To be used from EH.  Resets retries so the command
 *	is not counted as failed.
 */
void ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	scmd->retries = scmd->allowed;
	__ata_eh_qc_complete(qc);
}
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
/**
 *	ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
 *	@qc: Command to retry
 *
 *	Indicate to the mid and upper layers that an ATA command
 *	should be retried.  To be used from EH.
 *
 *	SCSI midlayer limits the number of retries to scmd->allowed.
 *	scmd->retries is decremented for commands which get retried
 *	due to unrelated failures (qc->err_mask is zero) so that they
 *	aren't penalized.
 */
void ata_eh_qc_retry(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	if (!qc->err_mask && scmd->retries)
		scmd->retries--;
	__ata_eh_qc_complete(qc);
}
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
/**
 *	ata_dev_disable - disable ATA device
 *	@dev: ATA device to disable
 *
 *	Disable @dev: force transfer mode to PIO0, bump dev->class to
 *	its "unknown" sibling (class + 1 marks the device disabled),
 *	and clear the error history.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_dev_disable(struct ata_device *dev)
{
	if (!ata_dev_enabled(dev))
		return;

	if (ata_msg_drv(dev->link->ap))
		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
	ata_acpi_on_disable(dev);
	ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
	dev->class++;	/* class + 1 == disabled class */

	/* From now till the next successful probe, ering is used to
	 * track probe failures.  Clear accumulated device error info.
	 */
	ata_ering_clear(&dev->ering);
}
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
/**
 *	ata_eh_detach_dev - detach ATA device
 *	@dev: ATA device to detach
 *
 *	Detach @dev: disable it, take it offline from SCSI (scheduling
 *	SCSI hotplug cleanup if needed), and clear all per-device EH
 *	actions and saved transfer-mode/NCQ state.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_detach_dev(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	ata_dev_disable(dev);

	spin_lock_irqsave(ap->lock, flags);

	dev->flags &= ~ATA_DFLAG_DETACH;

	if (ata_scsi_offline_dev(dev)) {
		dev->flags |= ATA_DFLAG_DETACHED;
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
	}

	/* clear per-dev EH actions */
	ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
	ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
	ehc->saved_xfer_mode[dev->devno] = 0;
	ehc->saved_ncq_enabled &= ~(1 << dev->devno);

	spin_unlock_irqrestore(ap->lock, flags);
}
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
/**
 *	ata_eh_about_to_do - about to perform eh_action
 *	@link: target ATA link
 *	@dev: target ATA dev for per-dev action (can be NULL)
 *	@action: action about to be performed
 *
 *	Called just before performing EH actions to clear related bits
 *	in @link->eh_info such that eh actions are not unnecessarily
 *	repeated.  Also marks the port as recovered unless the run is
 *	quiet or on the slave link.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
			unsigned int action)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_info *ehi = &link->eh_info;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);

	ata_eh_clear_action(link, dev, ehi, action);

	/* About to take EH action, set RECOVERED.  Ignore actions on
	 * slave links as master will do them again.
	 */
	if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
		ap->pflags |= ATA_PFLAG_RECOVERED;

	spin_unlock_irqrestore(ap->lock, flags);
}
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
/**
 *	ata_eh_done - EH action complete
 *	@link: ATA link for which EH actions are complete
 *	@dev: target ATA dev for per-dev action (can be NULL)
 *	@action: action just completed
 *
 *	Called right after performing EH actions to clear related bits
 *	in @link->eh_context.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_done(struct ata_link *link, struct ata_device *dev,
		 unsigned int action)
{
	struct ata_eh_context *ehc = &link->eh_context;

	ata_eh_clear_action(link, dev, &ehc->i, action);
}
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330static const char *ata_err_string(unsigned int err_mask)
1331{
1332 if (err_mask & AC_ERR_HOST_BUS)
1333 return "host bus error";
1334 if (err_mask & AC_ERR_ATA_BUS)
1335 return "ATA bus error";
1336 if (err_mask & AC_ERR_TIMEOUT)
1337 return "timeout";
1338 if (err_mask & AC_ERR_HSM)
1339 return "HSM violation";
1340 if (err_mask & AC_ERR_SYSTEM)
1341 return "internal error";
1342 if (err_mask & AC_ERR_MEDIA)
1343 return "media error";
1344 if (err_mask & AC_ERR_INVALID)
1345 return "invalid argument";
1346 if (err_mask & AC_ERR_DEV)
1347 return "device error";
1348 return "unknown error";
1349}
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
/**
 *	ata_read_log_page - read a specific log page
 *	@dev: target device
 *	@page: page to read
 *	@buf: buffer to store read page
 *	@sectors: number of sectors to read
 *
 *	Read log page using READ LOG EXT command (PIO data-in).
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_read_log_page(struct ata_device *dev,
				      u8 page, void *buf, unsigned int sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	DPRINTK("read log page - page %d\n", page);

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_READ_LOG_EXT;
	tf.lbal = page;
	tf.nsect = sectors;
	tf.hob_nsect = sectors >> 8;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_PIO;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     buf, sectors * ATA_SECT_SIZE, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
/**
 *	ata_eh_read_log_10h - Read log page 10h for NCQ error details
 *	@dev: target device
 *	@tag: transfer tag of the failed command (out)
 *	@tf: result taskfile of the failed command (out)
 *
 *	Read log page 10h to obtain NCQ error details and clear error
 *	condition.  A warning is printed on checksum mismatch but the
 *	data is still used.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -EIO if the page could not be read, -ENOENT if
 *	the device reports no error (NQ bit set).
 */
static int ata_eh_read_log_10h(struct ata_device *dev,
			       int *tag, struct ata_taskfile *tf)
{
	u8 *buf = dev->link->ap->sector_buf;
	unsigned int err_mask;
	u8 csum;
	int i;

	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
	if (err_mask)
		return -EIO;

	/* whole-sector byte sum must be zero for a valid page */
	csum = 0;
	for (i = 0; i < ATA_SECT_SIZE; i++)
		csum += buf[i];
	if (csum)
		ata_dev_printk(dev, KERN_WARNING,
			       "invalid checksum 0x%x on log page 10h\n", csum);

	/* bit 7 of byte 0 (NQ) set means no error to report */
	if (buf[0] & 0x80)
		return -ENOENT;

	*tag = buf[0] & 0x1f;

	/* unpack the failed command's result taskfile */
	tf->command = buf[2];
	tf->feature = buf[3];
	tf->lbal = buf[4];
	tf->lbam = buf[5];
	tf->lbah = buf[6];
	tf->device = buf[7];
	tf->hob_lbal = buf[8];
	tf->hob_lbam = buf[9];
	tf->hob_lbah = buf[10];
	tf->nsect = buf[12];
	tf->hob_nsect = buf[13];

	return 0;
}
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
/**
 *	atapi_eh_tur - perform ATAPI TEST_UNIT_READY
 *	@dev: target ATAPI device
 *	@r_sense_key: out parameter for sense_key
 *
 *	Perform TEST_UNIT_READY.  On device error the sense key is
 *	extracted from the error register (feature field, upper nibble)
 *	and stored in *@r_sense_key.
 *
 *	LOCKING:
 *	EH context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask on failure.
 */
static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
{
	u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	struct ata_taskfile tf;
	unsigned int err_mask;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;
	tf.protocol = ATAPI_PROT_NODATA;

	err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
	if (err_mask == AC_ERR_DEV)
		*r_sense_key = tf.feature >> 4;
	return err_mask;
}
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
/**
 *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
 *	@dev: device to perform REQUEST_SENSE to
 *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
 *	@dfl_sense_key: default sense key to use
 *
 *	Perform ATAPI REQUEST_SENSE after the device reported CHECK
 *	SENSE.  The sense buffer is pre-filled with a fixed-format
 *	frame carrying @dfl_sense_key so something sane is reported
 *	even if REQUEST SENSE itself fails.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask on failure
 */
static unsigned int atapi_eh_request_sense(struct ata_device *dev,
					   u8 *sense_buf, u8 dfl_sense_key)
{
	u8 cdb[ATAPI_CDB_LEN] =
		{ REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;

	DPRINTK("ATAPI request sense\n");

	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	/* initialize sense_buf with the error register,
	 * for the case where they are -not- overwritten
	 */
	sense_buf[0] = 0x70;		/* fixed-format response code */
	sense_buf[2] = dfl_sense_key;

	/* some devices time out if garbage left in tf */
	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;

	/* is it pointless to prefer PIO for "safety reasons"? */
	if (ap->flags & ATA_FLAG_PIO_DMA) {
		tf.protocol = ATAPI_PROT_DMA;
		tf.feature |= ATAPI_PKT_DMA;
	} else {
		tf.protocol = ATAPI_PROT_PIO;
		tf.lbam = SCSI_SENSE_BUFFERSIZE;
		tf.lbah = 0;
	}

	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
				 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
}
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538static void ata_eh_analyze_serror(struct ata_link *link)
1539{
1540 struct ata_eh_context *ehc = &link->eh_context;
1541 u32 serror = ehc->i.serror;
1542 unsigned int err_mask = 0, action = 0;
1543 u32 hotplug_mask;
1544
1545 if (serror & (SERR_PERSISTENT | SERR_DATA)) {
1546 err_mask |= AC_ERR_ATA_BUS;
1547 action |= ATA_EH_RESET;
1548 }
1549 if (serror & SERR_PROTOCOL) {
1550 err_mask |= AC_ERR_HSM;
1551 action |= ATA_EH_RESET;
1552 }
1553 if (serror & SERR_INTERNAL) {
1554 err_mask |= AC_ERR_SYSTEM;
1555 action |= ATA_EH_RESET;
1556 }
1557
1558
1559
1560
1561
1562
1563 hotplug_mask = 0;
1564
1565 if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
1566 hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
1567 else
1568 hotplug_mask = SERR_PHYRDY_CHG;
1569
1570 if (serror & hotplug_mask)
1571 ata_ehi_hotplugged(&ehc->i);
1572
1573 ehc->i.err_mask |= err_mask;
1574 ehc->i.action |= action;
1575}
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
/**
 *	ata_eh_analyze_ncq_error - analyze NCQ error
 *	@link: ATA link to analyze NCQ error for
 *
 *	Read log page 10h, determine the offending qc and acquire its
 *	error taskfile.  The per-link device error is transferred to
 *	the offending qc (AC_ERR_DEV is cleared link-wide and set on
 *	the qc together with AC_ERR_NCQ).
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_eh_analyze_ncq_error(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev = link->device;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	int tag, rc;

	/* if frozen, we can't do much */
	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	/* is it NCQ device error? */
	if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
		return;

	/* has LLDD analyzed already? */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask)
			return;
	}

	/* okay, this error is ours */
	rc = ata_eh_read_log_10h(dev, &tag, &tf);
	if (rc) {
		ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
				"(errno=%d)\n", rc);
		return;
	}

	if (!(link->sactive & (1 << tag))) {
		ata_link_printk(link, KERN_ERR, "log page 10h reported "
				"inactive tag %d\n", tag);
		return;
	}

	/* we've got the perpetrator, condemn it */
	qc = __ata_qc_from_tag(ap, tag);
	memcpy(&qc->result_tf, &tf, sizeof(tf));
	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
	ehc->i.err_mask &= ~AC_ERR_DEV;
}
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
/**
 *	ata_eh_analyze_tf - analyze taskfile of a failed qc
 *	@qc: qc to analyze
 *	@tf: Taskfile registers to analyze
 *
 *	Analyze taskfile of @qc and further determine cause of
 *	failure.  This function also requests ATAPI sense data if
 *	available.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Determined recovery action
 */
static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
				      const struct ata_taskfile *tf)
{
	unsigned int tmp, action = 0;
	u8 stat = tf->command, err = tf->feature;

	/* BSY or DRQ still set, or DRDY clear: HSM violation */
	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
		qc->err_mask |= AC_ERR_HSM;
		return ATA_EH_RESET;
	}

	if (stat & (ATA_ERR | ATA_DF))
		qc->err_mask |= AC_ERR_DEV;
	else
		return 0;

	switch (qc->dev->class) {
	case ATA_DEV_ATA:
		/* decode the error register bits */
		if (err & ATA_ICRC)
			qc->err_mask |= AC_ERR_ATA_BUS;
		if (err & ATA_UNC)
			qc->err_mask |= AC_ERR_MEDIA;
		if (err & ATA_IDNF)
			qc->err_mask |= AC_ERR_INVALID;
		break;

	case ATA_DEV_ATAPI:
		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
			tmp = atapi_eh_request_sense(qc->dev,
						qc->scsicmd->sense_buffer,
						qc->result_tf.feature >> 4);
			if (!tmp) {
				/* ATA_QCFLAG_SENSE_VALID is used to
				 * tell atapi_qc_complete() that sense
				 * data is already valid.
				 *
				 * TODO: interpret sense data and set
				 * appropriate err_mask.
				 */
				qc->flags |= ATA_QCFLAG_SENSE_VALID;
			} else
				qc->err_mask |= tmp;
		}
	}

	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
		action |= ATA_EH_RESET;

	return action;
}
1704
/* Map (@eflags, @err_mask) to an ATA_ECAT_* category.  *@xfer_ok is a
 * running flag across the error ring walk: once a non-dubious xfer is
 * seen it stays set; while clear, categories are shifted into their
 * DUBIOUS_* variants via @base. */
static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
				   int *xfer_ok)
{
	int base = 0;

	if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
		*xfer_ok = 1;

	if (!*xfer_ok)
		base = ATA_ECAT_DUBIOUS_NONE;	/* DUBIOUS_* = NONE offset + cat */

	if (err_mask & AC_ERR_ATA_BUS)
		return base + ATA_ECAT_ATA_BUS;

	if (err_mask & AC_ERR_TIMEOUT)
		return base + ATA_ECAT_TOUT_HSM;

	if (eflags & ATA_EFLAG_IS_IO) {
		if (err_mask & AC_ERR_HSM)
			return base + ATA_ECAT_TOUT_HSM;
		/* pure device error on IO (not media/invalid) is "unknown dev" */
		if ((err_mask &
		     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
			return base + ATA_ECAT_UNK_DEV;
	}

	return 0;
}
1732
/* Accumulator passed to speed_down_verdict_cb while walking the
 * device error ring. */
struct speed_down_verdict_arg {
	u64 since;			/* ignore entries older than this */
	int xfer_ok;			/* running flag for DUBIOUS categorization */
	int nr_errors[ATA_ECAT_NR];	/* error count per category */
};
1738
/* ata_ering_map() callback: categorize @ent and bump the matching
 * counter in the speed_down_verdict_arg.  Returns -1 to stop the walk
 * once entries get older than arg->since. */
static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
{
	struct speed_down_verdict_arg *arg = void_arg;
	int cat;

	if (ent->timestamp < arg->since)
		return -1;

	cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
				      &arg->xfer_ok);
	arg->nr_errors[cat]++;

	return 0;
}
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
/**
 *	ata_eh_speed_down_verdict - Determine speed down verdict
 *	@dev: Device of interest
 *
 *	This function examines error ring of @dev and determines
 *	whether NCQ needs to be turned off, transfer speed should be
 *	stepped down, or falling back to PIO is necessary, based on
 *	error counts over the last 5 and 10 minute windows.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	OR of ATA_EH_SPDN_* flags.
 */
static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
{
	const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
	u64 j64 = get_jiffies_64();
	struct speed_down_verdict_arg arg;
	unsigned int verdict = 0;

	/* scan past 5 mins of error history */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j5mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
		verdict |= ATA_EH_SPDN_SPEED_DOWN |
			ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;

	if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
		verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;

	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;

	/* scan past 10 mins of error history */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j10mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
		verdict |= ATA_EH_SPDN_NCQ_OFF;

	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
		verdict |= ATA_EH_SPDN_SPEED_DOWN;

	return verdict;
}
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
/**
 *	ata_eh_speed_down - record error and speed down if necessary
 *	@dev: Failed device
 *	@eflags: mask of ATA_EFLAG_* describing the failure context
 *	@err_mask: err_mask of the error
 *
 *	Record the error in @dev's error ring and, depending on the
 *	verdict from ata_eh_speed_down_verdict(), apply exactly one
 *	remedy per invocation: turn off NCQ, lower the SATA link or
 *	transfer-mode speed, or fall back to PIO.
 *
 *	RETURNS:
 *	Determined recovery action (0, or ATA_EH_RESET when a remedy
 *	was applied that requires a reset to take effect).
 */
static unsigned int ata_eh_speed_down(struct ata_device *dev,
				unsigned int eflags, unsigned int err_mask)
{
	struct ata_link *link = ata_dev_phys_link(dev);
	int xfer_ok = 0;
	unsigned int verdict;
	unsigned int action = 0;

	/* don't bother if the error doesn't fall into any category */
	if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
		return 0;

	/* record error and determine whether speed down is necessary */
	ata_ering_record(&dev->ering, eflags, err_mask);
	verdict = ata_eh_speed_down_verdict(dev);

	/* turn off NCQ? — only if NCQ is on and neither PIO nor
	 * NCQ_OFF is already in effect */
	if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
	    (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
			   ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
		dev->flags |= ATA_DFLAG_NCQ_OFF;
		ata_dev_printk(dev, KERN_WARNING,
			       "NCQ disabled due to excessive errors\n");
		goto done;
	}

	/* speed down? */
	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
		/* speed down SATA link speed if possible */
		if (sata_down_spd_limit(link, 0) == 0) {
			action |= ATA_EH_RESET;
			goto done;
		}

		/* lower transfer mode — two steps before giving up */
		if (dev->spdn_cnt < 2) {
			static const int dma_dnxfer_sel[] =
				{ ATA_DNXFER_DMA, ATA_DNXFER_40C };
			static const int pio_dnxfer_sel[] =
				{ ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
			int sel;

			/* pick the next downgrade for the current mode class */
			if (dev->xfer_shift != ATA_SHIFT_PIO)
				sel = dma_dnxfer_sel[dev->spdn_cnt];
			else
				sel = pio_dnxfer_sel[dev->spdn_cnt];

			dev->spdn_cnt++;

			if (ata_down_xfermask_limit(dev, sel) == 0) {
				action |= ATA_EH_RESET;
				goto done;
			}
		}
	}

	/* Fall back to PIO?  Slowing down from any DMA mode is allowed
	 * only for PATA or ATAPI devices (SATA ATA is excluded), and
	 * only after the two-step downgrade above has been exhausted.
	 */
	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
	    (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
			dev->spdn_cnt = 0;
			action |= ATA_EH_RESET;
			goto done;
		}
	}

	return 0;
 done:
	/* device has been slowed down, blow the error history unless
	 * the verdict asks for it to be kept */
	if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
		ata_ering_clear(&dev->ering);
	return action;
}
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
/**
 *	ata_eh_link_autopsy - analyze error and determine recovery action
 *	@link: host link to perform autopsy on
 *
 *	Analyze why @link failed and determine which recovery actions
 *	are needed.  The result is recorded in link->eh_context.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_link_autopsy(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;
	unsigned int all_err_mask = 0, eflags = 0;
	int tag;
	u32 serror;
	int rc;

	DPRINTK("ENTER\n");

	if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
		return;

	/* obtain and analyze SError */
	rc = sata_scr_read(link, SCR_ERROR, &serror);
	if (rc == 0) {
		ehc->i.serror |= serror;
		ata_eh_analyze_serror(link);
	} else if (rc != -EOPNOTSUPP) {
		/* SError read failed — reset and reprobe everything */
		ehc->i.probe_mask |= ATA_ALL_DEVICES;
		ehc->i.action |= ATA_EH_RESET;
		ehc->i.err_mask |= AC_ERR_OTHER;
	}

	/* analyze NCQ failure */
	ata_eh_analyze_ncq_error(link);

	/* any real error trumps AC_ERR_OTHER */
	if (ehc->i.err_mask & ~AC_ERR_OTHER)
		ehc->i.err_mask &= ~AC_ERR_OTHER;

	all_err_mask |= ehc->i.err_mask;

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link)
			continue;

		/* inherit upper level err_mask */
		qc->err_mask |= ehc->i.err_mask;

		/* analyze TF */
		ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);

		/* DEV errors are probably spurious in case of ATA_BUS error */
		if (qc->err_mask & AC_ERR_ATA_BUS)
			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
					  AC_ERR_INVALID);

		/* any real error trumps unknown error */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;

		/* SENSE_VALID trumps dev/unknown error and revalidation */
		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);

		/* determine whether the command is worth retrying */
		if (!(qc->err_mask & AC_ERR_INVALID) &&
		    ((qc->flags & ATA_QCFLAG_IO) || qc->err_mask != AC_ERR_DEV))
			qc->flags |= ATA_QCFLAG_RETRY;

		/* accumulate error info */
		ehc->i.dev = qc->dev;
		all_err_mask |= qc->err_mask;
		if (qc->flags & ATA_QCFLAG_IO)
			eflags |= ATA_EFLAG_IS_IO;
	}

	/* enforce default EH actions */
	if (ap->pflags & ATA_PFLAG_FROZEN ||
	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
		ehc->i.action |= ATA_EH_RESET;
	else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
		 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
		ehc->i.action |= ATA_EH_REVALIDATE;

	/* If we have offending qc and the associated failed device,
	 * perform per-dev EH action only on the offending device.
	 */
	if (ehc->i.dev) {
		ehc->i.dev_action[ehc->i.dev->devno] |=
			ehc->i.action & ATA_EH_PERDEV_MASK;
		ehc->i.action &= ~ATA_EH_PERDEV_MASK;
	}

	/* propagate timeout to host link (PMP media error stays local) */
	if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
		ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;

	/* Determine whether a command error is caused by an invalid
	 * transfer mode; default to the sole device on the link.
	 */
	dev = ehc->i.dev;
	if (!dev && ((ata_link_max_devices(link) == 1 &&
		      ata_dev_enabled(link->device))))
		dev = link->device;

	if (dev) {
		if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
			eflags |= ATA_EFLAG_DUBIOUS_XFER;
		ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
	}

	DPRINTK("EXIT\n");
}
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
/**
 *	ata_eh_autopsy - analyze error and determine recovery action
 *	@ap: host port to perform autopsy on
 *
 *	Run ata_eh_link_autopsy() on every EH-pending link of @ap and,
 *	when a slave link exists, merge its results into the master
 *	link so recovery handles both together.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_eh_autopsy(struct ata_port *ap)
{
	struct ata_link *link;

	ata_for_each_link(link, ap, EDGE)
		ata_eh_link_autopsy(link);

	/* Handle the frigging slave link.  Autopsy is done similarly
	 * but the flagged actions and results are transferred over to
	 * the master link and handled from there.
	 */
	if (ap->slave_link) {
		struct ata_eh_context *mehc = &ap->link.eh_context;
		struct ata_eh_context *sehc = &ap->slave_link->eh_context;

		/* transfer control flags from master to slave */
		sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;

		/* perform autopsy on the slave link */
		ata_eh_link_autopsy(ap->slave_link);

		/* transfer actions from slave to master and clear slave */
		ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
		mehc->i.action |= sehc->i.action;
		mehc->i.dev_action[1] |= sehc->i.dev_action[1];
		mehc->i.flags |= sehc->i.flags;
		ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
	}

	/* Autopsy of fanout ports can affect host link autopsy.
	 * Perform host link autopsy last.
	 */
	if (sata_pmp_attached(ap))
		ata_eh_link_autopsy(&ap->link);
}
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
/**
 *	ata_get_cmd_descript - get description for ATA command
 *	@command: ATA command code to get description for
 *
 *	Return a textual description of the given command, or NULL if
 *	the command is not known (always NULL when
 *	CONFIG_ATA_VERBOSE_ERROR is disabled).
 *
 *	LOCKING:
 *	None
 */
const char *ata_get_cmd_descript(u8 command)
{
#ifdef CONFIG_ATA_VERBOSE_ERROR
	static const struct
	{
		u8 command;
		const char *text;
	} cmd_descr[] = {
		{ ATA_CMD_DEV_RESET,		"DEVICE RESET" },
		{ ATA_CMD_CHK_POWER, 		"CHECK POWER MODE" },
		{ ATA_CMD_STANDBY, 		"STANDBY" },
		{ ATA_CMD_IDLE, 		"IDLE" },
		{ ATA_CMD_EDD, 			"EXECUTE DEVICE DIAGNOSTIC" },
		{ ATA_CMD_DOWNLOAD_MICRO,   	"DOWNLOAD MICROCODE" },
		{ ATA_CMD_NOP,			"NOP" },
		{ ATA_CMD_FLUSH, 		"FLUSH CACHE" },
		{ ATA_CMD_FLUSH_EXT, 		"FLUSH CACHE EXT" },
		{ ATA_CMD_ID_ATA,  		"IDENTIFY DEVICE" },
		{ ATA_CMD_ID_ATAPI, 		"IDENTIFY PACKET DEVICE" },
		{ ATA_CMD_SERVICE, 		"SERVICE" },
		{ ATA_CMD_READ, 		"READ DMA" },
		{ ATA_CMD_READ_EXT, 		"READ DMA EXT" },
		{ ATA_CMD_READ_QUEUED, 		"READ DMA QUEUED" },
		{ ATA_CMD_READ_STREAM_EXT, 	"READ STREAM EXT" },
		{ ATA_CMD_READ_STREAM_DMA_EXT,  "READ STREAM DMA EXT" },
		{ ATA_CMD_WRITE, 		"WRITE DMA" },
		{ ATA_CMD_WRITE_EXT, 		"WRITE DMA EXT" },
		{ ATA_CMD_WRITE_QUEUED, 	"WRITE DMA QUEUED EXT" },
		{ ATA_CMD_WRITE_STREAM_EXT, 	"WRITE STREAM EXT" },
		{ ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" },
		{ ATA_CMD_WRITE_FUA_EXT,	"WRITE DMA FUA EXT" },
		{ ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
		{ ATA_CMD_FPDMA_READ,		"READ FPDMA QUEUED" },
		{ ATA_CMD_FPDMA_WRITE,		"WRITE FPDMA QUEUED" },
		{ ATA_CMD_PIO_READ,		"READ SECTOR(S)" },
		{ ATA_CMD_PIO_READ_EXT,		"READ SECTOR(S) EXT" },
		{ ATA_CMD_PIO_WRITE,		"WRITE SECTOR(S)" },
		{ ATA_CMD_PIO_WRITE_EXT,	"WRITE SECTOR(S) EXT" },
		{ ATA_CMD_READ_MULTI,		"READ MULTIPLE" },
		{ ATA_CMD_READ_MULTI_EXT,	"READ MULTIPLE EXT" },
		{ ATA_CMD_WRITE_MULTI,		"WRITE MULTIPLE" },
		{ ATA_CMD_WRITE_MULTI_EXT,	"WRITE MULTIPLE EXT" },
		{ ATA_CMD_WRITE_MULTI_FUA_EXT,	"WRITE MULTIPLE FUA EXT" },
		{ ATA_CMD_SET_FEATURES,		"SET FEATURES" },
		{ ATA_CMD_SET_MULTI,		"SET MULTIPLE MODE" },
		{ ATA_CMD_VERIFY,		"READ VERIFY SECTOR(S)" },
		{ ATA_CMD_VERIFY_EXT,		"READ VERIFY SECTOR(S) EXT" },
		{ ATA_CMD_WRITE_UNCORR_EXT,	"WRITE UNCORRECTABLE EXT" },
		{ ATA_CMD_STANDBYNOW1,		"STANDBY IMMEDIATE" },
		{ ATA_CMD_IDLEIMMEDIATE,	"IDLE IMMEDIATE" },
		{ ATA_CMD_SLEEP,		"SLEEP" },
		{ ATA_CMD_INIT_DEV_PARAMS,	"INITIALIZE DEVICE PARAMETERS" },
		{ ATA_CMD_READ_NATIVE_MAX,	"READ NATIVE MAX ADDRESS" },
		{ ATA_CMD_READ_NATIVE_MAX_EXT,	"READ NATIVE MAX ADDRESS EXT" },
		{ ATA_CMD_SET_MAX,		"SET MAX ADDRESS" },
		{ ATA_CMD_SET_MAX_EXT,		"SET MAX ADDRESS EXT" },
		{ ATA_CMD_READ_LOG_EXT,		"READ LOG EXT" },
		{ ATA_CMD_WRITE_LOG_EXT,	"WRITE LOG EXT" },
		{ ATA_CMD_READ_LOG_DMA_EXT,	"READ LOG DMA EXT" },
		{ ATA_CMD_WRITE_LOG_DMA_EXT, 	"WRITE LOG DMA EXT" },
		{ ATA_CMD_TRUSTED_RCV,		"TRUSTED RECEIVE" },
		{ ATA_CMD_TRUSTED_RCV_DMA, 	"TRUSTED RECEIVE DMA" },
		{ ATA_CMD_TRUSTED_SND,		"TRUSTED SEND" },
		{ ATA_CMD_TRUSTED_SND_DMA, 	"TRUSTED SEND DMA" },
		{ ATA_CMD_PMP_READ,		"READ BUFFER" },
		{ ATA_CMD_PMP_WRITE,		"WRITE BUFFER" },
		{ ATA_CMD_CONF_OVERLAY,		"DEVICE CONFIGURATION OVERLAY" },
		{ ATA_CMD_SEC_SET_PASS,		"SECURITY SET PASSWORD" },
		{ ATA_CMD_SEC_UNLOCK,		"SECURITY UNLOCK" },
		{ ATA_CMD_SEC_ERASE_PREP,	"SECURITY ERASE PREPARE" },
		{ ATA_CMD_SEC_ERASE_UNIT,	"SECURITY ERASE UNIT" },
		{ ATA_CMD_SEC_FREEZE_LOCK,	"SECURITY FREEZE LOCK" },
		{ ATA_CMD_SEC_DISABLE_PASS,	"SECURITY DISABLE PASSWORD" },
		{ ATA_CMD_CONFIG_STREAM,	"CONFIGURE STREAM" },
		{ ATA_CMD_SMART,		"SMART" },
		{ ATA_CMD_MEDIA_LOCK,		"DOOR LOCK" },
		{ ATA_CMD_MEDIA_UNLOCK,		"DOOR UNLOCK" },
		{ ATA_CMD_CHK_MED_CRD_TYP, 	"CHECK MEDIA CARD TYPE" },
		{ ATA_CMD_CFA_REQ_EXT_ERR, 	"CFA REQUEST EXTENDED ERROR" },
		{ ATA_CMD_CFA_WRITE_NE,		"CFA WRITE SECTORS WITHOUT ERASE" },
		{ ATA_CMD_CFA_TRANS_SECT,	"CFA TRANSLATE SECTOR" },
		{ ATA_CMD_CFA_ERASE,		"CFA ERASE SECTORS" },
		{ ATA_CMD_CFA_WRITE_MULT_NE, 	"CFA WRITE MULTIPLE WITHOUT ERASE" },
		{ ATA_CMD_READ_LONG,		"READ LONG (with retries)" },
		{ ATA_CMD_READ_LONG_ONCE,	"READ LONG (without retries)" },
		{ ATA_CMD_WRITE_LONG,		"WRITE LONG (with retries)" },
		{ ATA_CMD_WRITE_LONG_ONCE,	"WRITE LONG (without retries)" },
		{ ATA_CMD_RESTORE,		"RECALIBRATE" },
		{ 0,				NULL } /* terminate! */
	};

	unsigned int i;
	for (i = 0; cmd_descr[i].text; i++)
		if (cmd_descr[i].command == command)
			return cmd_descr[i].text;
#endif

	return NULL;
}
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
/**
 *	ata_eh_link_report - report error handling to user
 *	@link: ATA link EH is going on
 *
 *	Report EH to user: the link-level exception summary, decoded
 *	SError bits and, for each failed qc, the command/result
 *	taskfiles and decoded status/error registers.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_link_report(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const char *frozen, *desc;
	char tries_buf[6];
	int tag, nr_failed = 0;

	if (ehc->i.flags & ATA_EHI_QUIET)
		return;

	desc = NULL;
	if (ehc->i.desc[0] != '\0')
		desc = ehc->i.desc;

	/* count failed qcs worth reporting */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link ||
		    ((qc->flags & ATA_QCFLAG_QUIET) &&
		     qc->err_mask == AC_ERR_DEV))
			continue;
		if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
			continue;

		nr_failed++;
	}

	if (!nr_failed && !ehc->i.err_mask)
		return;

	frozen = "";
	if (ap->pflags & ATA_PFLAG_FROZEN)
		frozen = " frozen";

	/* NOTE(review): snprintf() already NUL-terminates, so the
	 * memset and the "- 1" are belt-and-braces, not required. */
	memset(tries_buf, 0, sizeof(tries_buf));
	if (ap->eh_tries < ATA_EH_MAX_TRIES)
		snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d",
			 ap->eh_tries);

	if (ehc->i.dev) {
		ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
			       "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
			       ehc->i.err_mask, link->sactive, ehc->i.serror,
			       ehc->i.action, frozen, tries_buf);
		if (desc)
			ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc);
	} else {
		ata_link_printk(link, KERN_ERR, "exception Emask 0x%x "
				"SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
				ehc->i.err_mask, link->sactive, ehc->i.serror,
				ehc->i.action, frozen, tries_buf);
		if (desc)
			ata_link_printk(link, KERN_ERR, "%s\n", desc);
	}

#ifdef CONFIG_ATA_VERBOSE_ERROR
	/* decode SError bits */
	if (ehc->i.serror)
		ata_link_printk(link, KERN_ERR,
		  "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
		  ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
		  ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
		  ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
		  ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
		  ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
		  ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
		  ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
		  ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
		  ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
		  ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
		  ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
		  ehc->i.serror & SERR_CRC ? "BadCRC " : "",
		  ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
		  ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
		  ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
		  ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
		  ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
#endif

	/* report each failed qc */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
		struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
		const u8 *cdb = qc->cdb;
		char data_buf[20] = "";
		char cdb_buf[70] = "";

		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
			continue;

		if (qc->dma_dir != DMA_NONE) {
			static const char *dma_str[] = {
				[DMA_BIDIRECTIONAL]	= "bidi",
				[DMA_TO_DEVICE]		= "out",
				[DMA_FROM_DEVICE]	= "in",
			};
			static const char *prot_str[] = {
				[ATA_PROT_PIO]		= "pio",
				[ATA_PROT_DMA]		= "dma",
				[ATA_PROT_NCQ]		= "ncq",
				[ATAPI_PROT_PIO]	= "pio",
				[ATAPI_PROT_DMA]	= "dma",
			};

			snprintf(data_buf, sizeof(data_buf), " %s %u %s",
				 prot_str[qc->tf.protocol], qc->nbytes,
				 dma_str[qc->dma_dir]);
		}

		if (ata_is_atapi(qc->tf.protocol)) {
			/* for ATAPI, print the SCSI command or raw CDB */
			if (qc->scsicmd)
				scsi_print_command(qc->scsicmd);
			else
				snprintf(cdb_buf, sizeof(cdb_buf),
				 "cdb %02x %02x %02x %02x %02x %02x %02x %02x "
				 "%02x %02x %02x %02x %02x %02x %02x %02x\n         ",
				 cdb[0], cdb[1], cdb[2], cdb[3],
				 cdb[4], cdb[5], cdb[6], cdb[7],
				 cdb[8], cdb[9], cdb[10], cdb[11],
				 cdb[12], cdb[13], cdb[14], cdb[15]);
		} else {
			const char *descr = ata_get_cmd_descript(cmd->command);
			if (descr)
				ata_dev_printk(qc->dev, KERN_ERR,
					"failed command: %s\n", descr);
		}

		ata_dev_printk(qc->dev, KERN_ERR,
			"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
			"tag %d%s\n         %s"
			"res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
			"Emask 0x%x (%s)%s\n",
			cmd->command, cmd->feature, cmd->nsect,
			cmd->lbal, cmd->lbam, cmd->lbah,
			cmd->hob_feature, cmd->hob_nsect,
			cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
			cmd->device, qc->tag, data_buf, cdb_buf,
			res->command, res->feature, res->nsect,
			res->lbal, res->lbam, res->lbah,
			res->hob_feature, res->hob_nsect,
			res->hob_lbal, res->hob_lbam, res->hob_lbah,
			res->device, qc->err_mask, ata_err_string(qc->err_mask),
			qc->err_mask & AC_ERR_NCQ ? " <F>" : "");

#ifdef CONFIG_ATA_VERBOSE_ERROR
		/* decode the status register */
		if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
				    ATA_ERR)) {
			if (res->command & ATA_BUSY)
				ata_dev_printk(qc->dev, KERN_ERR,
				  "status: { Busy }\n");
			else
				ata_dev_printk(qc->dev, KERN_ERR,
				  "status: { %s%s%s%s}\n",
				  res->command & ATA_DRDY ? "DRDY " : "",
				  res->command & ATA_DF ? "DF " : "",
				  res->command & ATA_DRQ ? "DRQ " : "",
				  res->command & ATA_ERR ? "ERR " : "");
		}

		/* decode the error register (feature field of result TF) */
		if (cmd->command != ATA_CMD_PACKET &&
		    (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
				     ATA_ABORTED)))
			ata_dev_printk(qc->dev, KERN_ERR,
			  "error: { %s%s%s%s}\n",
			  res->feature & ATA_ICRC ? "ICRC " : "",
			  res->feature & ATA_UNC ? "UNC " : "",
			  res->feature & ATA_IDNF ? "IDNF " : "",
			  res->feature & ATA_ABORTED ? "ABRT " : "");
#endif
	}
}
2406
2407
2408
2409
2410
2411
2412
2413
2414
2415
2416void ata_eh_report(struct ata_port *ap)
2417{
2418 struct ata_link *link;
2419
2420 ata_for_each_link(link, ap, HOST_FIRST)
2421 ata_eh_link_report(link);
2422}
2423
2424static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
2425 unsigned int *classes, unsigned long deadline,
2426 bool clear_classes)
2427{
2428 struct ata_device *dev;
2429
2430 if (clear_classes)
2431 ata_for_each_dev(dev, link, ALL)
2432 classes[dev->devno] = ATA_DEV_UNKNOWN;
2433
2434 return reset(link, classes, deadline);
2435}
2436
2437static int ata_eh_followup_srst_needed(struct ata_link *link,
2438 int rc, const unsigned int *classes)
2439{
2440 if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
2441 return 0;
2442 if (rc == -EAGAIN)
2443 return 1;
2444 if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
2445 return 1;
2446 return 0;
2447}
2448
2449int ata_eh_reset(struct ata_link *link, int classify,
2450 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
2451 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
2452{
2453 struct ata_port *ap = link->ap;
2454 struct ata_link *slave = ap->slave_link;
2455 struct ata_eh_context *ehc = &link->eh_context;
2456 struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
2457 unsigned int *classes = ehc->classes;
2458 unsigned int lflags = link->flags;
2459 int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
2460 int max_tries = 0, try = 0;
2461 struct ata_link *failed_link;
2462 struct ata_device *dev;
2463 unsigned long deadline, now;
2464 ata_reset_fn_t reset;
2465 unsigned long flags;
2466 u32 sstatus;
2467 int nr_unknown, rc;
2468
2469
2470
2471
2472 while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
2473 max_tries++;
2474 if (link->flags & ATA_LFLAG_NO_HRST)
2475 hardreset = NULL;
2476 if (link->flags & ATA_LFLAG_NO_SRST)
2477 softreset = NULL;
2478
2479
2480 if (ehc->i.flags & ATA_EHI_DID_RESET) {
2481 now = jiffies;
2482 WARN_ON(time_after(ehc->last_reset, now));
2483 deadline = ata_deadline(ehc->last_reset,
2484 ATA_EH_RESET_COOL_DOWN);
2485 if (time_before(now, deadline))
2486 schedule_timeout_uninterruptible(deadline - now);
2487 }
2488
2489 spin_lock_irqsave(ap->lock, flags);
2490 ap->pflags |= ATA_PFLAG_RESETTING;
2491 spin_unlock_irqrestore(ap->lock, flags);
2492
2493 ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2494
2495 ata_for_each_dev(dev, link, ALL) {
2496
2497
2498
2499
2500
2501
2502
2503 dev->pio_mode = XFER_PIO_0;
2504
2505
2506
2507
2508
2509
2510 if (ap->ops->set_piomode)
2511 ap->ops->set_piomode(ap, dev);
2512 }
2513
2514
2515 reset = NULL;
2516 ehc->i.action &= ~ATA_EH_RESET;
2517 if (hardreset) {
2518 reset = hardreset;
2519 ehc->i.action |= ATA_EH_HARDRESET;
2520 } else if (softreset) {
2521 reset = softreset;
2522 ehc->i.action |= ATA_EH_SOFTRESET;
2523 }
2524
2525 if (prereset) {
2526 unsigned long deadline = ata_deadline(jiffies,
2527 ATA_EH_PRERESET_TIMEOUT);
2528
2529 if (slave) {
2530 sehc->i.action &= ~ATA_EH_RESET;
2531 sehc->i.action |= ehc->i.action;
2532 }
2533
2534 rc = prereset(link, deadline);
2535
2536
2537
2538
2539
2540 if (slave && (rc == 0 || rc == -ENOENT)) {
2541 int tmp;
2542
2543 tmp = prereset(slave, deadline);
2544 if (tmp != -ENOENT)
2545 rc = tmp;
2546
2547 ehc->i.action |= sehc->i.action;
2548 }
2549
2550 if (rc) {
2551 if (rc == -ENOENT) {
2552 ata_link_printk(link, KERN_DEBUG,
2553 "port disabled. ignoring.\n");
2554 ehc->i.action &= ~ATA_EH_RESET;
2555
2556 ata_for_each_dev(dev, link, ALL)
2557 classes[dev->devno] = ATA_DEV_NONE;
2558
2559 rc = 0;
2560 } else
2561 ata_link_printk(link, KERN_ERR,
2562 "prereset failed (errno=%d)\n", rc);
2563 goto out;
2564 }
2565
2566
2567
2568
2569 if (reset && !(ehc->i.action & ATA_EH_RESET)) {
2570 ata_for_each_dev(dev, link, ALL)
2571 classes[dev->devno] = ATA_DEV_NONE;
2572 if ((ap->pflags & ATA_PFLAG_FROZEN) &&
2573 ata_is_host_link(link))
2574 ata_eh_thaw_port(ap);
2575 rc = 0;
2576 goto out;
2577 }
2578 }
2579
2580 retry:
2581
2582
2583
2584 if (ata_is_host_link(link))
2585 ata_eh_freeze_port(ap);
2586
2587 deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);
2588
2589 if (reset) {
2590 if (verbose)
2591 ata_link_printk(link, KERN_INFO, "%s resetting link\n",
2592 reset == softreset ? "soft" : "hard");
2593
2594
2595 ehc->last_reset = jiffies;
2596 if (reset == hardreset)
2597 ehc->i.flags |= ATA_EHI_DID_HARDRESET;
2598 else
2599 ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
2600
2601 rc = ata_do_reset(link, reset, classes, deadline, true);
2602 if (rc && rc != -EAGAIN) {
2603 failed_link = link;
2604 goto fail;
2605 }
2606
2607
2608 if (slave && reset == hardreset) {
2609 int tmp;
2610
2611 if (verbose)
2612 ata_link_printk(slave, KERN_INFO,
2613 "hard resetting link\n");
2614
2615 ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
2616 tmp = ata_do_reset(slave, reset, classes, deadline,
2617 false);
2618 switch (tmp) {
2619 case -EAGAIN:
2620 rc = -EAGAIN;
2621 case 0:
2622 break;
2623 default:
2624 failed_link = slave;
2625 rc = tmp;
2626 goto fail;
2627 }
2628 }
2629
2630
2631 if (reset == hardreset &&
2632 ata_eh_followup_srst_needed(link, rc, classes)) {
2633 reset = softreset;
2634
2635 if (!reset) {
2636 ata_link_printk(link, KERN_ERR,
2637 "follow-up softreset required "
2638 "but no softreset avaliable\n");
2639 failed_link = link;
2640 rc = -EINVAL;
2641 goto fail;
2642 }
2643
2644 ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2645 rc = ata_do_reset(link, reset, classes, deadline, true);
2646 if (rc) {
2647 failed_link = link;
2648 goto fail;
2649 }
2650 }
2651 } else {
2652 if (verbose)
2653 ata_link_printk(link, KERN_INFO, "no reset method "
2654 "available, skipping reset\n");
2655 if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
2656 lflags |= ATA_LFLAG_ASSUME_ATA;
2657 }
2658
2659
2660
2661
2662 ata_for_each_dev(dev, link, ALL) {
2663
2664
2665
2666
2667 dev->pio_mode = XFER_PIO_0;
2668 dev->flags &= ~ATA_DFLAG_SLEEPING;
2669
2670 if (ata_phys_link_offline(ata_dev_phys_link(dev)))
2671 continue;
2672
2673
2674 if (lflags & ATA_LFLAG_ASSUME_ATA)
2675 classes[dev->devno] = ATA_DEV_ATA;
2676 else if (lflags & ATA_LFLAG_ASSUME_SEMB)
2677 classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
2678 }
2679
2680
2681 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
2682 link->sata_spd = (sstatus >> 4) & 0xf;
2683 if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
2684 slave->sata_spd = (sstatus >> 4) & 0xf;
2685
2686
2687 if (ata_is_host_link(link))
2688 ata_eh_thaw_port(ap);
2689
2690
2691
2692
2693
2694
2695
2696
2697 if (postreset) {
2698 postreset(link, classes);
2699 if (slave)
2700 postreset(slave, classes);
2701 }
2702
2703
2704
2705
2706
2707
2708
2709 spin_lock_irqsave(link->ap->lock, flags);
2710 memset(&link->eh_info, 0, sizeof(link->eh_info));
2711 if (slave)
2712 memset(&slave->eh_info, 0, sizeof(link->eh_info));
2713 ap->pflags &= ~ATA_PFLAG_EH_PENDING;
2714 spin_unlock_irqrestore(link->ap->lock, flags);
2715
2716
2717
2718
2719
2720
2721
2722
2723
2724 nr_unknown = 0;
2725 ata_for_each_dev(dev, link, ALL) {
2726 if (ata_phys_link_online(ata_dev_phys_link(dev))) {
2727 if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2728 ata_dev_printk(dev, KERN_DEBUG, "link online "
2729 "but device misclassifed\n");
2730 classes[dev->devno] = ATA_DEV_NONE;
2731 nr_unknown++;
2732 }
2733 } else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
2734 if (ata_class_enabled(classes[dev->devno]))
2735 ata_dev_printk(dev, KERN_DEBUG, "link offline, "
2736 "clearing class %d to NONE\n",
2737 classes[dev->devno]);
2738 classes[dev->devno] = ATA_DEV_NONE;
2739 } else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2740 ata_dev_printk(dev, KERN_DEBUG, "link status unknown, "
2741 "clearing UNKNOWN to NONE\n");
2742 classes[dev->devno] = ATA_DEV_NONE;
2743 }
2744 }
2745
2746 if (classify && nr_unknown) {
2747 if (try < max_tries) {
2748 ata_link_printk(link, KERN_WARNING, "link online but "
2749 "%d devices misclassified, retrying\n",
2750 nr_unknown);
2751 failed_link = link;
2752 rc = -EAGAIN;
2753 goto fail;
2754 }
2755 ata_link_printk(link, KERN_WARNING,
2756 "link online but %d devices misclassified, "
2757 "device detection might fail\n", nr_unknown);
2758 }
2759
2760
2761 ata_eh_done(link, NULL, ATA_EH_RESET);
2762 if (slave)
2763 ata_eh_done(slave, NULL, ATA_EH_RESET);
2764 ehc->last_reset = jiffies;
2765 ehc->i.action |= ATA_EH_REVALIDATE;
2766
2767 rc = 0;
2768 out:
2769
2770 ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2771 if (slave)
2772 sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2773
2774 spin_lock_irqsave(ap->lock, flags);
2775 ap->pflags &= ~ATA_PFLAG_RESETTING;
2776 spin_unlock_irqrestore(ap->lock, flags);
2777
2778 return rc;
2779
2780 fail:
2781
2782 if (!ata_is_host_link(link) &&
2783 sata_scr_read(link, SCR_STATUS, &sstatus))
2784 rc = -ERESTART;
2785
2786 if (rc == -ERESTART || try >= max_tries)
2787 goto out;
2788
2789 now = jiffies;
2790 if (time_before(now, deadline)) {
2791 unsigned long delta = deadline - now;
2792
2793 ata_link_printk(failed_link, KERN_WARNING,
2794 "reset failed (errno=%d), retrying in %u secs\n",
2795 rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
2796
2797 while (delta)
2798 delta = schedule_timeout_uninterruptible(delta);
2799 }
2800
2801 if (try == max_tries - 1) {
2802 sata_down_spd_limit(link, 0);
2803 if (slave)
2804 sata_down_spd_limit(slave, 0);
2805 } else if (rc == -EPIPE)
2806 sata_down_spd_limit(failed_link, 0);
2807
2808 if (hardreset)
2809 reset = hardreset;
2810 goto retry;
2811}
2812
/*
 * Pull pending ATA_EH_PARK requests from each device's eh_info into
 * its link's eh_context so the current EH iteration acts on them.
 *
 * NOTE(review): ap->park_req_pending is re-initialized under ap->lock
 * *before* the per-device PARK bits are harvested — presumably so a
 * park request racing with this pull is either seen in this pass or
 * completes park_req_pending and is picked up by a subsequent pull.
 * Confirm against the sysfs park store path (ata_scsi_park_store()).
 */
static inline void ata_eh_pull_park_action(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	INIT_COMPLETION(ap->park_req_pending);
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ALL) {
			struct ata_eh_info *ehi = &link->eh_info;

			/* move the PARK action from eh_info to eh_context */
			link->eh_context.i.dev_action[dev->devno] |=
				ehi->dev_action[dev->devno] & ATA_EH_PARK;
			ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
		}
	}
	spin_unlock_irqrestore(ap->lock, flags);
}
2858
2859static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
2860{
2861 struct ata_eh_context *ehc = &dev->link->eh_context;
2862 struct ata_taskfile tf;
2863 unsigned int err_mask;
2864
2865 ata_tf_init(dev, &tf);
2866 if (park) {
2867 ehc->unloaded_mask |= 1 << dev->devno;
2868 tf.command = ATA_CMD_IDLEIMMEDIATE;
2869 tf.feature = 0x44;
2870 tf.lbal = 0x4c;
2871 tf.lbam = 0x4e;
2872 tf.lbah = 0x55;
2873 } else {
2874 ehc->unloaded_mask &= ~(1 << dev->devno);
2875 tf.command = ATA_CMD_CHK_POWER;
2876 }
2877
2878 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
2879 tf.protocol |= ATA_PROT_NODATA;
2880 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
2881 if (park && (err_mask || tf.lbal != 0xc4)) {
2882 ata_dev_printk(dev, KERN_ERR, "head unload failed!\n");
2883 ehc->unloaded_mask &= ~(1 << dev->devno);
2884 }
2885}
2886
/*
 * Revalidate devices scheduled for ATA_EH_REVALIDATE and attach
 * newly-found devices on @link.  On failure, the offending device is
 * returned through @r_failed_dev and -errno is returned; 0 on success.
 */
static int ata_eh_revalidate_and_attach(struct ata_link *link,
					struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;
	unsigned int new_mask = 0;	/* devnos newly identified this pass */
	unsigned long flags;
	int rc = 0;

	DPRINTK("ENTER\n");

	/* Identify in reverse device order — presumably so that, on
	 * PATA, the slave device is handled before the master for
	 * cable-detection (PDIAG-) purposes; TODO confirm.
	 */
	ata_for_each_dev(dev, link, ALL_REVERSE) {
		unsigned int action = ata_eh_dev_action(dev);
		unsigned int readid_flags = 0;

		if (ehc->i.flags & ATA_EHI_DID_RESET)
			readid_flags |= ATA_READID_POSTRESET;

		if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
			WARN_ON(dev->class == ATA_DEV_PMP);

			/* can't revalidate an offline device */
			if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
				rc = -EIO;
				goto err;
			}

			ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
			rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
						readid_flags);
			if (rc)
				goto err;

			ata_eh_done(link, dev, ATA_EH_REVALIDATE);

			/* Configuration may have changed, reconfigure
			 * transfer mode.
			 */
			ehc->i.flags |= ATA_EHI_SETMODE;

			/* schedule the scsi_rescan() here to act on changes */
			queue_work(ata_aux_wq, &(ap->scsi_rescan_task));
		} else if (dev->class == ATA_DEV_UNKNOWN &&
			   ehc->tries[dev->devno] &&
			   ata_class_enabled(ehc->classes[dev->devno])) {
			/* New device discovered by reset.  IDENTIFY it
			 * with dev->class temporarily set to the newly
			 * detected class so ata_dev_read_id() behaves
			 * accordingly, then restore UNKNOWN until the
			 * device is actually configured below.
			 */
			dev->class = ehc->classes[dev->devno];

			if (dev->class == ATA_DEV_PMP)
				rc = sata_pmp_attach(dev);
			else
				rc = ata_dev_read_id(dev, &dev->class,
						     readid_flags, dev->id);

			/* read_id might have changed class, store and reset */
			ehc->classes[dev->devno] = dev->class;
			dev->class = ATA_DEV_UNKNOWN;

			switch (rc) {
			case 0:
				/* clear error history to give the new
				 * device a clean slate */
				ata_ering_clear(&dev->ering);
				new_mask |= 1 << dev->devno;
				break;
			case -ENOENT:
				/* IDENTIFY says the device is gone; keep
				 * the port thawed so hotplug still works */
				ata_eh_thaw_port(ap);
				break;
			default:
				goto err;
			}
		}
	}

	/* PDIAG- should have been released, ask cable type if post-reset */
	if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
		if (ap->ops->cable_detect)
			ap->cbl = ap->ops->cable_detect(ap);
		ata_force_cbl(ap);
	}

	/* Configure new devices forward such that user doesn't see
	 * device detection messages backwards.
	 */
	ata_for_each_dev(dev, link, ALL) {
		if (!(new_mask & (1 << dev->devno)))
			continue;

		dev->class = ehc->classes[dev->devno];

		if (dev->class == ATA_DEV_PMP)
			continue;

		ehc->i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ehc->i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc) {
			dev->class = ATA_DEV_UNKNOWN;
			goto err;
		}

		/* new device discovered — announce to SCSI layer */
		spin_lock_irqsave(ap->lock, flags);
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
		spin_unlock_irqrestore(ap->lock, flags);

		/* new device needs transfer mode set */
		ehc->i.flags |= ATA_EHI_SETMODE;
	}

	return 0;

 err:
	*r_failed_dev = dev;
	DPRINTK("EXIT rc=%d\n", rc);
	return rc;
}
3015
3016
3017
3018
3019
3020
3021
3022
3023
3024
3025
3026
3027
3028
3029
3030
/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@link: link on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.) using
 *	the controller's own ->set_mode if it has one, otherwise the
 *	generic ata_do_set_mode().  If setting fails for some device,
 *	a pointer to it is returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise.
 */
int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;
	int rc;

	/* the current mode has proven itself (DUBIOUS_XFER is clear) —
	 * drop the DUBIOUS mark from the newest error-ring entry so it
	 * no longer counts toward the DUBIOUS_* speed-down categories */
	ata_for_each_dev(dev, link, ENABLED) {
		if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
			struct ata_ering_entry *ent;

			ent = ata_ering_top(&dev->ering);
			if (ent)
				ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
		}
	}

	/* has private set_mode? */
	if (ap->ops->set_mode)
		rc = ap->ops->set_mode(link, r_failed_dev);
	else
		rc = ata_do_set_mode(link, r_failed_dev);

	/* if transfer mode or NCQ enablement changed, mark the new
	 * configuration dubious until a transfer succeeds with it */
	ata_for_each_dev(dev, link, ENABLED) {
		struct ata_eh_context *ehc = &link->eh_context;
		u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
		u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));

		if (dev->xfer_mode != saved_xfer_mode ||
		    ata_ncq_enabled(dev) != saved_ncq)
			dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
	}

	return rc;
}
3067
3068
3069
3070
3071
3072
3073
3074
3075
3076
3077
3078
3079
3080
3081
3082static int atapi_eh_clear_ua(struct ata_device *dev)
3083{
3084 int i;
3085
3086 for (i = 0; i < ATA_EH_UA_TRIES; i++) {
3087 u8 *sense_buffer = dev->link->ap->sector_buf;
3088 u8 sense_key = 0;
3089 unsigned int err_mask;
3090
3091 err_mask = atapi_eh_tur(dev, &sense_key);
3092 if (err_mask != 0 && err_mask != AC_ERR_DEV) {
3093 ata_dev_printk(dev, KERN_WARNING, "TEST_UNIT_READY "
3094 "failed (err_mask=0x%x)\n", err_mask);
3095 return -EIO;
3096 }
3097
3098 if (!err_mask || sense_key != UNIT_ATTENTION)
3099 return 0;
3100
3101 err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
3102 if (err_mask) {
3103 ata_dev_printk(dev, KERN_WARNING, "failed to clear "
3104 "UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
3105 return -EIO;
3106 }
3107 }
3108
3109 ata_dev_printk(dev, KERN_WARNING,
3110 "UNIT ATTENTION persists after %d tries\n", ATA_EH_UA_TRIES);
3111
3112 return 0;
3113}
3114
3115static int ata_link_nr_enabled(struct ata_link *link)
3116{
3117 struct ata_device *dev;
3118 int cnt = 0;
3119
3120 ata_for_each_dev(dev, link, ENABLED)
3121 cnt++;
3122 return cnt;
3123}
3124
3125static int ata_link_nr_vacant(struct ata_link *link)
3126{
3127 struct ata_device *dev;
3128 int cnt = 0;
3129
3130 ata_for_each_dev(dev, link, ALL)
3131 if (dev->class == ATA_DEV_UNKNOWN)
3132 cnt++;
3133 return cnt;
3134}
3135
/**
 *	ata_eh_skip_recovery - determine whether EH recovery can be skipped
 *	@link: link to check
 *
 *	Decide whether the recovery part of EH can be skipped for
 *	@link.  Recovery is skippable when the link is disabled, or
 *	when there is nothing attached, no reset is pending and all
 *	vacant slots are known to be empty.
 *
 *	RETURNS:
 *	1 if recovery can be skipped, 0 otherwise.
 */
static int ata_eh_skip_recovery(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;

	/* skip disabled links */
	if (link->flags & ATA_LFLAG_DISABLED)
		return 1;

	/* thaw frozen port and recover failed devices */
	if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
		return 0;

	/* reset at least once if reset is requested and hasn't happened yet */
	if ((ehc->i.action & ATA_EH_RESET) &&
	    !(ehc->i.flags & ATA_EHI_DID_RESET))
		return 0;

	/* skip only if class codes for all vacant slots are ATA_DEV_NONE */
	ata_for_each_dev(dev, link, ALL) {
		if (dev->class == ATA_DEV_UNKNOWN &&
		    ehc->classes[dev->devno] != ATA_DEV_NONE)
			return 0;
	}

	return 1;
}
3164
3165static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
3166{
3167 u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
3168 u64 now = get_jiffies_64();
3169 int *trials = void_arg;
3170
3171 if (ent->timestamp < now - min(now, interval))
3172 return -1;
3173
3174 (*trials)++;
3175 return 0;
3176}
3177
/*
 * Schedule probing for @dev if it is requested in probe_mask and hasn't
 * been probed during this EH run yet.  Detaches and re-initializes the
 * device, forces a reset and clears saved transfer-mode state so the
 * freshly probed device starts from a clean slate.
 *
 * RETURNS:
 * 1 if probing was scheduled, 0 otherwise.
 */
static int ata_eh_schedule_probe(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	struct ata_link *link = ata_dev_phys_link(dev);
	int trials = 0;

	/* not requested, or already probed in this EH session */
	if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
	    (ehc->did_probe_mask & (1 << dev->devno)))
		return 0;

	ata_eh_detach_dev(dev);
	ata_dev_init(dev);
	ehc->did_probe_mask |= (1 << dev->devno);
	ehc->i.action |= ATA_EH_RESET;
	ehc->saved_xfer_mode[dev->devno] = 0;
	ehc->saved_ncq_enabled &= ~(1 << dev->devno);

	/* Record and count probe trials on the ering.  The specific
	 * error mask used is irrelevant.  Because a successful device
	 * detection clears the ering, this count accumulates only if
	 * there are consecutive failed probes.
	 *
	 * If the count exceeds ATA_EH_PROBE_TRIALS within the last
	 * ATA_EH_PROBE_TRIAL_INTERVAL, the link speed is forced down.
	 *
	 * This works around cases where failed link speed negotiation
	 * results in device misdetection leading to infinite
	 * DEVXCHG or PHRDY CHG events.
	 */
	ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
	ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);

	if (trials > ATA_EH_PROBE_TRIALS)
		sata_down_spd_limit(link, 1);

	return 1;
}
3216
/*
 * Handle recovery failure @err for @dev: account the failed try,
 * adjust speed/probe policy according to the error and, if the device
 * has used up all its chances, disable it (possibly rescheduling a
 * fresh probe).
 *
 * RETURNS:
 * 1 if the device was disabled, 0 if another recovery attempt should
 * be made (with ATA_EH_RESET re-requested).
 */
static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	/* -EAGAIN from an EH routine indicates retry without prejudice */
	if (err != -EAGAIN)
		ehc->tries[dev->devno]--;

	switch (err) {
	case -ENODEV:
		/* device missing or wrong IDENTIFY data, schedule probing */
		ehc->i.probe_mask |= (1 << dev->devno);
		/* fall through */
	case -EINVAL:
		/* give it just one more chance */
		ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
		/* fall through */
	case -EIO:
		if (ehc->tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(ata_dev_phys_link(dev), 0);
			if (dev->pio_mode > XFER_PIO_0)
				ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
		/* disable device if it has used up all its chances */
		ata_dev_disable(dev);

		/* detach if offline */
		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
			ata_eh_detach_dev(dev);

		/* schedule probe if requested; reset per-dev EH state */
		if (ata_eh_schedule_probe(dev)) {
			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
			memset(ehc->cmd_timeout_idx[dev->devno], 0,
			       sizeof(ehc->cmd_timeout_idx[dev->devno]));
		}

		return 1;
	} else {
		/* retry after a reset */
		ehc->i.action |= ATA_EH_RESET;
		return 0;
	}
}
3266
3267
3268
3269
3270
3271
3272
3273
3274
3275
3276
3277
3278
3279
3280
3281
3282
3283
3284
3285
3286
3287
3288
/**
 *	ata_eh_recover - recover host port after error
 *	@ap: host port to recover
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *	@r_failed_link: out parameter for failed link
 *
 *	This is the heart and soul of libata exception handling.  On
 *	entry, actions required to recover each link and hotplug
 *	requests are recorded in the link's eh_context.  This function
 *	executes all the operations with appropriate retrials and
 *	fallbacks to resurrect failed devices, detach goners and greet
 *	newcomers.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
		   ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
		   ata_postreset_fn_t postreset,
		   struct ata_link **r_failed_link)
{
	struct ata_link *link;
	struct ata_device *dev;
	int nr_failed_devs;
	int rc;
	unsigned long flags, deadline;

	DPRINTK("ENTER\n");

	/* prep for recovery */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* re-enable link? */
		if (ehc->i.action & ATA_EH_ENABLE_LINK) {
			ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
			spin_lock_irqsave(ap->lock, flags);
			link->flags &= ~ATA_LFLAG_DISABLED;
			spin_unlock_irqrestore(ap->lock, flags);
			ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
		}

		ata_for_each_dev(dev, link, ALL) {
			if (link->flags & ATA_LFLAG_NO_RETRY)
				ehc->tries[dev->devno] = 1;
			else
				ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;

			/* collect port action mask recorded in dev actions */
			ehc->i.action |= ehc->i.dev_action[dev->devno] &
					 ~ATA_EH_PERDEV_MASK;
			ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;

			/* process hotplug request */
			if (dev->flags & ATA_DFLAG_DETACH)
				ata_eh_detach_dev(dev);

			/* schedule probe if necessary */
			if (!ata_dev_enabled(dev))
				ata_eh_schedule_probe(dev);
		}
	}

 retry:
	rc = 0;
	nr_failed_devs = 0;

	/* if UNLOADING, finish immediately */
	if (ap->pflags & ATA_PFLAG_UNLOADING)
		goto out;

	/* prep for EH */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* skip EH if possible */
		if (ata_eh_skip_recovery(link))
			ehc->i.action = 0;

		ata_for_each_dev(dev, link, ALL)
			ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
	}

	/* reset */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		if (!(ehc->i.action & ATA_EH_RESET))
			continue;

		rc = ata_eh_reset(link, ata_link_nr_vacant(link),
				  prereset, softreset, hardreset, postreset);
		if (rc) {
			ata_link_printk(link, KERN_ERR,
					"reset failed, giving up\n");
			goto out;
		}
	}

	do {
		unsigned long now;

		/*
		 * clears ATA_EH_PARK in eh_info and resets
		 * ap->park_req_pending
		 */
		ata_eh_pull_park_action(ap);

		deadline = jiffies;
		ata_for_each_link(link, ap, EDGE) {
			ata_for_each_dev(dev, link, ALL) {
				struct ata_eh_context *ehc = &link->eh_context;
				unsigned long tmp;

				if (dev->class != ATA_DEV_ATA)
					continue;
				if (!(ehc->i.dev_action[dev->devno] &
				      ATA_EH_PARK))
					continue;
				tmp = dev->unpark_deadline;
				if (time_before(deadline, tmp))
					deadline = tmp;
				else if (time_before_eq(tmp, jiffies))
					continue;
				if (ehc->unloaded_mask & (1 << dev->devno))
					continue;

				ata_eh_park_issue_cmd(dev, 1);
			}
		}

		now = jiffies;
		if (time_before_eq(deadline, now))
			break;

		/* wait until an unpark is requested or deadline passes */
		deadline = wait_for_completion_timeout(&ap->park_req_pending,
						       deadline - now);
	} while (deadline);
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ALL) {
			if (!(link->eh_context.unloaded_mask &
			      (1 << dev->devno)))
				continue;

			ata_eh_park_issue_cmd(dev, 0);
			ata_eh_done(link, dev, ATA_EH_PARK);
		}
	}

	/* the rest */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* revalidate existing devices and attach new ones */
		rc = ata_eh_revalidate_and_attach(link, &dev);
		if (rc)
			goto dev_fail;

		/* if PMP got attached, return, pmp EH will take care of it */
		if (link->device->class == ATA_DEV_PMP) {
			ehc->i.action = 0;
			return 0;
		}

		/* configure transfer mode if necessary */
		if (ehc->i.flags & ATA_EHI_SETMODE) {
			rc = ata_set_mode(link, &dev);
			if (rc)
				goto dev_fail;
			ehc->i.flags &= ~ATA_EHI_SETMODE;
		}

		/* If reset has been issued, clear UA to avoid
		 * disrupting the current users of the device.
		 */
		if (ehc->i.flags & ATA_EHI_DID_RESET) {
			ata_for_each_dev(dev, link, ALL) {
				if (dev->class != ATA_DEV_ATAPI)
					continue;
				rc = atapi_eh_clear_ua(dev);
				if (rc)
					goto dev_fail;
			}
		}

		/* configure link power management */
		if (ehc->i.action & ATA_EH_LPM)
			ata_for_each_dev(dev, link, ALL)
				ata_dev_enable_pm(dev, ap->pm_policy);

		/* this link is okay now */
		ehc->i.flags = 0;
		continue;

dev_fail:
		nr_failed_devs++;
		ata_eh_handle_dev_fail(dev, rc);

		if (ap->pflags & ATA_PFLAG_FROZEN) {
			/* PMP reset requires working host port.
			 * Can't retry if it's frozen.
			 */
			if (sata_pmp_attached(ap))
				goto out;
			break;
		}
	}

	if (nr_failed_devs)
		goto retry;

 out:
	if (rc && r_failed_link)
		*r_failed_link = link;

	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
3491
3492
3493
3494
3495
3496
3497
3498
3499
3500
3501
/**
 *	ata_eh_finish - finish up EH
 *	@ap: host port to finish EH for
 *
 *	Recovery is complete.  Clean up EH states and retry or finish
 *	failed qcs.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_finish(struct ata_port *ap)
{
	int tag;

	/* retry or finish qcs */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask) {
			/* FIXME: Once EH migration is complete,
			 * generate sense data in this function,
			 * considering both err_mask and tf.
			 */
			if (qc->flags & ATA_QCFLAG_RETRY)
				ata_eh_qc_retry(qc);
			else
				ata_eh_qc_complete(qc);
		} else {
			if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
				ata_eh_qc_complete(qc);
			} else {
				/* feed zero TF to sense generation */
				memset(&qc->result_tf, 0, sizeof(qc->result_tf));
				ata_eh_qc_retry(qc);
			}
		}
	}

	/* make sure nr_active_links is zero after EH is finished */
	WARN_ON(ap->nr_active_links);
	ap->nr_active_links = 0;
}
3537
3538
3539
3540
3541
3542
3543
3544
3545
3546
3547
3548
3549
3550
3551
3552void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
3553 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
3554 ata_postreset_fn_t postreset)
3555{
3556 struct ata_device *dev;
3557 int rc;
3558
3559 ata_eh_autopsy(ap);
3560 ata_eh_report(ap);
3561
3562 rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
3563 NULL);
3564 if (rc) {
3565 ata_for_each_dev(dev, &ap->link, ALL)
3566 ata_dev_disable(dev);
3567 }
3568
3569 ata_eh_finish(ap);
3570}
3571
3572
3573
3574
3575
3576
3577
3578
3579
3580
3581void ata_std_error_handler(struct ata_port *ap)
3582{
3583 struct ata_port_operations *ops = ap->ops;
3584 ata_reset_fn_t hardreset = ops->hardreset;
3585
3586
3587 if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link))
3588 hardreset = NULL;
3589
3590 ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
3591}
3592
3593#ifdef CONFIG_PM
3594
3595
3596
3597
3598
3599
3600
3601
3602
/**
 *	ata_eh_handle_port_suspend - perform port suspend operation
 *	@ap: port to suspend
 *
 *	Suspend @ap.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{
	unsigned long flags;
	int rc = 0;

	/* are we suspending? */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    ap->pm_mesg.event == PM_EVENT_ON) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);

	/* tell ACPI we're suspending */
	rc = ata_acpi_on_suspend(ap);
	if (rc)
		goto out;

	/* freeze, then do the LLD's suspend */
	ata_eh_freeze_port(ap);

	if (ap->ops->port_suspend)
		rc = ap->ops->port_suspend(ap, ap->pm_mesg);

	ata_acpi_set_state(ap, PMSG_SUSPEND);
 out:
	/* report result under ap->lock and clear PM_PENDING */
	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
	if (rc == 0)
		ap->pflags |= ATA_PFLAG_SUSPENDED;
	else if (ap->pflags & ATA_PFLAG_FROZEN)
		/* suspend failed while frozen — let EH have another look */
		ata_port_schedule_eh(ap);

	if (ap->pm_result) {
		*ap->pm_result = rc;
		ap->pm_result = NULL;
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return;
}
3650
3651
3652
3653
3654
3655
3656
3657
3658
3659
/**
 *	ata_eh_handle_port_resume - perform port resume operation
 *	@ap: port to resume
 *
 *	Resume @ap.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_resume(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;
	int rc = 0;

	/* are we resuming? */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    ap->pm_mesg.event != PM_EVENT_ON) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));

	/*
	 * Error timestamps are in jiffies which doesn't run while
	 * suspended and PHY events during resume aren't too uncommon.
	 * When the two are combined, it can lead to unnecessary speed
	 * downs if the machine is suspended and resumed repeatedly.
	 * Clear error history.
	 */
	ata_for_each_link(link, ap, HOST_FIRST)
		ata_for_each_dev(dev, link, ALL)
			ata_ering_clear(&dev->ering);

	ata_acpi_set_state(ap, PMSG_ON);

	if (ap->ops->port_resume)
		rc = ap->ops->port_resume(ap);

	/* tell ACPI that we're resuming */
	ata_acpi_on_resume(ap);

	/* report result under ap->lock and clear PM flags */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
	if (ap->pm_result) {
		*ap->pm_result = rc;
		ap->pm_result = NULL;
	}
	spin_unlock_irqrestore(ap->lock, flags);
}
3706#endif
3707