1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56#include <linux/completion.h>
57#include <linux/irqflags.h>
58#include "sas.h"
59#include <scsi/libsas.h>
60#include "remote_device.h"
61#include "remote_node_context.h"
62#include "isci.h"
63#include "request.h"
64#include "task.h"
65#include "host.h"
66
67
68
69
70
71
72
73
74
75
76static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
77 enum service_response response,
78 enum exec_status status)
79
80{
81 enum isci_completion_selection disposition;
82
83 disposition = isci_perform_normal_io_completion;
84 disposition = isci_task_set_completion_status(task, response, status,
85 disposition);
86
87
88
89
90 switch (disposition) {
91 case isci_perform_normal_io_completion:
92
93 dev_dbg(&ihost->pdev->dev,
94 "%s: Normal - task = %p, response=%d, "
95 "status=%d\n",
96 __func__, task, response, status);
97
98 task->lldd_task = NULL;
99
100 isci_execpath_callback(ihost, task, task->task_done);
101 break;
102
103 case isci_perform_aborted_io_completion:
104
105
106
107
108 dev_dbg(&ihost->pdev->dev,
109 "%s: Aborted - task = %p, response=%d, "
110 "status=%d\n",
111 __func__, task, response, status);
112 break;
113
114 case isci_perform_error_io_completion:
115
116 dev_dbg(&ihost->pdev->dev,
117 "%s: Error - task = %p, response=%d, "
118 "status=%d\n",
119 __func__, task, response, status);
120
121 isci_execpath_callback(ihost, task, sas_task_abort);
122 break;
123
124 default:
125 dev_dbg(&ihost->pdev->dev,
126 "%s: isci task notification default case!",
127 __func__);
128 sas_task_abort(task);
129 break;
130 }
131}
132
/*
 * Iterate over @num sas_tasks starting at @task, advancing through the
 * task's list linkage; @num is decremented and @task advanced each pass.
 */
#define for_each_sas_task(num, task) \
	for (; num > 0; num--,\
	     task = list_entry(task->list.next, struct sas_task, list))
136
137
138static inline int isci_device_io_ready(struct isci_remote_device *idev,
139 struct sas_task *task)
140{
141 return idev ? test_bit(IDEV_IO_READY, &idev->flags) ||
142 (test_bit(IDEV_IO_NCQERROR, &idev->flags) &&
143 isci_task_is_ncq_recovery(task))
144 : 0;
145}
146
147
148
149
150
151
152
153
154
155
/**
 * isci_task_execute_task() - libsas entry point for starting I/O.
 * @task: first sas_task of a linked list of @num tasks to start.
 * @num: number of tasks to consume from the list.
 * @gfp_flags: allocation context flags (part of the libsas interface).
 *
 * For each task: look up the target device, sample its readiness, and
 * reserve a hardware tag — all atomically under the controller lock.
 * A task that cannot be started (unknown device, not ready, or no tag)
 * is refused back to libsas; otherwise it is handed to
 * isci_request_execute().  A reserved tag is released again whenever
 * the submit did not succeed.  Always returns 0; per-task errors are
 * reported through completion callbacks.
 */
int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
{
	struct isci_host *ihost = dev_to_ihost(task->dev);
	struct isci_remote_device *idev;
	unsigned long flags;
	bool io_ready;
	u16 tag;

	dev_dbg(&ihost->pdev->dev, "%s: num=%d\n", __func__, num);

	for_each_sas_task(num, task) {
		enum sci_status status = SCI_FAILURE;

		/* Snapshot device + readiness and grab a tag in one
		 * critical section so they cannot go stale relative to
		 * each other.
		 */
		spin_lock_irqsave(&ihost->scic_lock, flags);
		idev = isci_lookup_device(task->dev);
		io_ready = isci_device_io_ready(idev, task);
		tag = isci_alloc_tag(ihost);
		spin_unlock_irqrestore(&ihost->scic_lock, flags);

		dev_dbg(&ihost->pdev->dev,
			"task: %p, num: %d dev: %p idev: %p:%#lx cmd = %p\n",
			task, num, task->dev, idev, idev ? idev->flags : 0,
			task->uldd_task);

		if (!idev) {
			isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED,
					 SAS_DEVICE_UNKNOWN);
		} else if (!io_ready || tag == SCI_CONTROLLER_INVALID_IO_TAG) {
			/* Indicate QUEUE_FULL so the scsi midlayer
			 * retries the request.
			 */
			isci_task_refuse(ihost, task, SAS_TASK_COMPLETE,
					 SAS_QUEUE_FULL);
		} else {
			/* There is a device and it is ready for I/O. */
			spin_lock_irqsave(&task->task_state_lock, flags);

			if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
				/* The I/O was aborted before it started. */
				spin_unlock_irqrestore(&task->task_state_lock,
						       flags);

				isci_task_refuse(ihost, task,
						 SAS_TASK_UNDELIVERED,
						 SAM_STAT_TASK_ABORTED);
			} else {
				task->task_state_flags |= SAS_TASK_AT_INITIATOR;
				spin_unlock_irqrestore(&task->task_state_lock, flags);

				/* Build and send the request. */
				status = isci_request_execute(ihost, idev, task, tag);

				if (status != SCI_SUCCESS) {
					/* Submit failed: undo AT_INITIATOR. */
					spin_lock_irqsave(&task->task_state_lock, flags);
					/* Did not really start this command. */
					task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
					spin_unlock_irqrestore(&task->task_state_lock, flags);

					if (test_bit(IDEV_GONE, &idev->flags)) {
						/* The device has been hot-
						 * removed; report it unknown.
						 */
						isci_task_refuse(ihost, task,
								 SAS_TASK_UNDELIVERED,
								 SAS_DEVICE_UNKNOWN);
					} else {
						/* Indicate QUEUE_FULL so the
						 * scsi midlayer retries the
						 * request.
						 */
						isci_task_refuse(ihost, task,
								 SAS_TASK_COMPLETE,
								 SAS_QUEUE_FULL);
					}
				}
			}
		}
		if (status != SCI_SUCCESS && tag != SCI_CONTROLLER_INVALID_IO_TAG) {
			spin_lock_irqsave(&ihost->scic_lock, flags);
			/* The command never hit the device, so just free
			 * the tci and skip the sequence increment.
			 */
			isci_tci_free(ihost, ISCI_TAG_TCI(tag));
			spin_unlock_irqrestore(&ihost->scic_lock, flags);
		}
		isci_put_device(idev);
	}
	return 0;
}
251
/*
 * isci_sata_management_task_request_build() - fill in the
 * host-to-device FIS for a SATA soft-reset (SRST) TMF.
 * @ireq: the request to build; must be flagged IREQ_TMF.
 *
 * Only the SRST-high / SRST-low TMF codes are handled here; any other
 * code returns SCI_FAILURE.  Sets or clears ATA_SRST in the FIS
 * control byte, then lets the core construct the rest of the request.
 */
static enum sci_status isci_sata_management_task_request_build(struct isci_request *ireq)
{
	struct isci_tmf *isci_tmf;
	enum sci_status status;

	if (!test_bit(IREQ_TMF, &ireq->flags))
		return SCI_FAILURE;

	isci_tmf = isci_request_access_tmf(ireq);

	switch (isci_tmf->tmf_code) {

	case isci_tmf_sata_srst_high:
	case isci_tmf_sata_srst_low: {
		struct host_to_dev_fis *fis = &ireq->stp.cmd;

		memset(fis, 0, sizeof(*fis));

		fis->fis_type = 0x27;	/* register host-to-device FIS type */
		/* NOTE(review): 0x80 is presumably the FIS C bit and the low
		 * nibble the PM port field — confirm against the SATA spec.
		 */
		fis->flags &= ~0x80;
		fis->flags &= 0xF0;
		if (isci_tmf->tmf_code == isci_tmf_sata_srst_high)
			fis->control |= ATA_SRST;
		else
			fis->control &= ~ATA_SRST;
		break;
	}
	/* other management commands would go here... */
	default:
		return SCI_FAILURE;
	}

	/* The core builds the protocol-specific request based on the
	 * h2d fis filled in above.
	 */
	status = sci_task_request_construct_sata(ireq);

	return status;
}
291
/*
 * isci_task_request_build() - allocate and construct a TMF request.
 * @ihost: controller to build the request on.
 * @idev: target remote device.
 * @tag: pre-allocated hardware tag for the request.
 * @isci_tmf: the TMF descriptor to bind into the request.
 *
 * Returns the constructed request, or NULL on any construction
 * failure.  The caller retains ownership of @tag in either case.
 */
static struct isci_request *isci_task_request_build(struct isci_host *ihost,
						    struct isci_remote_device *idev,
						    u16 tag, struct isci_tmf *isci_tmf)
{
	enum sci_status status = SCI_FAILURE;
	struct isci_request *ireq = NULL;
	struct domain_device *dev;

	dev_dbg(&ihost->pdev->dev,
		"%s: isci_tmf = %p\n", __func__, isci_tmf);

	dev = idev->domain_dev;

	/* Common allocation and init of the request object. */
	ireq = isci_tmf_request_from_tag(ihost, isci_tmf, tag);
	if (!ireq)
		return NULL;

	/* Let the core do its construction. */
	status = sci_task_request_construct(ihost, idev, tag,
					    ireq);

	if (status != SCI_SUCCESS) {
		dev_warn(&ihost->pdev->dev,
			 "%s: sci_task_request_construct failed - "
			 "status = 0x%x\n",
			 __func__,
			 status);
		return NULL;
	}

	/* Protocol-specific construction by device type. */
	if (dev->dev_type == SAS_END_DEV) {
		isci_tmf->proto = SAS_PROTOCOL_SSP;
		status = sci_task_request_construct_ssp(ireq);
		if (status != SCI_SUCCESS)
			return NULL;
	}

	if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
		isci_tmf->proto = SAS_PROTOCOL_SATA;
		status = isci_sata_management_task_request_build(ireq);

		if (status != SCI_SUCCESS)
			return NULL;
	}
	return ireq;
}
340
341
342
343
344static void isci_request_mark_zombie(struct isci_host *ihost, struct isci_request *ireq)
345{
346 struct completion *tmf_completion = NULL;
347 struct completion *req_completion;
348
349
350 ireq->status = dead;
351
352 req_completion = ireq->io_request_completion;
353 ireq->io_request_completion = NULL;
354
355 if (test_bit(IREQ_TMF, &ireq->flags)) {
356
357 struct isci_tmf *tmf = isci_request_access_tmf(ireq);
358
359
360
361
362
363
364
365 if (tmf) {
366 tmf_completion = tmf->complete;
367 tmf->complete = NULL;
368 }
369 ireq->ttype_ptr.tmf_task_ptr = NULL;
370 dev_dbg(&ihost->pdev->dev, "%s: tmf_code %d, managed tag %#x\n",
371 __func__, tmf->tmf_code, tmf->io_tag);
372 } else {
373
374
375
376 struct sas_task *task = isci_request_access_task(ireq);
377
378 if (task)
379 task->lldd_task = NULL;
380
381 ireq->ttype_ptr.io_task_ptr = NULL;
382 }
383
384 dev_warn(&ihost->pdev->dev, "task context unrecoverable (tag: %#x)\n",
385 ireq->io_tag);
386
387
388 if (req_completion)
389 complete(req_completion);
390
391 if (tmf_completion != NULL)
392 complete(tmf_completion);
393}
394
/*
 * isci_task_execute_tmf() - build, start, and wait on a task
 * management request.
 * @ihost: controller to issue the TMF on.
 * @idev: target remote device.
 * @tmf: already-built TMF descriptor; its complete/status fields are
 *	 (re)initialized here.
 * @timeout_ms: how long to wait for the TMF to finish.
 *
 * Allocates a tag, validates device readiness, constructs and starts
 * the request, then waits with a timeout.  On timeout the request is
 * terminated; if even the termination wait times out the request is
 * marked zombie.  Returns TMF_RESP_FUNC_COMPLETE on success, else
 * TMF_RESP_FUNC_FAILED.
 */
static int isci_task_execute_tmf(struct isci_host *ihost,
				 struct isci_remote_device *idev,
				 struct isci_tmf *tmf, unsigned long timeout_ms)
{
	DECLARE_COMPLETION_ONSTACK(completion);
	enum sci_task_status status = SCI_TASK_FAILURE;
	struct isci_request *ireq;
	int ret = TMF_RESP_FUNC_FAILED;
	unsigned long flags;
	unsigned long timeleft;
	u16 tag;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	tag = isci_alloc_tag(ihost);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
		return ret;

	/* Sanity check: fail if the device is not there and ready. */
	if (!idev ||
	    (!test_bit(IDEV_IO_READY, &idev->flags) &&
	     !test_bit(IDEV_IO_NCQERROR, &idev->flags))) {
		dev_dbg(&ihost->pdev->dev,
			"%s: idev = %p not ready (%#lx)\n",
			__func__,
			idev, idev ? idev->flags : 0);
		goto err_tci;
	} else
		dev_dbg(&ihost->pdev->dev,
			"%s: idev = %p\n",
			__func__, idev);

	/* Point the TMF at the on-stack completion we will wait on. */
	tmf->complete = &completion;
	tmf->status = SCI_FAILURE_TIMEOUT;

	ireq = isci_task_request_build(ihost, idev, tag, tmf);
	if (!ireq)
		goto err_tci;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	/* Start the TMF io. */
	status = sci_controller_start_task(ihost, idev, ireq);

	if (status != SCI_TASK_SUCCESS) {
		dev_dbg(&ihost->pdev->dev,
			"%s: start_io failed - status = 0x%x, request = %p\n",
			__func__,
			status,
			ireq);
		spin_unlock_irqrestore(&ihost->scic_lock, flags);
		goto err_tci;
	}

	if (tmf->cb_state_func != NULL)
		tmf->cb_state_func(isci_tmf_started, tmf, tmf->cb_data);

	isci_request_change_state(ireq, started);

	/* Add the request to the remote device's in-process list. */
	list_add(&ireq->dev_node, &idev->reqs_in_process);

	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	/* Wait for the TMF to complete, or a timeout. */
	timeleft = wait_for_completion_timeout(&completion,
					       msecs_to_jiffies(timeout_ms));

	if (timeleft == 0) {
		/* The TMF did not complete - this could be because the
		 * device was unplugged.  Terminate the request now.
		 */
		spin_lock_irqsave(&ihost->scic_lock, flags);

		if (tmf->cb_state_func != NULL)
			tmf->cb_state_func(isci_tmf_timed_out, tmf,
					   tmf->cb_data);

		sci_controller_terminate_request(ihost, idev, ireq);

		spin_unlock_irqrestore(&ihost->scic_lock, flags);

		/* Second, bounded wait for the termination itself. */
		timeleft = wait_for_completion_timeout(
			&completion,
			msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC));

		if (!timeleft) {
			/* The termination also timed out: the request
			 * may now be a zombie.
			 */
			spin_lock_irqsave(&ihost->scic_lock, flags);

			/* If the TMF status has not changed, abandon it. */
			if (tmf->status == SCI_FAILURE_TIMEOUT)
				isci_request_mark_zombie(ihost, ireq);

			spin_unlock_irqrestore(&ihost->scic_lock, flags);
		}
	}

	isci_print_tmf(tmf);

	if (tmf->status == SCI_SUCCESS)
		ret = TMF_RESP_FUNC_COMPLETE;
	else if (tmf->status == SCI_FAILURE_IO_RESPONSE_VALID) {
		dev_dbg(&ihost->pdev->dev,
			"%s: tmf.status == "
			"SCI_FAILURE_IO_RESPONSE_VALID\n",
			__func__);
		ret = TMF_RESP_FUNC_COMPLETE;
	}
	/* Otherwise leave the default "failed" result alone. */

	dev_dbg(&ihost->pdev->dev,
		"%s: completed request = %p\n",
		__func__,
		ireq);

	return ret;

 err_tci:
	spin_lock_irqsave(&ihost->scic_lock, flags);
	isci_tci_free(ihost, ISCI_TAG_TCI(tag));
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	return ret;
}
526
527static void isci_task_build_tmf(struct isci_tmf *tmf,
528 enum isci_tmf_function_codes code,
529 void (*tmf_sent_cb)(enum isci_tmf_cb_state,
530 struct isci_tmf *,
531 void *),
532 void *cb_data)
533{
534 memset(tmf, 0, sizeof(*tmf));
535
536 tmf->tmf_code = code;
537 tmf->cb_state_func = tmf_sent_cb;
538 tmf->cb_data = cb_data;
539}
540
541static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf,
542 enum isci_tmf_function_codes code,
543 void (*tmf_sent_cb)(enum isci_tmf_cb_state,
544 struct isci_tmf *,
545 void *),
546 struct isci_request *old_request)
547{
548 isci_task_build_tmf(tmf, code, tmf_sent_cb, old_request);
549 tmf->io_tag = old_request->io_tag;
550}
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567static enum isci_request_status isci_task_validate_request_to_abort(
568 struct isci_request *isci_request,
569 struct isci_host *isci_host,
570 struct isci_remote_device *isci_device,
571 struct completion *aborted_io_completion)
572{
573 enum isci_request_status old_state = unallocated;
574
575
576
577
578 if (isci_request && !list_empty(&isci_request->dev_node)) {
579 old_state = isci_request_change_started_to_aborted(
580 isci_request, aborted_io_completion);
581
582 }
583
584 return old_state;
585}
586
587static int isci_request_is_dealloc_managed(enum isci_request_status stat)
588{
589 switch (stat) {
590 case aborted:
591 case aborting:
592 case terminating:
593 case completed:
594 case dead:
595 return true;
596 default:
597 return false;
598 }
599}
600
601
602
603
604
605
606
607
608
609
610
/*
 * isci_terminate_request_core() - terminate one request in the
 * hardware and wait for the termination to finish.
 * @ihost: controller owning the request.
 * @idev: the remote device the request targets.
 * @isci_request: the request to terminate (non-NULL on entry; may be
 *	NULLed locally if it becomes unsafe to reference).
 *
 * Issues a controller-level terminate if the hardware still owns the
 * request, then waits (bounded) on the request's completion.  On a
 * wait timeout the request is either marked zombie (termination still
 * pending) or treated as done.  When cleanup is needed and safe, the
 * tag is freed and the request unlinked from the device.
 */
static void isci_terminate_request_core(struct isci_host *ihost,
					struct isci_remote_device *idev,
					struct isci_request *isci_request)
{
	enum sci_status status = SCI_SUCCESS;
	bool was_terminated = false;
	bool needs_cleanup_handling = false;
	unsigned long flags;
	unsigned long termination_completed = 1;
	struct completion *io_request_completion;

	dev_dbg(&ihost->pdev->dev,
		"%s: device = %p; request = %p\n",
		__func__, idev, isci_request);

	spin_lock_irqsave(&ihost->scic_lock, flags);

	io_request_completion = isci_request->io_request_completion;

	/* Mark that the target-side completion will not be waited
	 * for separately.
	 */
	set_bit(IREQ_COMPLETE_IN_TARGET, &isci_request->flags);

	/* Only terminate if the hardware has not already reported the
	 * request terminated.
	 */
	if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
		was_terminated = true;
		needs_cleanup_handling = true;
		status = sci_controller_terminate_request(ihost,
							  idev,
							  isci_request);
	}
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	/* A failed terminate means the request completed and is being
	 * handled elsewhere; just drop our completion reference.
	 */
	if (status != SCI_SUCCESS) {
		dev_dbg(&ihost->pdev->dev,
			"%s: sci_controller_terminate_request"
			" returned = 0x%x\n",
			__func__, status);

		isci_request->io_request_completion = NULL;

	} else {
		if (was_terminated) {
			dev_dbg(&ihost->pdev->dev,
				"%s: before completion wait (%p/%p)\n",
				__func__, isci_request, io_request_completion);

			/* Wait here for the request to complete. */
			termination_completed
				= wait_for_completion_timeout(
				   io_request_completion,
				   msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC));

			if (!termination_completed) {
				/* The wait timed out; recheck under the lock. */
				spin_lock_irqsave(&ihost->scic_lock, flags);

				/* The request may have terminated while we
				 * were waiting to take the lock.
				 */
				if (!test_bit(IREQ_TERMINATED,
					      &isci_request->flags)) {

					/* Termination is still pending:
					 * abandon the request (mark it dead)
					 * so nobody waits on it again.
					 */
					isci_request_mark_zombie(ihost,
								 isci_request);
					needs_cleanup_handling = true;
				} else
					termination_completed = 1;

				spin_unlock_irqrestore(&ihost->scic_lock,
						       flags);

				if (!termination_completed) {

					dev_dbg(&ihost->pdev->dev,
						"%s: *** Timeout waiting for "
						"termination(%p/%p)\n",
						__func__, io_request_completion,
						isci_request);

					/* The request can no longer be
					 * referenced safely since it may go
					 * away if the termination ever
					 * really does complete.
					 */
					isci_request = NULL;
				}
			}
			if (termination_completed)
				dev_dbg(&ihost->pdev->dev,
					"%s: after completion wait (%p/%p)\n",
					__func__, isci_request, io_request_completion);
		}

		if (termination_completed) {

			isci_request->io_request_completion = NULL;

			/* Peek at the request status: it tells us whether
			 * the request needs detaching and freeing here.
			 */
			spin_lock_irqsave(&isci_request->state_lock, flags);

			needs_cleanup_handling
				= isci_request_is_dealloc_managed(
					isci_request->status);

			spin_unlock_irqrestore(&isci_request->state_lock, flags);

		}
		if (needs_cleanup_handling) {

			dev_dbg(&ihost->pdev->dev,
				"%s: cleanup isci_device=%p, request=%p\n",
				__func__, idev, isci_request);

			if (isci_request != NULL) {
				spin_lock_irqsave(&ihost->scic_lock, flags);
				isci_free_tag(ihost, isci_request->io_tag);
				isci_request_change_state(isci_request, unallocated);
				list_del_init(&isci_request->dev_node);
				spin_unlock_irqrestore(&ihost->scic_lock, flags);
			}
		}
	}
}
749
750
751
752
753
754
755
756
757
758
759
/**
 * isci_terminate_pending_requests() - terminate all requests queued
 * to a remote device.
 * @ihost: controller owning the device.
 * @idev: device whose reqs_in_process list is to be drained.
 *
 * Splices the device's in-process list onto a private list under the
 * controller lock, then terminates each request whose state permits
 * it (started/completed/aborting).  Requests in any other state are
 * moved back onto the device list untouched.  The lock is dropped
 * around each isci_terminate_request_core() call, which may block.
 */
void isci_terminate_pending_requests(struct isci_host *ihost,
				     struct isci_remote_device *idev)
{
	struct completion request_completion;
	enum isci_request_status old_state;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&ihost->scic_lock, flags);
	list_splice_init(&idev->reqs_in_process, &list);

	/* Relies on isci_terminate_request_core() unlinking each
	 * terminated request from the list.
	 */
	while (!list_empty(&list)) {
		struct isci_request *ireq = list_entry(list.next, typeof(*ireq), dev_node);

		/* Change state to "terminating" if it is currently
		 * "started".
		 */
		old_state = isci_request_change_started_to_newstate(ireq,
								    &request_completion,
								    terminating);
		switch (old_state) {
		case started:
		case completed:
		case aborting:
			break;
		default:
			/* Termination is already in progress (or the
			 * request is otherwise dispositioned): put it
			 * back on the device list and skip it.
			 */
			list_move(&ireq->dev_node, &idev->reqs_in_process);
			ireq = NULL;
			break;
		}

		if (!ireq)
			continue;
		spin_unlock_irqrestore(&ihost->scic_lock, flags);

		init_completion(&request_completion);

		dev_dbg(&ihost->pdev->dev,
			"%s: idev=%p request=%p; task=%p old_state=%d\n",
			__func__, idev, ireq,
			(!test_bit(IREQ_TMF, &ireq->flags)
				? isci_request_access_task(ireq)
				: NULL),
			old_state);

		/* started: the request was not already being aborted
		 * and is terminated here.
		 * completed: the request finished from the hardware's
		 * perspective and only needs cleanup.
		 * aborting: the request already went through a TMF
		 * timeout and at minimum needs cleanup.
		 */
		isci_terminate_request_core(ihost, idev, ireq);
		spin_lock_irqsave(&ihost->scic_lock, flags);
	}
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}
835
836
837
838
839
840
841
842
843static int isci_task_send_lu_reset_sas(
844 struct isci_host *isci_host,
845 struct isci_remote_device *isci_device,
846 u8 *lun)
847{
848 struct isci_tmf tmf;
849 int ret = TMF_RESP_FUNC_FAILED;
850
851 dev_dbg(&isci_host->pdev->dev,
852 "%s: isci_host = %p, isci_device = %p\n",
853 __func__, isci_host, isci_device);
854
855
856
857
858
859 isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset, NULL, NULL);
860
861 #define ISCI_LU_RESET_TIMEOUT_MS 2000
862 ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS);
863
864 if (ret == TMF_RESP_FUNC_COMPLETE)
865 dev_dbg(&isci_host->pdev->dev,
866 "%s: %p: TMF_LU_RESET passed\n",
867 __func__, isci_device);
868 else
869 dev_dbg(&isci_host->pdev->dev,
870 "%s: %p: TMF_LU_RESET failed (%x)\n",
871 __func__, isci_device, ret);
872
873 return ret;
874}
875
876static int isci_task_send_lu_reset_sata(struct isci_host *ihost,
877 struct isci_remote_device *idev, u8 *lun)
878{
879 int ret = TMF_RESP_FUNC_FAILED;
880 struct isci_tmf tmf;
881
882
883 #define ISCI_SRST_TIMEOUT_MS 25000
884 isci_task_build_tmf(&tmf, isci_tmf_sata_srst_high, NULL, NULL);
885
886 ret = isci_task_execute_tmf(ihost, idev, &tmf, ISCI_SRST_TIMEOUT_MS);
887
888 if (ret != TMF_RESP_FUNC_COMPLETE) {
889 dev_dbg(&ihost->pdev->dev,
890 "%s: Assert SRST failed (%p) = %x",
891 __func__, idev, ret);
892
893
894
895
896 }
897 return ret;
898}
899
900
901
902
903
904
905
906
907
908
909int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun)
910{
911 struct isci_host *isci_host = dev_to_ihost(domain_device);
912 struct isci_remote_device *isci_device;
913 unsigned long flags;
914 int ret;
915
916 spin_lock_irqsave(&isci_host->scic_lock, flags);
917 isci_device = isci_lookup_device(domain_device);
918 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
919
920 dev_dbg(&isci_host->pdev->dev,
921 "%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
922 __func__, domain_device, isci_host, isci_device);
923
924 if (!isci_device) {
925
926 dev_dbg(&isci_host->pdev->dev, "%s: No dev\n", __func__);
927
928 ret = TMF_RESP_FUNC_COMPLETE;
929 goto out;
930 }
931 set_bit(IDEV_EH, &isci_device->flags);
932
933
934 if (sas_protocol_ata(domain_device->tproto)) {
935 ret = isci_task_send_lu_reset_sata(isci_host, isci_device, lun);
936 } else
937 ret = isci_task_send_lu_reset_sas(isci_host, isci_device, lun);
938
939
940 if (ret == TMF_RESP_FUNC_COMPLETE)
941
942 isci_terminate_pending_requests(isci_host,
943 isci_device);
944
945 out:
946 isci_put_device(isci_device);
947 return ret;
948}
949
950
951
952int isci_task_clear_nexus_port(struct asd_sas_port *port)
953{
954 return TMF_RESP_FUNC_FAILED;
955}
956
957
958
959int isci_task_clear_nexus_ha(struct sas_ha_struct *ha)
960{
961 return TMF_RESP_FUNC_FAILED;
962}
963
964
965
966
967
968
969
970
971
972
973
974
975
976static void isci_abort_task_process_cb(
977 enum isci_tmf_cb_state cb_state,
978 struct isci_tmf *tmf,
979 void *cb_data)
980{
981 struct isci_request *old_request;
982
983 old_request = (struct isci_request *)cb_data;
984
985 dev_dbg(&old_request->isci_host->pdev->dev,
986 "%s: tmf=%p, old_request=%p\n",
987 __func__, tmf, old_request);
988
989 switch (cb_state) {
990
991 case isci_tmf_started:
992
993
994
995
996 if ((old_request->status != aborted)
997 && (old_request->status != completed))
998 dev_dbg(&old_request->isci_host->pdev->dev,
999 "%s: Bad request status (%d): tmf=%p, old_request=%p\n",
1000 __func__, old_request->status, tmf, old_request);
1001 break;
1002
1003 case isci_tmf_timed_out:
1004
1005
1006
1007
1008
1009
1010
1011
1012 isci_request_change_state(old_request, aborting);
1013 break;
1014
1015 default:
1016 dev_dbg(&old_request->isci_host->pdev->dev,
1017 "%s: Bad cb_state (%d): tmf=%p, old_request=%p\n",
1018 __func__, cb_state, tmf, old_request);
1019 break;
1020 }
1021}
1022
1023
1024
1025
1026
1027
1028
1029
/**
 * isci_task_abort_task() - libsas abort-task handler.
 * @task: the sas_task to abort.
 *
 * If the task already completed (or never reached the hardware) it is
 * completed straight back to libsas.  SMP and SATA/STP requests are
 * terminated in the silicon without a TMF; SSP requests are sent an
 * abort-task TMF first and then terminated.  Returns
 * TMF_RESP_FUNC_COMPLETE or TMF_RESP_FUNC_FAILED.
 */
int isci_task_abort_task(struct sas_task *task)
{
	struct isci_host *isci_host = dev_to_ihost(task->dev);
	DECLARE_COMPLETION_ONSTACK(aborted_io_completion);
	struct isci_request *old_request = NULL;
	enum isci_request_status old_state;
	struct isci_remote_device *isci_device = NULL;
	struct isci_tmf tmf;
	int ret = TMF_RESP_FUNC_FAILED;
	unsigned long flags;
	int perform_termination = 0;

	/* Get the isci_request reference from the task, and look up the
	 * device only while the task is known to still be in flight
	 * (not done, still at the initiator, and linked to a request).
	 */
	spin_lock_irqsave(&isci_host->scic_lock, flags);
	spin_lock(&task->task_state_lock);

	old_request = task->lldd_task;

	if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
	    (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
	    old_request)
		isci_device = isci_lookup_device(task->dev);

	spin_unlock(&task->task_state_lock);
	spin_unlock_irqrestore(&isci_host->scic_lock, flags);

	dev_dbg(&isci_host->pdev->dev,
		"%s: dev = %p, task = %p, old_request == %p\n",
		__func__, isci_device, task, old_request);

	if (isci_device)
		set_bit(IDEV_EH, &isci_device->flags);

	/* No live device/request: the task has effectively already
	 * completed; just mark it done and report success.
	 */
	if (!isci_device || !old_request) {
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
					    SAS_TASK_STATE_PENDING);
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		ret = TMF_RESP_FUNC_COMPLETE;

		dev_dbg(&isci_host->pdev->dev,
			"%s: abort task not needed for %p\n",
			__func__, task);
		goto out;
	}

	spin_lock_irqsave(&isci_host->scic_lock, flags);

	/* Try to transition the request to "aborted"; this also arms
	 * the on-stack completion that fires when the request finishes.
	 */
	old_state = isci_task_validate_request_to_abort(
				old_request, isci_host, isci_device,
				&aborted_io_completion);
	if ((old_state != started) &&
	    (old_state != completed) &&
	    (old_state != aborting)) {

		spin_unlock_irqrestore(&isci_host->scic_lock, flags);

		/* Someone else already moved the request out of
		 * "started" and owns its disposition.
		 */
		dev_dbg(&isci_host->pdev->dev,
			"%s: device = %p; old_request %p already being aborted\n",
			__func__,
			isci_device, old_request);
		ret = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}
	if (task->task_proto == SAS_PROTOCOL_SMP ||
	    sas_protocol_ata(task->task_proto) ||
	    test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {

		spin_unlock_irqrestore(&isci_host->scic_lock, flags);

		dev_dbg(&isci_host->pdev->dev,
			"%s: %s request"
			" or complete_in_target (%d), thus no TMF\n",
			__func__,
			((task->task_proto == SAS_PROTOCOL_SMP)
				? "SMP"
				: (sas_protocol_ata(task->task_proto)
					? "SATA/STP"
					: "<other>")
			 ),
			test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags));

		if (test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {
			/* Already complete in the target: mark done. */
			spin_lock_irqsave(&task->task_state_lock, flags);
			task->task_state_flags |= SAS_TASK_STATE_DONE;
			task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
						    SAS_TASK_STATE_PENDING);
			spin_unlock_irqrestore(&task->task_state_lock, flags);
			ret = TMF_RESP_FUNC_COMPLETE;
		} else {
			spin_lock_irqsave(&task->task_state_lock, flags);
			task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
						    SAS_TASK_STATE_PENDING);
			spin_unlock_irqrestore(&task->task_state_lock, flags);
		}

		/* SMP and SATA/STP requests are not sent a TMF; the
		 * outstanding I/O is terminated below instead.
		 */
		perform_termination = 1;

	} else {
		/* SSP path: fill in and send the abort-task TMF. */
		isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
					       isci_abort_task_process_cb,
					       old_request);

		spin_unlock_irqrestore(&isci_host->scic_lock, flags);

		#define ISCI_ABORT_TASK_TIMEOUT_MS 500
		ret = isci_task_execute_tmf(isci_host, isci_device, &tmf,
					    ISCI_ABORT_TASK_TIMEOUT_MS);

		if (ret == TMF_RESP_FUNC_COMPLETE)
			perform_termination = 1;
		else
			dev_dbg(&isci_host->pdev->dev,
				"%s: isci_task_send_tmf failed\n", __func__);
	}
	if (perform_termination) {
		set_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags);

		/* Clean up on our side and wait for the aborted I/O to
		 * complete.
		 */
		isci_terminate_request_core(isci_host, isci_device,
					    old_request);
	}

	/* Make sure no reference to the on-stack completion survives. */
	old_request->io_request_completion = NULL;
 out:
	isci_put_device(isci_device);
	return ret;
}
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201int isci_task_abort_task_set(
1202 struct domain_device *d_device,
1203 u8 *lun)
1204{
1205 return TMF_RESP_FUNC_FAILED;
1206}
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218int isci_task_clear_aca(
1219 struct domain_device *d_device,
1220 u8 *lun)
1221{
1222 return TMF_RESP_FUNC_FAILED;
1223}
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236int isci_task_clear_task_set(
1237 struct domain_device *d_device,
1238 u8 *lun)
1239{
1240 return TMF_RESP_FUNC_FAILED;
1241}
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256int isci_task_query_task(
1257 struct sas_task *task)
1258{
1259
1260 if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
1261 return TMF_RESP_FUNC_FAILED;
1262 else
1263 return TMF_RESP_FUNC_SUCC;
1264}
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
/**
 * isci_task_request_complete() - core callback for a finished TMF
 * request.
 * @ihost: controller on which the request ran.
 * @ireq: the completed task-management request.
 * @completion_status: status reported by the core.
 *
 * Copies the protocol response (SSP response IU or SATA D2H FIS) into
 * the TMF, completes the I/O with the core, frees the tag unless the
 * terminate path owns deallocation, and finally wakes any waiters.
 * The completion pointers are sampled up front so the wakeups happen
 * only after all request bookkeeping is finished.
 */
void
isci_task_request_complete(struct isci_host *ihost,
			   struct isci_request *ireq,
			   enum sci_task_status completion_status)
{
	struct isci_tmf *tmf = isci_request_access_tmf(ireq);
	struct completion *tmf_complete = NULL;
	struct completion *request_complete = ireq->io_request_completion;

	dev_dbg(&ihost->pdev->dev,
		"%s: request = %p, status=%d\n",
		__func__, ireq, completion_status);

	isci_request_change_state(ireq, completed);

	set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags);

	if (tmf) {
		tmf->status = completion_status;

		/* Stash the protocol-specific response payload. */
		if (tmf->proto == SAS_PROTOCOL_SSP) {
			memcpy(&tmf->resp.resp_iu,
			       &ireq->ssp.rsp,
			       SSP_RESP_IU_MAX_SIZE);
		} else if (tmf->proto == SAS_PROTOCOL_SATA) {
			memcpy(&tmf->resp.d2h_fis,
			       &ireq->stp.rsp,
			       sizeof(struct dev_to_host_fis));
		}

		tmf_complete = tmf->complete;
	}
	sci_controller_complete_io(ihost, ireq->target_device, ireq);
	/* Set the "terminated" flag so the request cannot be
	 * terminated or completed again.
	 */
	set_bit(IREQ_TERMINATED, &ireq->flags);

	/* Once a request is in the terminate path, deallocation is
	 * managed there; only free/unlink here for the "completed"
	 * state or other non-managed states.
	 */
	if ((ireq->status == completed) ||
	    !isci_request_is_dealloc_managed(ireq->status)) {
		isci_request_change_state(ireq, unallocated);
		isci_free_tag(ihost, ireq->io_tag);
		list_del_init(&ireq->dev_node);
	}

	/* Wake a waiting terminate path, if any. */
	if (request_complete)
		complete(request_complete);

	/* Wake the TMF waiter last. */
	if (tmf_complete)
		complete(tmf_complete);
}
1333
/*
 * isci_reset_device() - hard-reset a remote device via its local phy.
 * @ihost: controller the device is attached to.
 * @idev: the device to reset.
 *
 * Puts the core device into its reset state, performs a phy reset,
 * discards all outstanding requests for the device, then signals
 * reset-complete to the core.  Returns the sas_phy_reset() result, or
 * TMF_RESP_FUNC_FAILED if the core refused to enter the reset state.
 */
static int isci_reset_device(struct isci_host *ihost,
			     struct isci_remote_device *idev)
{
	struct sas_phy *phy = sas_find_local_phy(idev->domain_dev);
	enum sci_status status;
	unsigned long flags;
	int rc;

	dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);

	spin_lock_irqsave(&ihost->scic_lock, flags);
	status = sci_remote_device_reset(idev);
	if (status != SCI_SUCCESS) {
		spin_unlock_irqrestore(&ihost->scic_lock, flags);

		dev_dbg(&ihost->pdev->dev,
			"%s: sci_remote_device_reset(%p) returned %d!\n",
			__func__, idev, status);

		return TMF_RESP_FUNC_FAILED;
	}
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	rc = sas_phy_reset(phy, true);	/* second arg: hard reset */

	/* Terminate in-progress I/O now that the link was reset. */
	isci_remote_device_nuke_requests(ihost, idev);

	/* Tell the core the reset handling is finished. */
	spin_lock_irqsave(&ihost->scic_lock, flags);
	status = sci_remote_device_reset_complete(idev);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (status != SCI_SUCCESS) {
		dev_dbg(&ihost->pdev->dev,
			"%s: sci_remote_device_reset_complete(%p) "
			"returned %d!\n", __func__, idev, status);
	}

	dev_dbg(&ihost->pdev->dev, "%s: idev %p complete.\n", __func__, idev);

	return rc;
}
1377
1378int isci_task_I_T_nexus_reset(struct domain_device *dev)
1379{
1380 struct isci_host *ihost = dev_to_ihost(dev);
1381 struct isci_remote_device *idev;
1382 unsigned long flags;
1383 int ret;
1384
1385 spin_lock_irqsave(&ihost->scic_lock, flags);
1386 idev = isci_lookup_device(dev);
1387 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1388
1389 if (!idev || !test_bit(IDEV_EH, &idev->flags)) {
1390 ret = TMF_RESP_FUNC_COMPLETE;
1391 goto out;
1392 }
1393
1394 ret = isci_reset_device(ihost, idev);
1395 out:
1396 isci_put_device(idev);
1397 return ret;
1398}
1399
1400int isci_bus_reset_handler(struct scsi_cmnd *cmd)
1401{
1402 struct domain_device *dev = sdev_to_domain_dev(cmd->device);
1403 struct isci_host *ihost = dev_to_ihost(dev);
1404 struct isci_remote_device *idev;
1405 unsigned long flags;
1406 int ret;
1407
1408 spin_lock_irqsave(&ihost->scic_lock, flags);
1409 idev = isci_lookup_device(dev);
1410 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1411
1412 if (!idev) {
1413 ret = TMF_RESP_FUNC_COMPLETE;
1414 goto out;
1415 }
1416
1417 ret = isci_reset_device(ihost, idev);
1418 out:
1419 isci_put_device(idev);
1420 return ret;
1421}
1422