#include <linux/circ_buf.h>
#include <linux/device.h>
#include <scsi/sas.h>
#include "host.h"
#include "isci.h"
#include "port.h"
#include "probe_roms.h"
#include "remote_device.h"
#include "request.h"
#include "scu_completion_codes.h"
#include "scu_event_codes.h"
#include "registers.h"
#include "scu_remote_node_context.h"
#include "scu_task_context.h"

#define SCU_CONTEXT_RAM_INIT_STALL_TIME 200

#define smu_max_ports(dcc_value) \
	(\
		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \
	)

#define smu_max_task_contexts(dcc_value) \
	(\
		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \
	)

#define smu_max_rncs(dcc_value) \
	(\
		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \
	)

/* Phy start timeout, in milliseconds. */
#define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT 100

/* The number of milliseconds to wait while a given phy is consuming power
 * before allowing another set of phys to consume power.
 */
#define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500

/* Normalize the completion queue put pointer so it can be used as an array
 * index.
 */
#define NORMALIZE_PUT_POINTER(x) \
	((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK)

/* Normalize the completion queue event pointer so it can be used as an array
 * index.
 */
#define NORMALIZE_EVENT_POINTER(x) \
	(\
		((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \
		>> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \
	)

/* Normalize the completion queue get pointer so it can be used as an array
 * index.
 */
#define NORMALIZE_GET_POINTER(x) \
	((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK)

/* Normalize the completion queue cycle pointer so it matches the cycle bit
 * of a completion queue entry.
 */
#define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \
	((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT))

/* Retrieve the cycle bit of a completion queue entry. */
#define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000)

149void sci_init_sm(struct sci_base_state_machine *sm,
150 const struct sci_base_state *state_table, u32 initial_state)
151{
152 sci_state_transition_t handler;
153
154 sm->initial_state_id = initial_state;
155 sm->previous_state_id = initial_state;
156 sm->current_state_id = initial_state;
157 sm->state_table = state_table;
158
159 handler = sm->state_table[initial_state].enter_state;
160 if (handler)
161 handler(sm);
162}
163
164
165void sci_change_state(struct sci_base_state_machine *sm, u32 next_state)
166{
167 sci_state_transition_t handler;
168
169 handler = sm->state_table[sm->current_state_id].exit_state;
170 if (handler)
171 handler(sm);
172
173 sm->previous_state_id = sm->current_state_id;
174 sm->current_state_id = next_state;
175
176 handler = sm->state_table[sm->current_state_id].enter_state;
177 if (handler)
178 handler(sm);
179}
180
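/* Completion queue entries are produced by the hardware into a ring buffer.
 * Each entry carries a cycle bit that flips on every pass around the ring; an
 * entry is "new" when its cycle bit matches the cycle bit tracked in the
 * driver's cached get pointer, which is what the comparison below checks.
 */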
181static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost)
182{
183 u32 get_value = ihost->completion_queue_get;
184 u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK;
185
186 if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) ==
187 COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index]))
188 return true;
189
190 return false;
191}
192
193static bool sci_controller_isr(struct isci_host *ihost)
194{
195 if (sci_controller_completion_queue_has_entries(ihost))
196 return true;

	/* A spurious interrupt: the queue may already have been drained by an
	 * earlier pass, so just clear the completion status bit.
	 */
	writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);

	/* Mask and then unmask the interrupts so that a completion arriving
	 * while the status is being cleared still generates a new interrupt
	 * message (avoids a hardware notification race).
	 */
210 spin_lock(&ihost->scic_lock);
211 if (test_bit(IHOST_IRQ_ENABLED, &ihost->flags)) {
212 writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
213 writel(0, &ihost->smu_registers->interrupt_mask);
214 }
215 spin_unlock(&ihost->scic_lock);
216
217 return false;
218}
219
220irqreturn_t isci_msix_isr(int vec, void *data)
221{
222 struct isci_host *ihost = data;
223
224 if (sci_controller_isr(ihost))
225 tasklet_schedule(&ihost->completion_tasklet);
226
227 return IRQ_HANDLED;
228}
229
230static bool sci_controller_error_isr(struct isci_host *ihost)
231{
232 u32 interrupt_status;
233
234 interrupt_status =
235 readl(&ihost->smu_registers->interrupt_status);
236 interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND);
237
238 if (interrupt_status != 0) {
		/* An error interrupt is pending; report it so the error
		 * handler callback can deal with it.
		 */
		return true;
	}

	/* Mask and then unmask the error interrupts so that an error raised
	 * while the status was being read still generates a notification.
	 */
	writel(0xff, &ihost->smu_registers->interrupt_mask);
	writel(0, &ihost->smu_registers->interrupt_mask);
253
254 return false;
255}
256
257static void sci_controller_task_completion(struct isci_host *ihost, u32 ent)
258{
259 u32 index = SCU_GET_COMPLETION_INDEX(ent);
260 struct isci_request *ireq = ihost->reqs[index];
261
	/* Make sure we really want to process this IO request. */
263 if (test_bit(IREQ_ACTIVE, &ireq->flags) &&
264 ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG &&
265 ISCI_TAG_SEQ(ireq->io_tag) == ihost->io_request_sequence[index])
		/* This is a valid request; pass the completion along to the
		 * I/O request handler.
		 */
269 sci_io_request_tc_completion(ireq, ent);
270}
271
272static void sci_controller_sdma_completion(struct isci_host *ihost, u32 ent)
273{
274 u32 index;
275 struct isci_request *ireq;
276 struct isci_remote_device *idev;
277
278 index = SCU_GET_COMPLETION_INDEX(ent);
279
280 switch (scu_get_command_request_type(ent)) {
281 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC:
282 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC:
283 ireq = ihost->reqs[index];
284 dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n",
285 __func__, ent, ireq);
286
287
288
289 break;
290 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC:
291 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC:
292 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC:
293 idev = ihost->device_table[index];
294 dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n",
295 __func__, ent, idev);
296
297
298
299 break;
300 default:
301 dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n",
302 __func__, ent);
303 break;
304 }
305}
306
307static void sci_controller_unsolicited_frame(struct isci_host *ihost, u32 ent)
308{
309 u32 index;
310 u32 frame_index;
311
312 struct scu_unsolicited_frame_header *frame_header;
313 struct isci_phy *iphy;
314 struct isci_remote_device *idev;
315
316 enum sci_status result = SCI_FAILURE;
317
318 frame_index = SCU_GET_FRAME_INDEX(ent);
319
320 frame_header = ihost->uf_control.buffers.array[frame_index].header;
321 ihost->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE;
322
323 if (SCU_GET_FRAME_ERROR(ent)) {
		/* Release frames that were received with an error. */
328 sci_controller_release_frame(ihost, frame_index);
329 return;
330 }
331
332 if (frame_header->is_address_frame) {
333 index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
334 iphy = &ihost->phys[index];
335 result = sci_phy_frame_handler(iphy, frame_index);
336 } else {
337
338 index = SCU_GET_COMPLETION_INDEX(ent);
339
340 if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
			/* This is a signature FIS or a frame from a direct
			 * attached SATA device that has not yet been created;
			 * either way, forward the frame to the protocol
			 * engine's phy.
			 */
345 index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
346 iphy = &ihost->phys[index];
347 result = sci_phy_frame_handler(iphy, frame_index);
348 } else {
349 if (index < ihost->remote_node_entries)
350 idev = ihost->device_table[index];
351 else
352 idev = NULL;
353
354 if (idev != NULL)
355 result = sci_remote_device_frame_handler(idev, frame_index);
356 else
357 sci_controller_release_frame(ihost, frame_index);
358 }
359 }
360
361 if (result != SCI_SUCCESS) {
		/* No additional error handling is done for frame handler
		 * failures.
		 */
365 }
366}
367
368static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
369{
370 struct isci_remote_device *idev;
371 struct isci_request *ireq;
372 struct isci_phy *iphy;
373 u32 index;
374
375 index = SCU_GET_COMPLETION_INDEX(ent);
376
377 switch (scu_get_event_type(ent)) {
378 case SCU_EVENT_TYPE_SMU_COMMAND_ERROR:
379
380 dev_err(&ihost->pdev->dev,
381 "%s: SCIC Controller 0x%p received SMU command error "
382 "0x%x\n",
383 __func__,
384 ihost,
385 ent);
386 break;
387
388 case SCU_EVENT_TYPE_SMU_PCQ_ERROR:
389 case SCU_EVENT_TYPE_SMU_ERROR:
390 case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR:
		/* These are fatal controller conditions; the controller will
		 * likely need a reset to recover.
		 */
394 dev_err(&ihost->pdev->dev,
395 "%s: SCIC Controller 0x%p received fatal controller "
396 "event 0x%x\n",
397 __func__,
398 ihost,
399 ent);
400 break;
401
402 case SCU_EVENT_TYPE_TRANSPORT_ERROR:
403 ireq = ihost->reqs[index];
404 sci_io_request_event_handler(ireq, ent);
405 break;
406
407 case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
408 switch (scu_get_event_specifier(ent)) {
409 case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE:
410 case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
411 ireq = ihost->reqs[index];
412 if (ireq != NULL)
413 sci_io_request_event_handler(ireq, ent);
414 else
				dev_warn(&ihost->pdev->dev,
					 "%s: SCIC Controller 0x%p received "
					 "event 0x%x for io request object "
					 "that doesn't exist.\n",
					 __func__,
					 ihost,
					 ent);
422
423 break;
424
425 case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT:
426 idev = ihost->device_table[index];
427 if (idev != NULL)
428 sci_remote_device_event_handler(idev, ent);
429 else
				dev_warn(&ihost->pdev->dev,
					 "%s: SCIC Controller 0x%p received "
					 "event 0x%x for remote device object "
					 "that doesn't exist.\n",
					 __func__,
					 ihost,
					 ent);
437
438 break;
439 }
440 break;
441
442 case SCU_EVENT_TYPE_BROADCAST_CHANGE:
	/* Direct the broadcast change event to the phy first and let the phy
	 * handler determine the correct action to take.
	 */
446 case SCU_EVENT_TYPE_ERR_CNT_EVENT:
	/* Direct the error counter event to the phy object since that is
	 * where the event notification originates.
	 */
450 case SCU_EVENT_TYPE_OSSP_EVENT:
451 index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
452 iphy = &ihost->phys[index];
453 sci_phy_event_handler(iphy, ent);
454 break;
455
456 case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
457 case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
458 case SCU_EVENT_TYPE_RNC_OPS_MISC:
459 if (index < ihost->remote_node_entries) {
460 idev = ihost->device_table[index];
461
462 if (idev != NULL)
463 sci_remote_device_event_handler(idev, ent);
464 } else
			dev_err(&ihost->pdev->dev,
				"%s: SCIC Controller 0x%p received event 0x%x "
				"for remote device object 0x%0x that doesn't "
				"exist.\n",
				__func__,
				ihost,
				ent,
				index);
473
474 break;
475
476 default:
477 dev_warn(&ihost->pdev->dev,
478 "%s: SCIC Controller received unknown event code %x\n",
479 __func__,
480 ent);
481 break;
482 }
483}
484
485static void sci_controller_process_completions(struct isci_host *ihost)
486{
487 u32 completion_count = 0;
488 u32 ent;
489 u32 get_index;
490 u32 get_cycle;
491 u32 event_get;
492 u32 event_cycle;
493
494 dev_dbg(&ihost->pdev->dev,
495 "%s: completion queue beginning get:0x%08x\n",
496 __func__,
497 ihost->completion_queue_get);
498
	/* Pull apart the cached completion queue get register. */
500 get_index = NORMALIZE_GET_POINTER(ihost->completion_queue_get);
501 get_cycle = SMU_CQGR_CYCLE_BIT & ihost->completion_queue_get;
502
503 event_get = NORMALIZE_EVENT_POINTER(ihost->completion_queue_get);
504 event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & ihost->completion_queue_get;
505
506 while (
507 NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle)
508 == COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index])
509 ) {
510 completion_count++;
511
512 ent = ihost->completion_queue[get_index];
513
514
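		/* Advance the get index; the masked carry of (get_index + 1)
		 * toggles the cached cycle bit whenever the index wraps.
		 */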
515 get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) <<
516 (SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT - SCU_MAX_COMPLETION_QUEUE_SHIFT);
517 get_index = (get_index+1) & (SCU_MAX_COMPLETION_QUEUE_ENTRIES-1);
518
519 dev_dbg(&ihost->pdev->dev,
520 "%s: completion queue entry:0x%08x\n",
521 __func__,
522 ent);
523
524 switch (SCU_GET_COMPLETION_TYPE(ent)) {
525 case SCU_COMPLETION_TYPE_TASK:
526 sci_controller_task_completion(ihost, ent);
527 break;
528
529 case SCU_COMPLETION_TYPE_SDMA:
530 sci_controller_sdma_completion(ihost, ent);
531 break;
532
533 case SCU_COMPLETION_TYPE_UFI:
534 sci_controller_unsolicited_frame(ihost, ent);
535 break;
536
537 case SCU_COMPLETION_TYPE_EVENT:
538 sci_controller_event_completion(ihost, ent);
539 break;
540
541 case SCU_COMPLETION_TYPE_NOTIFY: {
542 event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) <<
543 (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT);
544 event_get = (event_get+1) & (SCU_MAX_EVENTS-1);
545
546 sci_controller_event_completion(ihost, ent);
547 break;
548 }
549 default:
550 dev_warn(&ihost->pdev->dev,
551 "%s: SCIC Controller received unknown "
552 "completion type %x\n",
553 __func__,
554 ent);
555 break;
556 }
557 }
558
559
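	/* If any entries were consumed, fold the new get/event indices and
	 * cycle bits back into the cached register value and write it back so
	 * the hardware knows how far the driver has read.
	 */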
560 if (completion_count > 0) {
561 ihost->completion_queue_get =
562 SMU_CQGR_GEN_BIT(ENABLE) |
563 SMU_CQGR_GEN_BIT(EVENT_ENABLE) |
564 event_cycle |
565 SMU_CQGR_GEN_VAL(EVENT_POINTER, event_get) |
566 get_cycle |
567 SMU_CQGR_GEN_VAL(POINTER, get_index);
568
569 writel(ihost->completion_queue_get,
570 &ihost->smu_registers->completion_queue_get);
571
572 }
573
574 dev_dbg(&ihost->pdev->dev,
575 "%s: completion queue ending get:0x%08x\n",
576 __func__,
577 ihost->completion_queue_get);
578
579}
580
581static void sci_controller_error_handler(struct isci_host *ihost)
582{
583 u32 interrupt_status;
584
585 interrupt_status =
586 readl(&ihost->smu_registers->interrupt_status);
587
588 if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) &&
589 sci_controller_completion_queue_has_entries(ihost)) {
590
591 sci_controller_process_completions(ihost);
592 writel(SMU_ISR_QUEUE_SUSPEND, &ihost->smu_registers->interrupt_status);
593 } else {
594 dev_err(&ihost->pdev->dev, "%s: status: %#x\n", __func__,
595 interrupt_status);
596
597 sci_change_state(&ihost->sm, SCIC_FAILED);
598
599 return;
600 }
601
602
603
604
605 writel(0, &ihost->smu_registers->interrupt_mask);
606}
607
608irqreturn_t isci_intx_isr(int vec, void *data)
609{
610 irqreturn_t ret = IRQ_NONE;
611 struct isci_host *ihost = data;
612
613 if (sci_controller_isr(ihost)) {
614 writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
615 tasklet_schedule(&ihost->completion_tasklet);
616 ret = IRQ_HANDLED;
617 } else if (sci_controller_error_isr(ihost)) {
618 spin_lock(&ihost->scic_lock);
619 sci_controller_error_handler(ihost);
620 spin_unlock(&ihost->scic_lock);
621 ret = IRQ_HANDLED;
622 }
623
624 return ret;
625}
626
627irqreturn_t isci_error_isr(int vec, void *data)
628{
629 struct isci_host *ihost = data;
630
631 if (sci_controller_error_isr(ihost))
632 sci_controller_error_handler(ihost);
633
634 return IRQ_HANDLED;
635}
636

/**
 * isci_host_start_complete() - Note completion of a controller start request.
 * @ihost: the host that was starting
 * @completion_status: the status reported for the start operation
 */
645static void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status)
646{
647 if (completion_status != SCI_SUCCESS)
648 dev_info(&ihost->pdev->dev,
649 "controller start timed out, continuing...\n");
650 clear_bit(IHOST_START_PENDING, &ihost->flags);
651 wake_up(&ihost->eventq);
652}
653
654int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
655{
656 struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
657 struct isci_host *ihost = ha->lldd_ha;
658
659 if (test_bit(IHOST_START_PENDING, &ihost->flags))
660 return 0;
661
662 sas_drain_work(ha);
663
664 return 1;
665}
666

/**
 * sci_controller_get_suggested_start_timeout() - Suggested timeout, in
 *    milliseconds, for a controller start operation.
 * @ihost: the controller being started
 */
679static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost)
680{
681
682 if (!ihost)
683 return 0;

	/* The suggested start timeout is:
	 *
	 *     signature FIS timeout
	 *   + phy start timeout
	 *   + one power control interval for each remaining phy
	 *
	 * (currently one phy is allowed to spin up per interval)
	 */
699 return SCIC_SDS_SIGNATURE_FIS_TIMEOUT
700 + SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT
701 + ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
702}
703
704static void sci_controller_enable_interrupts(struct isci_host *ihost)
705{
706 set_bit(IHOST_IRQ_ENABLED, &ihost->flags);
707 writel(0, &ihost->smu_registers->interrupt_mask);
708}
709
710void sci_controller_disable_interrupts(struct isci_host *ihost)
711{
712 clear_bit(IHOST_IRQ_ENABLED, &ihost->flags);
713 writel(0xffffffff, &ihost->smu_registers->interrupt_mask);
714 readl(&ihost->smu_registers->interrupt_mask);
715}
716
717static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost)
718{
719 u32 port_task_scheduler_value;
720
721 port_task_scheduler_value =
722 readl(&ihost->scu_registers->peg0.ptsg.control);
723 port_task_scheduler_value |=
724 (SCU_PTSGCR_GEN_BIT(ETM_ENABLE) |
725 SCU_PTSGCR_GEN_BIT(PTSG_ENABLE));
726 writel(port_task_scheduler_value,
727 &ihost->scu_registers->peg0.ptsg.control);
728}
729
730static void sci_controller_assign_task_entries(struct isci_host *ihost)
731{
732 u32 task_assignment;
733
734
735
736
737
738
739 task_assignment =
740 readl(&ihost->smu_registers->task_context_assignment[0]);
741
742 task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) |
743 (SMU_TCA_GEN_VAL(ENDING, ihost->task_context_entries - 1)) |
744 (SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE));
745
746 writel(task_assignment,
747 &ihost->smu_registers->task_context_assignment[0]);
748
749}
750
751static void sci_controller_initialize_completion_queue(struct isci_host *ihost)
752{
753 u32 index;
754 u32 completion_queue_control_value;
755 u32 completion_queue_get_value;
756 u32 completion_queue_put_value;
757
758 ihost->completion_queue_get = 0;
759
760 completion_queue_control_value =
761 (SMU_CQC_QUEUE_LIMIT_SET(SCU_MAX_COMPLETION_QUEUE_ENTRIES - 1) |
762 SMU_CQC_EVENT_LIMIT_SET(SCU_MAX_EVENTS - 1));
763
764 writel(completion_queue_control_value,
765 &ihost->smu_registers->completion_queue_control);
766
767
768
769 completion_queue_get_value = (
770 (SMU_CQGR_GEN_VAL(POINTER, 0))
771 | (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0))
772 | (SMU_CQGR_GEN_BIT(ENABLE))
773 | (SMU_CQGR_GEN_BIT(EVENT_ENABLE))
774 );
775
776 writel(completion_queue_get_value,
777 &ihost->smu_registers->completion_queue_get);
778
779
780 completion_queue_put_value = (
781 (SMU_CQPR_GEN_VAL(POINTER, 0))
782 | (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0))
783 );
784
785 writel(completion_queue_put_value,
786 &ihost->smu_registers->completion_queue_put);
787
788
789 for (index = 0; index < SCU_MAX_COMPLETION_QUEUE_ENTRIES; index++) {
790
791
792
793
794 ihost->completion_queue[index] = 0x80000000;
795 }
796}
797
798static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost)
799{
800 u32 frame_queue_control_value;
801 u32 frame_queue_get_value;
802 u32 frame_queue_put_value;
803
804
805 frame_queue_control_value =
806 SCU_UFQC_GEN_VAL(QUEUE_SIZE, SCU_MAX_UNSOLICITED_FRAMES);
807
808 writel(frame_queue_control_value,
809 &ihost->scu_registers->sdma.unsolicited_frame_queue_control);
810
811
812 frame_queue_get_value = (
813 SCU_UFQGP_GEN_VAL(POINTER, 0)
814 | SCU_UFQGP_GEN_BIT(ENABLE_BIT)
815 );
816
817 writel(frame_queue_get_value,
818 &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
819
820 frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0);
821 writel(frame_queue_put_value,
822 &ihost->scu_registers->sdma.unsolicited_frame_put_pointer);
823}
824
825void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status)
826{
827 if (ihost->sm.current_state_id == SCIC_STARTING) {
828
829
830
831
832 sci_change_state(&ihost->sm, SCIC_READY);
833
834 isci_host_start_complete(ihost, status);
835 }
836}
837
838static bool is_phy_starting(struct isci_phy *iphy)
839{
840 enum sci_phy_states state;
841
842 state = iphy->sm.current_state_id;
843 switch (state) {
844 case SCI_PHY_STARTING:
845 case SCI_PHY_SUB_INITIAL:
846 case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
847 case SCI_PHY_SUB_AWAIT_IAF_UF:
848 case SCI_PHY_SUB_AWAIT_SAS_POWER:
849 case SCI_PHY_SUB_AWAIT_SATA_POWER:
850 case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
851 case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
852 case SCI_PHY_SUB_AWAIT_OSSP_EN:
853 case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
854 case SCI_PHY_SUB_FINAL:
855 return true;
856 default:
857 return false;
858 }
859}
860
861bool is_controller_start_complete(struct isci_host *ihost)
862{
863 int i;
864
865 for (i = 0; i < SCI_MAX_PHYS; i++) {
866 struct isci_phy *iphy = &ihost->phys[i];
867 u32 state = iphy->sm.current_state_id;
868
		/* In APC mode every phy must be checked; in MPC mode only
		 * phys that have been assigned to a port matter.
		 */
		if (is_port_config_apc(ihost))
			/* pass */;
		else if (!phy_get_non_dummy_port(iphy))
			continue;
877
		/* The start operation is complete only once every link has had
		 * a chance to start: each phy either reports no attached
		 * device or has finished link training, and the ready mask
		 * matches the configured mask.
		 */
884 if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
885 (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
886 (iphy->is_in_link_training == true && is_phy_starting(iphy)) ||
887 (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask))
888 return false;
889 }
890
891 return true;
892}
893

/**
 * sci_controller_start_next_phy() - Start the next phy in the startup
 *    sequence.
 * @ihost: host whose phys are being started
 *
 * Once all phys have been given the chance to start, attempt to transition
 * the controller to the READY state.
 */
902static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
903{
904 struct sci_oem_params *oem = &ihost->oem_parameters;
905 struct isci_phy *iphy;
906 enum sci_status status;
907
908 status = SCI_SUCCESS;
909
910 if (ihost->phy_startup_timer_pending)
911 return status;
912
913 if (ihost->next_phy_to_start >= SCI_MAX_PHYS) {
914 if (is_controller_start_complete(ihost)) {
915 sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
916 sci_del_timer(&ihost->phy_timer);
917 ihost->phy_startup_timer_pending = false;
918 }
919 } else {
920 iphy = &ihost->phys[ihost->next_phy_to_start];
921
922 if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
923 if (phy_get_non_dummy_port(iphy) == NULL) {
924 ihost->next_phy_to_start++;
925
926
927
928
929
930
931
932
933
934
935 return sci_controller_start_next_phy(ihost);
936 }
937 }
938
939 status = sci_phy_start(iphy);
940
941 if (status == SCI_SUCCESS) {
942 sci_mod_timer(&ihost->phy_timer,
943 SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT);
944 ihost->phy_startup_timer_pending = true;
945 } else {
			dev_warn(&ihost->pdev->dev,
				 "%s: Controller start operation failed "
				 "to start phy %d because of status "
				 "%d.\n",
				 __func__,
				 ihost->phys[ihost->next_phy_to_start].phy_index,
				 status);
953 }
954
955 ihost->next_phy_to_start++;
956 }
957
958 return status;
959}
960
961static void phy_startup_timeout(struct timer_list *t)
962{
963 struct sci_timer *tmr = from_timer(tmr, t, timer);
964 struct isci_host *ihost = container_of(tmr, typeof(*ihost), phy_timer);
965 unsigned long flags;
966 enum sci_status status;
967
968 spin_lock_irqsave(&ihost->scic_lock, flags);
969
970 if (tmr->cancel)
971 goto done;
972
973 ihost->phy_startup_timer_pending = false;
974
975 do {
976 status = sci_controller_start_next_phy(ihost);
977 } while (status != SCI_SUCCESS);
978
979done:
980 spin_unlock_irqrestore(&ihost->scic_lock, flags);
981}
982
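/* Task context indices are handed out from a circular buffer: allocation
 * advances tci_head and freeing advances tci_tail, so CIRC_CNT() below is the
 * number of task contexts currently outstanding.
 */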
983static u16 isci_tci_active(struct isci_host *ihost)
984{
985 return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
986}
987
988static enum sci_status sci_controller_start(struct isci_host *ihost,
989 u32 timeout)
990{
991 enum sci_status result;
992 u16 index;
993
994 if (ihost->sm.current_state_id != SCIC_INITIALIZED) {
995 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
996 __func__, ihost->sm.current_state_id);
997 return SCI_FAILURE_INVALID_STATE;
998 }
999

	/* Build the TCi (task context index) free pool. */
	BUILD_BUG_ON(SCI_MAX_IO_REQUESTS > 1 << sizeof(ihost->tci_pool[0]) * 8);
	ihost->tci_head = 0;
	ihost->tci_tail = 0;
	for (index = 0; index < ihost->task_context_entries; index++)
		isci_tci_free(ihost, index);

	/* Build the RNi (remote node index) free pool. */
	sci_remote_node_table_initialize(&ihost->available_remote_nodes,
					 ihost->remote_node_entries);

	/* Before anything else, make sure the hardware cannot interrupt us
	 * while the queues are being set up.
	 */
	sci_controller_disable_interrupts(ihost);

	/* Enable the port task scheduler. */
	sci_controller_enable_port_task_scheduler(ihost);

	/* Assign all the task contexts to this physical function. */
	sci_controller_assign_task_entries(ihost);

	/* Now initialize the completion queue. */
	sci_controller_initialize_completion_queue(ihost);

	/* Initialize the unsolicited frame queue for use. */
	sci_controller_initialize_unsolicited_frame_queue(ihost);

	/* Start all of the ports on this controller. */
1030 for (index = 0; index < ihost->logical_port_entries; index++) {
1031 struct isci_port *iport = &ihost->ports[index];
1032
1033 result = sci_port_start(iport);
1034 if (result)
1035 return result;
1036 }
1037
1038 sci_controller_start_next_phy(ihost);
1039
1040 sci_mod_timer(&ihost->timer, timeout);
1041
1042 sci_change_state(&ihost->sm, SCIC_STARTING);
1043
1044 return SCI_SUCCESS;
1045}
1046
1047void isci_host_start(struct Scsi_Host *shost)
1048{
1049 struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
1050 unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost);
1051
1052 set_bit(IHOST_START_PENDING, &ihost->flags);
1053
1054 spin_lock_irq(&ihost->scic_lock);
1055 sci_controller_start(ihost, tmo);
1056 sci_controller_enable_interrupts(ihost);
1057 spin_unlock_irq(&ihost->scic_lock);
1058}
1059
1060static void isci_host_stop_complete(struct isci_host *ihost)
1061{
1062 sci_controller_disable_interrupts(ihost);
1063 clear_bit(IHOST_STOP_PENDING, &ihost->flags);
1064 wake_up(&ihost->eventq);
1065}
1066
1067static void sci_controller_completion_handler(struct isci_host *ihost)
1068{
1069
1070 if (sci_controller_completion_queue_has_entries(ihost))
1071 sci_controller_process_completions(ihost);
1072
1073
1074 writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
1075
1076 writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
1077 writel(0, &ihost->smu_registers->interrupt_mask);
1078}
1079
1080void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task)
1081{
1082 if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags) &&
1083 !(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1084 if (test_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags)) {
1085
1086 dev_dbg(&ihost->pdev->dev,
1087 "%s: Normal - ireq/task = %p/%p\n",
1088 __func__, ireq, task);
1089 task->lldd_task = NULL;
1090 task->task_done(task);
1091 } else {
1092 dev_dbg(&ihost->pdev->dev,
1093 "%s: Error - ireq/task = %p/%p\n",
1094 __func__, ireq, task);
1095 if (sas_protocol_ata(task->task_proto))
1096 task->lldd_task = NULL;
1097 sas_task_abort(task);
1098 }
1099 } else
1100 task->lldd_task = NULL;
1101
1102 if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
1103 wake_up_all(&ihost->eventq);
1104
1105 if (!test_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags))
1106 isci_free_tag(ihost, ireq->io_tag);
1107}
1108

/**
 * isci_host_completion_routine() - Tasklet body that drains the completion
 *    queue and then re-tunes interrupt coalescing based on the number of
 *    outstanding requests.
 * @data: the ISCI host object
 */
1116void isci_host_completion_routine(unsigned long data)
1117{
1118 struct isci_host *ihost = (struct isci_host *)data;
1119 u16 active;
1120
1121 spin_lock_irq(&ihost->scic_lock);
1122 sci_controller_completion_handler(ihost);
1123 spin_unlock_irq(&ihost->scic_lock);
1124
	/* Subtract SCI_MAX_PORTS to account for the dummy task contexts that
	 * are issued as a hardware workaround.
	 */
	active = isci_tci_active(ihost) - SCI_MAX_PORTS;

	/* The coalescing timeout doubles at each encoding step, so scale it
	 * by the ilog2 of the number of outstanding requests.
	 */
1135 writel(SMU_ICC_GEN_VAL(NUMBER, active) |
1136 SMU_ICC_GEN_VAL(TIMER, ISCI_COALESCE_BASE + ilog2(active)),
1137 &ihost->smu_registers->interrupt_coalesce_control);
1138}
1139

/**
 * sci_controller_stop() - Begin stopping the controller.
 * @ihost: the controller to stop
 * @timeout: number of milliseconds in which the stop should complete
 *
 * The controller must be in the READY state.  Returns SCI_SUCCESS if the
 * stop operation was successfully started, otherwise
 * SCI_FAILURE_INVALID_STATE.
 */
1158static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout)
1159{
1160 if (ihost->sm.current_state_id != SCIC_READY) {
1161 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
1162 __func__, ihost->sm.current_state_id);
1163 return SCI_FAILURE_INVALID_STATE;
1164 }
1165
1166 sci_mod_timer(&ihost->timer, timeout);
1167 sci_change_state(&ihost->sm, SCIC_STOPPING);
1168 return SCI_SUCCESS;
1169}
1170

/**
 * sci_controller_reset() - Reset the controller.
 * @ihost: the controller to reset
 *
 * Valid from the RESET, READY, STOPPING and FAILED states.  Returns
 * SCI_SUCCESS if the reset was started, otherwise SCI_FAILURE_INVALID_STATE.
 */
1183static enum sci_status sci_controller_reset(struct isci_host *ihost)
1184{
1185 switch (ihost->sm.current_state_id) {
1186 case SCIC_RESET:
1187 case SCIC_READY:
1188 case SCIC_STOPPING:
1189 case SCIC_FAILED:
1190
1191
1192
1193
1194 sci_change_state(&ihost->sm, SCIC_RESETTING);
1195 return SCI_SUCCESS;
1196 default:
1197 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
1198 __func__, ihost->sm.current_state_id);
1199 return SCI_FAILURE_INVALID_STATE;
1200 }
1201}
1202
1203static enum sci_status sci_controller_stop_phys(struct isci_host *ihost)
1204{
1205 u32 index;
1206 enum sci_status status;
1207 enum sci_status phy_status;
1208
1209 status = SCI_SUCCESS;
1210
1211 for (index = 0; index < SCI_MAX_PHYS; index++) {
1212 phy_status = sci_phy_stop(&ihost->phys[index]);
1213
1214 if (phy_status != SCI_SUCCESS &&
1215 phy_status != SCI_FAILURE_INVALID_STATE) {
1216 status = SCI_FAILURE;
1217
1218 dev_warn(&ihost->pdev->dev,
1219 "%s: Controller stop operation failed to stop "
1220 "phy %d because of status %d.\n",
1221 __func__,
1222 ihost->phys[index].phy_index, phy_status);
1223 }
1224 }
1225
1226 return status;
1227}
1228

/**
 * isci_host_deinit() - Shut down frame reception and DMA.
 * @ihost: host to take down
 *
 * Called from the driver shutdown and suspend paths.  Stops the controller,
 * the phys, and all of the driver timers.
 */
1241void isci_host_deinit(struct isci_host *ihost)
1242{
1243 int i;

	/* Return the SGPIO data outputs to hardware control. */
1246 for (i = 0; i < isci_gpio_count(ihost); i++)
1247 writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
1248
1249 set_bit(IHOST_STOP_PENDING, &ihost->flags);
1250
1251 spin_lock_irq(&ihost->scic_lock);
1252 sci_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT);
1253 spin_unlock_irq(&ihost->scic_lock);
1254
1255 wait_for_stop(ihost);

	/* Phy stop is done after the controller stop so that ports and
	 * devices can go idle first, but I/O is expected to have been shut
	 * off well before this point.
	 */
1262 sci_controller_stop_phys(ihost);

	/* Disable SGPIO in case the wait above did not quiesce all SGPIO
	 * activity.
	 */
1267 writel(0, &ihost->scu_registers->peg0.sgpio.interface_control);
1268
1269 spin_lock_irq(&ihost->scic_lock);
1270 sci_controller_reset(ihost);
1271 spin_unlock_irq(&ihost->scic_lock);

	/* Cancel any/all outstanding port timers. */
1274 for (i = 0; i < ihost->logical_port_entries; i++) {
1275 struct isci_port *iport = &ihost->ports[i];
1276 del_timer_sync(&iport->timer.timer);
1277 }

	/* Cancel any/all outstanding phy timers. */
1280 for (i = 0; i < SCI_MAX_PHYS; i++) {
1281 struct isci_phy *iphy = &ihost->phys[i];
1282 del_timer_sync(&iphy->sata_timer.timer);
1283 }
1284
1285 del_timer_sync(&ihost->port_agent.timer.timer);
1286
1287 del_timer_sync(&ihost->power_control.timer.timer);
1288
1289 del_timer_sync(&ihost->timer.timer);
1290
1291 del_timer_sync(&ihost->phy_timer.timer);
1292}
1293
1294static void __iomem *scu_base(struct isci_host *isci_host)
1295{
1296 struct pci_dev *pdev = isci_host->pdev;
1297 int id = isci_host->id;
1298
1299 return pcim_iomap_table(pdev)[SCI_SCU_BAR * 2] + SCI_SCU_BAR_SIZE * id;
1300}
1301
1302static void __iomem *smu_base(struct isci_host *isci_host)
1303{
1304 struct pci_dev *pdev = isci_host->pdev;
1305 int id = isci_host->id;
1306
1307 return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
1308}
1309
1310static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm)
1311{
1312 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1313
1314 sci_change_state(&ihost->sm, SCIC_RESET);
1315}
1316
1317static inline void sci_controller_starting_state_exit(struct sci_base_state_machine *sm)
1318{
1319 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1320
1321 sci_del_timer(&ihost->timer);
1322}
1323
1324#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853
1325#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280
1326#define INTERRUPT_COALESCE_TIMEOUT_MAX_US 2700000
1327#define INTERRUPT_COALESCE_NUMBER_MAX 256
1328#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN 7
1329#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX 28
1330

/**
 * sci_controller_set_interrupt_coalescence() - Configure interrupt coalescing.
 * @ihost: the controller to configure
 * @coalesce_number: number of completions required before an interrupt is
 *    raised (0 disables number-based coalescing, maximum 256)
 * @coalesce_timeout: coalescing timeout in microseconds (0 disables the
 *    timeout)
 *
 * Returns SCI_SUCCESS, or SCI_FAILURE_INVALID_PARAMETER_VALUE when either
 * value is out of range.
 */
1348static enum sci_status
1349sci_controller_set_interrupt_coalescence(struct isci_host *ihost,
1350 u32 coalesce_number,
1351 u32 coalesce_timeout)
1352{
1353 u8 timeout_encode = 0;
1354 u32 min = 0;
1355 u32 max = 0;
1356
1357
1358 if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX)
1359 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
1360
	/* The timeout is programmed as a 5-bit encoding: encode value 7 maps
	 * to a base range of roughly 0.85-1.28 us, and each further step
	 * doubles the range, up to about 2.7 s at encode value 28.  The loop
	 * below picks the smallest encoding whose range covers the requested
	 * timeout.
	 */
	if (coalesce_timeout == 0)
		timeout_encode = 0;
	else {
		/* Convert the timeout into units of 10 ns. */
		coalesce_timeout = coalesce_timeout * 100;
		min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10;
		max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10;

		/* Find the register encoding for the requested timeout. */
1408 for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN;
1409 timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX;
1410 timeout_encode++) {
1411 if (min <= coalesce_timeout && max > coalesce_timeout)
1412 break;
1413 else if (coalesce_timeout >= max && coalesce_timeout < min * 2
1414 && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) {
1415 if ((coalesce_timeout - max) < (2 * min - coalesce_timeout))
1416 break;
				else {
1418 timeout_encode++;
1419 break;
1420 }
1421 } else {
1422 max = max * 2;
1423 min = min * 2;
1424 }
1425 }
1426
1427 if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1)
			/* The requested timeout is out of range. */
1429 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
1430 }
1431
1432 writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) |
1433 SMU_ICC_GEN_VAL(TIMER, timeout_encode),
1434 &ihost->smu_registers->interrupt_coalesce_control);
1435
1436
1437 ihost->interrupt_coalesce_number = (u16)coalesce_number;
1438 ihost->interrupt_coalesce_timeout = coalesce_timeout / 100;
1439
1440 return SCI_SUCCESS;
1441}
1442
1443
1444static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)
1445{
1446 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1447 u32 val;
1448
1449
1450 val = readl(&ihost->smu_registers->clock_gating_control);
1451 val &= ~(SMU_CGUCR_GEN_BIT(REGCLK_ENABLE) |
1452 SMU_CGUCR_GEN_BIT(TXCLK_ENABLE) |
1453 SMU_CGUCR_GEN_BIT(XCLK_ENABLE));
1454 val |= SMU_CGUCR_GEN_BIT(IDLE_ENABLE);
1455 writel(val, &ihost->smu_registers->clock_gating_control);
1456
1457
1458 sci_controller_set_interrupt_coalescence(ihost, 0, 0);
1459}
1460
1461static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm)
1462{
1463 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1464
1465
1466 sci_controller_set_interrupt_coalescence(ihost, 0, 0);
1467}
1468
1469static enum sci_status sci_controller_stop_ports(struct isci_host *ihost)
1470{
1471 u32 index;
1472 enum sci_status port_status;
1473 enum sci_status status = SCI_SUCCESS;
1474
1475 for (index = 0; index < ihost->logical_port_entries; index++) {
1476 struct isci_port *iport = &ihost->ports[index];
1477
1478 port_status = sci_port_stop(iport);
1479
1480 if ((port_status != SCI_SUCCESS) &&
1481 (port_status != SCI_FAILURE_INVALID_STATE)) {
1482 status = SCI_FAILURE;
1483
1484 dev_warn(&ihost->pdev->dev,
1485 "%s: Controller stop operation failed to "
1486 "stop port %d because of status %d.\n",
1487 __func__,
1488 iport->logical_port_index,
1489 port_status);
1490 }
1491 }
1492
1493 return status;
1494}
1495
1496static enum sci_status sci_controller_stop_devices(struct isci_host *ihost)
1497{
1498 u32 index;
1499 enum sci_status status;
1500 enum sci_status device_status;
1501
1502 status = SCI_SUCCESS;
1503
1504 for (index = 0; index < ihost->remote_node_entries; index++) {
1505 if (ihost->device_table[index] != NULL) {
1506
1507 device_status = sci_remote_device_stop(ihost->device_table[index], 0);
1508
1509 if ((device_status != SCI_SUCCESS) &&
1510 (device_status != SCI_FAILURE_INVALID_STATE)) {
1511 dev_warn(&ihost->pdev->dev,
1512 "%s: Controller stop operation failed "
1513 "to stop device 0x%p because of "
1514 "status %d.\n",
1515 __func__,
1516 ihost->device_table[index], device_status);
1517 }
1518 }
1519 }
1520
1521 return status;
1522}
1523
1524static void sci_controller_stopping_state_enter(struct sci_base_state_machine *sm)
1525{
1526 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1527
1528 sci_controller_stop_devices(ihost);
1529 sci_controller_stop_ports(ihost);
1530
1531 if (!sci_controller_has_remote_devices_stopping(ihost))
1532 isci_host_stop_complete(ihost);
1533}
1534
1535static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm)
1536{
1537 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1538
1539 sci_del_timer(&ihost->timer);
1540}
1541
1542static void sci_controller_reset_hardware(struct isci_host *ihost)
1543{
	/* Disable interrupts so we don't take any spurious interrupts. */
	sci_controller_disable_interrupts(ihost);

	/* Reset the SCU. */
	writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control);

	/* Give the reset roughly 1 ms to complete. */
	udelay(1000);

	/* The write to the CQGR clears the CQP. */
	writel(0x00000000, &ihost->smu_registers->completion_queue_get);

	/* The write to the UFQGP clears the UFQPR. */
	writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);

	/* Clear all interrupts. */
	writel(~SMU_INTERRUPT_STATUS_RESERVED_MASK, &ihost->smu_registers->interrupt_status);
1561}
1562
1563static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm)
1564{
1565 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1566
1567 sci_controller_reset_hardware(ihost);
1568 sci_change_state(&ihost->sm, SCIC_RESET);
1569}
1570
1571static const struct sci_base_state sci_controller_state_table[] = {
1572 [SCIC_INITIAL] = {
1573 .enter_state = sci_controller_initial_state_enter,
1574 },
1575 [SCIC_RESET] = {},
1576 [SCIC_INITIALIZING] = {},
1577 [SCIC_INITIALIZED] = {},
1578 [SCIC_STARTING] = {
1579 .exit_state = sci_controller_starting_state_exit,
1580 },
1581 [SCIC_READY] = {
1582 .enter_state = sci_controller_ready_state_enter,
1583 .exit_state = sci_controller_ready_state_exit,
1584 },
1585 [SCIC_RESETTING] = {
1586 .enter_state = sci_controller_resetting_state_enter,
1587 },
1588 [SCIC_STOPPING] = {
1589 .enter_state = sci_controller_stopping_state_enter,
1590 .exit_state = sci_controller_stopping_state_exit,
1591 },
1592 [SCIC_FAILED] = {}
1593};
1594
1595static void controller_timeout(struct timer_list *t)
1596{
1597 struct sci_timer *tmr = from_timer(tmr, t, timer);
1598 struct isci_host *ihost = container_of(tmr, typeof(*ihost), timer);
1599 struct sci_base_state_machine *sm = &ihost->sm;
1600 unsigned long flags;
1601
1602 spin_lock_irqsave(&ihost->scic_lock, flags);
1603
1604 if (tmr->cancel)
1605 goto done;
1606
1607 if (sm->current_state_id == SCIC_STARTING)
1608 sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT);
1609 else if (sm->current_state_id == SCIC_STOPPING) {
1610 sci_change_state(sm, SCIC_FAILED);
1611 isci_host_stop_complete(ihost);
1612 } else
1613 dev_err(&ihost->pdev->dev,
1614 "%s: Controller timer fired when controller was not "
1615 "in a state being timed.\n",
1616 __func__);
1617
1618done:
1619 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1620}
1621
1622static enum sci_status sci_controller_construct(struct isci_host *ihost,
1623 void __iomem *scu_base,
1624 void __iomem *smu_base)
1625{
1626 u8 i;
1627
1628 sci_init_sm(&ihost->sm, sci_controller_state_table, SCIC_INITIAL);
1629
1630 ihost->scu_registers = scu_base;
1631 ihost->smu_registers = smu_base;
1632
1633 sci_port_configuration_agent_construct(&ihost->port_agent);
1634
1635
1636 for (i = 0; i < SCI_MAX_PORTS; i++)
1637 sci_port_construct(&ihost->ports[i], i, ihost);
1638 sci_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost);
1639
1640
1641 for (i = 0; i < SCI_MAX_PHYS; i++) {
1642
1643 sci_phy_construct(&ihost->phys[i],
1644 &ihost->ports[SCI_MAX_PORTS], i);
1645 }
1646
1647 ihost->invalid_phy_mask = 0;
1648
1649 sci_init_timer(&ihost->timer, controller_timeout);
1650
1651 return sci_controller_reset(ihost);
1652}
1653
1654int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version)
1655{
1656 int i;
1657
1658 for (i = 0; i < SCI_MAX_PORTS; i++)
1659 if (oem->ports[i].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX)
1660 return -EINVAL;
1661
1662 for (i = 0; i < SCI_MAX_PHYS; i++)
1663 if (oem->phys[i].sas_address.high == 0 &&
1664 oem->phys[i].sas_address.low == 0)
1665 return -EINVAL;
1666
1667 if (oem->controller.mode_type == SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) {
1668 for (i = 0; i < SCI_MAX_PHYS; i++)
1669 if (oem->ports[i].phy_mask != 0)
1670 return -EINVAL;
1671 } else if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
1672 u8 phy_mask = 0;
1673
1674 for (i = 0; i < SCI_MAX_PHYS; i++)
1675 phy_mask |= oem->ports[i].phy_mask;
1676
1677 if (phy_mask == 0)
1678 return -EINVAL;
1679 } else
1680 return -EINVAL;
1681
1682 if (oem->controller.max_concurr_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT ||
1683 oem->controller.max_concurr_spin_up < 1)
1684 return -EINVAL;
1685
1686 if (oem->controller.do_enable_ssc) {
1687 if (version < ISCI_ROM_VER_1_1 && oem->controller.do_enable_ssc != 1)
1688 return -EINVAL;
1689
1690 if (version >= ISCI_ROM_VER_1_1) {
1691 u8 test = oem->controller.ssc_sata_tx_spread_level;
1692
1693 switch (test) {
1694 case 0:
1695 case 2:
1696 case 3:
1697 case 6:
1698 case 7:
1699 break;
1700 default:
1701 return -EINVAL;
1702 }
1703
1704 test = oem->controller.ssc_sas_tx_spread_level;
1705 if (oem->controller.ssc_sas_tx_type == 0) {
1706 switch (test) {
1707 case 0:
1708 case 2:
1709 case 3:
1710 break;
1711 default:
1712 return -EINVAL;
1713 }
1714 } else if (oem->controller.ssc_sas_tx_type == 1) {
1715 switch (test) {
1716 case 0:
1717 case 3:
1718 case 6:
1719 break;
1720 default:
1721 return -EINVAL;
1722 }
1723 }
1724 }
1725 }
1726
1727 return 0;
1728}
1729
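/* The number of phys allowed to spin up concurrently comes from the user
 * parameter when set, otherwise from the OEM parameter, and is always capped
 * at MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT.
 */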
1730static u8 max_spin_up(struct isci_host *ihost)
1731{
1732 if (ihost->user_parameters.max_concurr_spinup)
1733 return min_t(u8, ihost->user_parameters.max_concurr_spinup,
1734 MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT);
1735 else
1736 return min_t(u8, ihost->oem_parameters.controller.max_concurr_spin_up,
1737 MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT);
1738}
1739
1740static void power_control_timeout(struct timer_list *t)
1741{
1742 struct sci_timer *tmr = from_timer(tmr, t, timer);
1743 struct isci_host *ihost = container_of(tmr, typeof(*ihost), power_control.timer);
1744 struct isci_phy *iphy;
1745 unsigned long flags;
1746 u8 i;
1747
1748 spin_lock_irqsave(&ihost->scic_lock, flags);
1749
1750 if (tmr->cancel)
1751 goto done;
1752
1753 ihost->power_control.phys_granted_power = 0;
1754
1755 if (ihost->power_control.phys_waiting == 0) {
1756 ihost->power_control.timer_started = false;
1757 goto done;
1758 }
1759
1760 for (i = 0; i < SCI_MAX_PHYS; i++) {
1761
1762 if (ihost->power_control.phys_waiting == 0)
1763 break;
1764
1765 iphy = ihost->power_control.requesters[i];
1766 if (iphy == NULL)
1767 continue;
1768
1769 if (ihost->power_control.phys_granted_power >= max_spin_up(ihost))
1770 break;
1771
1772 ihost->power_control.requesters[i] = NULL;
1773 ihost->power_control.phys_waiting--;
1774 ihost->power_control.phys_granted_power++;
1775 sci_phy_consume_power_handler(iphy);
1776
1777 if (iphy->protocol == SAS_PROTOCOL_SSP) {
1778 u8 j;
1779
1780 for (j = 0; j < SCI_MAX_PHYS; j++) {
1781 struct isci_phy *requester = ihost->power_control.requesters[j];
1782
				/* If another waiting phy is attached to the
				 * same remote device (same SAS address),
				 * grant it power now as well.
				 */
1788 if (requester != NULL && requester != iphy) {
1789 u8 other = memcmp(requester->frame_rcvd.iaf.sas_addr,
1790 iphy->frame_rcvd.iaf.sas_addr,
1791 sizeof(requester->frame_rcvd.iaf.sas_addr));
1792
1793 if (other == 0) {
1794 ihost->power_control.requesters[j] = NULL;
1795 ihost->power_control.phys_waiting--;
1796 sci_phy_consume_power_handler(requester);
1797 }
1798 }
1799 }
1800 }
1801 }
1802
	/* Restart the timer even if the waiting list is now empty, in case
	 * another phy becomes ready before the next interval.
	 */
1807 sci_mod_timer(tmr, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
1808 ihost->power_control.timer_started = true;
1809
1810done:
1811 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1812}
1813
1814void sci_controller_power_control_queue_insert(struct isci_host *ihost,
1815 struct isci_phy *iphy)
1816{
1817 BUG_ON(iphy == NULL);
1818
1819 if (ihost->power_control.phys_granted_power < max_spin_up(ihost)) {
1820 ihost->power_control.phys_granted_power++;
1821 sci_phy_consume_power_handler(iphy);

		/* Stop and restart the power control timer; when it fires,
		 * phys_granted_power is reset to zero.
		 */
1827 if (ihost->power_control.timer_started)
1828 sci_del_timer(&ihost->power_control.timer);
1829
1830 sci_mod_timer(&ihost->power_control.timer,
1831 SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
1832 ihost->power_control.timer_started = true;
1833
1834 } else {
		/* If a phy sharing this phy's SAS address is already READY,
		 * the attached device has power and this phy need not wait.
		 */
1839 u8 i;
1840 struct isci_phy *current_phy;
1841
1842 for (i = 0; i < SCI_MAX_PHYS; i++) {
1843 u8 other;
1844 current_phy = &ihost->phys[i];
1845
1846 other = memcmp(current_phy->frame_rcvd.iaf.sas_addr,
1847 iphy->frame_rcvd.iaf.sas_addr,
1848 sizeof(current_phy->frame_rcvd.iaf.sas_addr));
1849
1850 if (current_phy->sm.current_state_id == SCI_PHY_READY &&
1851 current_phy->protocol == SAS_PROTOCOL_SSP &&
1852 other == 0) {
1853 sci_phy_consume_power_handler(iphy);
1854 break;
1855 }
1856 }
1857
1858 if (i == SCI_MAX_PHYS) {
			/* Add the phy to the waiting list. */
1860 ihost->power_control.requesters[iphy->phy_index] = iphy;
1861 ihost->power_control.phys_waiting++;
1862 }
1863 }
1864}
1865
1866void sci_controller_power_control_queue_remove(struct isci_host *ihost,
1867 struct isci_phy *iphy)
1868{
1869 BUG_ON(iphy == NULL);
1870
1871 if (ihost->power_control.requesters[iphy->phy_index])
1872 ihost->power_control.phys_waiting--;
1873
1874 ihost->power_control.requesters[iphy->phy_index] = NULL;
1875}
1876
1877static int is_long_cable(int phy, unsigned char selection_byte)
1878{
1879 return !!(selection_byte & (1 << phy));
1880}
1881
1882static int is_medium_cable(int phy, unsigned char selection_byte)
1883{
1884 return !!(selection_byte & (1 << (phy + 4)));
1885}
1886
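/* The cable selection byte carries two bits per phy: bit <phy> marks a long
 * cable and bit <phy + 4> a medium cable, so the decoded value maps onto
 * short (0), long (1), medium (2) or undefined (3).
 */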
1887static enum cable_selections decode_selection_byte(
1888 int phy,
1889 unsigned char selection_byte)
1890{
1891 return ((selection_byte & (1 << phy)) ? 1 : 0)
1892 + (selection_byte & (1 << (phy + 4)) ? 2 : 0);
1893}
1894
1895static unsigned char *to_cable_select(struct isci_host *ihost)
1896{
1897 if (is_cable_select_overridden())
1898 return ((unsigned char *)&cable_selection_override)
1899 + ihost->id;
1900 else
1901 return &ihost->oem_parameters.controller.cable_selection_mask;
1902}
1903
1904enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy)
1905{
1906 return decode_selection_byte(phy, *to_cable_select(ihost));
1907}
1908
1909char *lookup_cable_names(enum cable_selections selection)
1910{
1911 static char *cable_names[] = {
1912 [short_cable] = "short",
1913 [long_cable] = "long",
1914 [medium_cable] = "medium",
1915 [undefined_cable] = "<undefined, assumed long>"
1916 };
1917 return (selection <= undefined_cable) ? cable_names[selection]
1918 : cable_names[undefined_cable];
1919}
1920
1921#define AFE_REGISTER_WRITE_DELAY 10
1922
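/* Program the analog front end (AFE) for the silicon revision in use.  Every
 * register write is followed by a short delay, and several values depend on
 * the per-phy cable selection and the OEM transmit amplitude parameters.
 */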
1923static void sci_controller_afe_initialization(struct isci_host *ihost)
1924{
1925 struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe;
1926 const struct sci_oem_params *oem = &ihost->oem_parameters;
1927 struct pci_dev *pdev = ihost->pdev;
1928 u32 afe_status;
1929 u32 phy_id;
1930 unsigned char cable_selection_mask = *to_cable_select(ihost);
1931
1932
1933 writel(0x0081000f, &afe->afe_dfx_master_control0);
1934 udelay(AFE_REGISTER_WRITE_DELAY);
1935
1936 if (is_b0(pdev) || is_c0(pdev) || is_c1(pdev)) {
1937
1938
1939
1940 writel(0x0007FFFF, &afe->afe_pmsn_master_control2);
1941 udelay(AFE_REGISTER_WRITE_DELAY);
1942 }
1943
1944
1945 if (is_a2(pdev))
1946 writel(0x00005A00, &afe->afe_bias_control);
1947 else if (is_b0(pdev) || is_c0(pdev))
1948 writel(0x00005F00, &afe->afe_bias_control);
1949 else if (is_c1(pdev))
1950 writel(0x00005500, &afe->afe_bias_control);
1951
1952 udelay(AFE_REGISTER_WRITE_DELAY);
1953
1954
1955 if (is_a2(pdev))
1956 writel(0x80040908, &afe->afe_pll_control0);
1957 else if (is_b0(pdev) || is_c0(pdev))
1958 writel(0x80040A08, &afe->afe_pll_control0);
1959 else if (is_c1(pdev)) {
1960 writel(0x80000B08, &afe->afe_pll_control0);
1961 udelay(AFE_REGISTER_WRITE_DELAY);
1962 writel(0x00000B08, &afe->afe_pll_control0);
1963 udelay(AFE_REGISTER_WRITE_DELAY);
1964 writel(0x80000B08, &afe->afe_pll_control0);
1965 }
1966
1967 udelay(AFE_REGISTER_WRITE_DELAY);
1968
1969
1970 do {
1971 afe_status = readl(&afe->afe_common_block_status);
1972 udelay(AFE_REGISTER_WRITE_DELAY);
1973 } while ((afe_status & 0x00001000) == 0);
1974
1975 if (is_a2(pdev)) {
1976
1977
1978
1979 writel(0x7bcc96ad, &afe->afe_pmsn_master_control0);
1980 udelay(AFE_REGISTER_WRITE_DELAY);
1981 }
1982
1983 for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
1984 struct scu_afe_transceiver __iomem *xcvr = &afe->scu_afe_xcvr[phy_id];
1985 const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];
1986 int cable_length_long =
1987 is_long_cable(phy_id, cable_selection_mask);
1988 int cable_length_medium =
1989 is_medium_cable(phy_id, cable_selection_mask);
1990
1991 if (is_a2(pdev)) {
1992
1993
1994
1995 writel(0x00004512, &xcvr->afe_xcvr_control0);
1996 udelay(AFE_REGISTER_WRITE_DELAY);
1997
1998 writel(0x0050100F, &xcvr->afe_xcvr_control1);
1999 udelay(AFE_REGISTER_WRITE_DELAY);
2000 } else if (is_b0(pdev)) {
2001
2002 writel(0x00030000, &xcvr->afe_tx_ssc_control);
2003 udelay(AFE_REGISTER_WRITE_DELAY);
2004 } else if (is_c0(pdev)) {
2005
2006 writel(0x00010202, &xcvr->afe_tx_ssc_control);
2007 udelay(AFE_REGISTER_WRITE_DELAY);
2008
2009
2010
2011
2012 writel(0x00014500, &xcvr->afe_xcvr_control0);
2013 udelay(AFE_REGISTER_WRITE_DELAY);
2014 } else if (is_c1(pdev)) {
2015
2016 writel(0x00010202, &xcvr->afe_tx_ssc_control);
2017 udelay(AFE_REGISTER_WRITE_DELAY);
2018
2019
2020
2021
2022 writel(0x0001C500, &xcvr->afe_xcvr_control0);
2023 udelay(AFE_REGISTER_WRITE_DELAY);
2024 }
2025
2026
2027
2028
2029 if (is_a2(pdev))
2030 writel(0x000003F0, &xcvr->afe_channel_control);
2031 else if (is_b0(pdev)) {
2032 writel(0x000003D7, &xcvr->afe_channel_control);
2033 udelay(AFE_REGISTER_WRITE_DELAY);
2034
2035 writel(0x000003D4, &xcvr->afe_channel_control);
2036 } else if (is_c0(pdev)) {
2037 writel(0x000001E7, &xcvr->afe_channel_control);
2038 udelay(AFE_REGISTER_WRITE_DELAY);
2039
2040 writel(0x000001E4, &xcvr->afe_channel_control);
2041 } else if (is_c1(pdev)) {
2042 writel(cable_length_long ? 0x000002F7 : 0x000001F7,
2043 &xcvr->afe_channel_control);
2044 udelay(AFE_REGISTER_WRITE_DELAY);
2045
2046 writel(cable_length_long ? 0x000002F4 : 0x000001F4,
2047 &xcvr->afe_channel_control);
2048 }
2049 udelay(AFE_REGISTER_WRITE_DELAY);
2050
2051 if (is_a2(pdev)) {
2052
2053 writel(0x00040000, &xcvr->afe_tx_control);
2054 udelay(AFE_REGISTER_WRITE_DELAY);
2055 }
2056
2057 if (is_a2(pdev) || is_b0(pdev))
2058
2059
2060
2061
2062 writel(0x00004100, &xcvr->afe_xcvr_control0);
2063 else if (is_c0(pdev))
2064 writel(0x00014100, &xcvr->afe_xcvr_control0);
2065 else if (is_c1(pdev))
2066 writel(0x0001C100, &xcvr->afe_xcvr_control0);
2067 udelay(AFE_REGISTER_WRITE_DELAY);
2068
2069
2070 if (is_a2(pdev))
2071 writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
2072 else if (is_b0(pdev)) {
2073 writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
2074 udelay(AFE_REGISTER_WRITE_DELAY);
2075
2076 writel(0x00040000, &xcvr->afe_tx_control);
2077 } else if (is_c0(pdev)) {
2078 writel(0x01400C0F, &xcvr->afe_rx_ssc_control1);
2079 udelay(AFE_REGISTER_WRITE_DELAY);
2080
2081 writel(0x3F6F103F, &xcvr->afe_rx_ssc_control0);
2082 udelay(AFE_REGISTER_WRITE_DELAY);
2083
2084
2085 writel(0x00040000, &xcvr->afe_tx_control);
2086 } else if (is_c1(pdev)) {
2087 writel(cable_length_long ? 0x01500C0C :
2088 cable_length_medium ? 0x01400C0D : 0x02400C0D,
2089 &xcvr->afe_xcvr_control1);
2090 udelay(AFE_REGISTER_WRITE_DELAY);
2091
2092 writel(0x000003E0, &xcvr->afe_dfx_rx_control1);
2093 udelay(AFE_REGISTER_WRITE_DELAY);
2094
2095 writel(cable_length_long ? 0x33091C1F :
2096 cable_length_medium ? 0x3315181F : 0x2B17161F,
2097 &xcvr->afe_rx_ssc_control0);
2098 udelay(AFE_REGISTER_WRITE_DELAY);
2099
2100
2101 writel(0x00040000, &xcvr->afe_tx_control);
2102 }
2103
2104 udelay(AFE_REGISTER_WRITE_DELAY);
2105
2106 writel(oem_phy->afe_tx_amp_control0, &xcvr->afe_tx_amp_control0);
2107 udelay(AFE_REGISTER_WRITE_DELAY);
2108
2109 writel(oem_phy->afe_tx_amp_control1, &xcvr->afe_tx_amp_control1);
2110 udelay(AFE_REGISTER_WRITE_DELAY);
2111
2112 writel(oem_phy->afe_tx_amp_control2, &xcvr->afe_tx_amp_control2);
2113 udelay(AFE_REGISTER_WRITE_DELAY);
2114
2115 writel(oem_phy->afe_tx_amp_control3, &xcvr->afe_tx_amp_control3);
2116 udelay(AFE_REGISTER_WRITE_DELAY);
2117 }
2118
2119
2120 writel(0x00010f00, &afe->afe_dfx_master_control0);
2121 udelay(AFE_REGISTER_WRITE_DELAY);
2122}
2123
2124static void sci_controller_initialize_power_control(struct isci_host *ihost)
2125{
2126 sci_init_timer(&ihost->power_control.timer, power_control_timeout);
2127
2128 memset(ihost->power_control.requesters, 0,
2129 sizeof(ihost->power_control.requesters));
2130
2131 ihost->power_control.phys_waiting = 0;
2132 ihost->power_control.phys_granted_power = 0;
2133}
2134
2135static enum sci_status sci_controller_initialize(struct isci_host *ihost)
2136{
2137 struct sci_base_state_machine *sm = &ihost->sm;
2138 enum sci_status result = SCI_FAILURE;
2139 unsigned long i, state, val;
2140
2141 if (ihost->sm.current_state_id != SCIC_RESET) {
2142 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2143 __func__, ihost->sm.current_state_id);
2144 return SCI_FAILURE_INVALID_STATE;
2145 }
2146
2147 sci_change_state(sm, SCIC_INITIALIZING);
2148
2149 sci_init_timer(&ihost->phy_timer, phy_startup_timeout);
2150
2151 ihost->next_phy_to_start = 0;
2152 ihost->phy_startup_timer_pending = false;
2153
2154 sci_controller_initialize_power_control(ihost);
2155
	/* Program the analog front end based on the silicon revision and OEM
	 * parameters.
	 */
2161 sci_controller_afe_initialization(ihost);
2162
	/* Take the hardware out of reset. */
	writel(0, &ihost->smu_registers->soft_reset_control);
2166
	/* Poll until the hardware reports that context RAM initialization has
	 * completed.
	 */
	for (i = 100; i >= 1; i--) {
		u32 status;

		udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME);
		status = readl(&ihost->smu_registers->control_status);

		if ((status & SCU_RAM_INIT_COMPLETED) == SCU_RAM_INIT_COMPLETED)
			break;
	}
2180 if (i == 0)
2181 goto out;

	/* Determine the device capacities actually supported by this hardware
	 * instance, clamped to the driver maximums.
	 */
	val = readl(&ihost->smu_registers->device_context_capacity);

2189 ihost->logical_port_entries = min(smu_max_ports(val), SCI_MAX_PORTS);
2190 ihost->task_context_entries = min(smu_max_task_contexts(val), SCI_MAX_IO_REQUESTS);
2191 ihost->remote_node_entries = min(smu_max_rncs(val), SCI_MAX_REMOTE_DEVICES);
2192
	/* Make all protocol engines that are unassigned match up with the
	 * logical ports.
	 */
2197 for (i = 0; i < ihost->logical_port_entries; i++) {
2198 struct scu_port_task_scheduler_group_registers __iomem
2199 *ptsg = &ihost->scu_registers->peg0.ptsg;
2200
2201 writel(i, &ptsg->protocol_engine[i]);
2202 }

	/* Enable PCI relaxed ordering in the DMA engines. */
2205 val = readl(&ihost->scu_registers->sdma.pdma_configuration);
2206 val |= SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
2207 writel(val, &ihost->scu_registers->sdma.pdma_configuration);
2208
2209 val = readl(&ihost->scu_registers->sdma.cdma_configuration);
2210 val |= SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
2211 writel(val, &ihost->scu_registers->sdma.cdma_configuration);
2212
	/* Initialize the PHYs before the PORTs because the PHY registers
	 * are accessed during port initialization.
	 */
2217 for (i = 0; i < SCI_MAX_PHYS; i++) {
2218 result = sci_phy_initialize(&ihost->phys[i],
2219 &ihost->scu_registers->peg0.pe[i].tl,
2220 &ihost->scu_registers->peg0.pe[i].ll);
2221 if (result != SCI_SUCCESS)
2222 goto out;
2223 }
2224
2225 for (i = 0; i < ihost->logical_port_entries; i++) {
2226 struct isci_port *iport = &ihost->ports[i];
2227
2228 iport->port_task_scheduler_registers = &ihost->scu_registers->peg0.ptsg.port[i];
2229 iport->port_pe_configuration_register = &ihost->scu_registers->peg0.ptsg.protocol_engine[0];
2230 iport->viit_registers = &ihost->scu_registers->peg0.viit[i];
2231 }
2232
2233 result = sci_port_configuration_agent_initialize(ihost, &ihost->port_agent);
2234
2235 out:
2236
2237 if (result == SCI_SUCCESS)
2238 state = SCIC_INITIALIZED;
2239 else
2240 state = SCIC_FAILED;
2241 sci_change_state(sm, state);
2242
2243 return result;
2244}
2245
2246static int sci_controller_dma_alloc(struct isci_host *ihost)
2247{
2248 struct device *dev = &ihost->pdev->dev;
2249 size_t size;
2250 int i;

	/* Detect re-initialization: the DMA buffers persist across a
	 * controller reset.
	 */
2253 if (ihost->completion_queue)
2254 return 0;
2255
2256 size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32);
2257 ihost->completion_queue = dmam_alloc_coherent(dev, size, &ihost->cq_dma,
2258 GFP_KERNEL);
2259 if (!ihost->completion_queue)
2260 return -ENOMEM;
2261
2262 size = ihost->remote_node_entries * sizeof(union scu_remote_node_context);
2263 ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &ihost->rnc_dma,
2264 GFP_KERNEL);
2265
2266 if (!ihost->remote_node_context_table)
2267 return -ENOMEM;
2268
	size = ihost->task_context_entries * sizeof(struct scu_task_context);
2270 ihost->task_context_table = dmam_alloc_coherent(dev, size, &ihost->tc_dma,
2271 GFP_KERNEL);
2272 if (!ihost->task_context_table)
2273 return -ENOMEM;
2274
2275 size = SCI_UFI_TOTAL_SIZE;
2276 ihost->ufi_buf = dmam_alloc_coherent(dev, size, &ihost->ufi_dma, GFP_KERNEL);
2277 if (!ihost->ufi_buf)
2278 return -ENOMEM;
2279
2280 for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
2281 struct isci_request *ireq;
2282 dma_addr_t dma;
2283
2284 ireq = dmam_alloc_coherent(dev, sizeof(*ireq), &dma, GFP_KERNEL);
2285 if (!ireq)
2286 return -ENOMEM;
2287
2288 ireq->tc = &ihost->task_context_table[i];
2289 ireq->owning_controller = ihost;
2290 ireq->request_daddr = dma;
2291 ireq->isci_host = ihost;
2292 ihost->reqs[i] = ireq;
2293 }
2294
2295 return 0;
2296}
2297
2298static int sci_controller_mem_init(struct isci_host *ihost)
2299{
2300 int err = sci_controller_dma_alloc(ihost);
2301
2302 if (err)
2303 return err;
2304
2305 writel(lower_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_lower);
2306 writel(upper_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_upper);
2307
2308 writel(lower_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_lower);
2309 writel(upper_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_upper);
2310
2311 writel(lower_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_lower);
2312 writel(upper_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_upper);
2313
2314 sci_unsolicited_frame_control_construct(ihost);
2315
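/*
 * Inform the silicon of the location of the unsolicited frame (UF)
 * headers and address table.
 */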
2320 writel(lower_32_bits(ihost->uf_control.headers.physical_address),
2321 &ihost->scu_registers->sdma.uf_header_base_address_lower);
2322 writel(upper_32_bits(ihost->uf_control.headers.physical_address),
2323 &ihost->scu_registers->sdma.uf_header_base_address_upper);
2324
2325 writel(lower_32_bits(ihost->uf_control.address_table.physical_address),
2326 &ihost->scu_registers->sdma.uf_address_table_lower);
2327 writel(upper_32_bits(ihost->uf_control.address_table.physical_address),
2328 &ihost->scu_registers->sdma.uf_address_table_upper);
2329
2330 return 0;
2331}
2332
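/**
 * isci_host_init - (re-)initialize controller hardware and private state
 * @ihost: host to initialize
 *
 * Constructs and initializes the core controller, sets up the DMA-able
 * memory regions, and enables SGPIO.  Returns 0 on success or a negative
 * errno on failure.
 */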
2341int isci_host_init(struct isci_host *ihost)
2342{
2343 int i, err;
2344 enum sci_status status;
2345
2346 spin_lock_irq(&ihost->scic_lock);
2347 status = sci_controller_construct(ihost, scu_base(ihost), smu_base(ihost));
2348 spin_unlock_irq(&ihost->scic_lock);
2349 if (status != SCI_SUCCESS) {
2350 dev_err(&ihost->pdev->dev,
2351 "%s: sci_controller_construct failed - status = %x\n",
2352 __func__,
2353 status);
2354 return -ENODEV;
2355 }
2356
2357 spin_lock_irq(&ihost->scic_lock);
2358 status = sci_controller_initialize(ihost);
2359 spin_unlock_irq(&ihost->scic_lock);
2360 if (status != SCI_SUCCESS) {
2361 dev_warn(&ihost->pdev->dev,
2362 "%s: sci_controller_initialize failed -"
2363 " status = 0x%x\n",
2364 __func__, status);
2365 return -ENODEV;
2366 }
2367
2368 err = sci_controller_mem_init(ihost);
2369 if (err)
2370 return err;
2371
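/* enable sgpio and hand the outputs to hardware control */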
2373 writel(1, &ihost->scu_registers->peg0.sgpio.interface_control);
2374 for (i = 0; i < isci_gpio_count(ihost); i++)
2375 writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
2376 writel(0, &ihost->scu_registers->peg0.sgpio.vendor_specific_code);
2377
2378 return 0;
2379}
2380
2381void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport,
2382 struct isci_phy *iphy)
2383{
2384 switch (ihost->sm.current_state_id) {
2385 case SCIC_STARTING:
2386 sci_del_timer(&ihost->phy_timer);
2387 ihost->phy_startup_timer_pending = false;
2388 ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
2389 iport, iphy);
2390 sci_controller_start_next_phy(ihost);
2391 break;
2392 case SCIC_READY:
2393 ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
2394 iport, iphy);
2395 break;
2396 default:
2397 dev_dbg(&ihost->pdev->dev,
2398 "%s: SCIC Controller linkup event from phy %d in "
2399 "unexpected state %d\n", __func__, iphy->phy_index,
2400 ihost->sm.current_state_id);
2401 }
2402}
2403
2404void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
2405 struct isci_phy *iphy)
2406{
2407 switch (ihost->sm.current_state_id) {
2408 case SCIC_STARTING:
2409 case SCIC_READY:
2410 ihost->port_agent.link_down_handler(ihost, &ihost->port_agent,
2411 iport, iphy);
2412 break;
2413 default:
2414 dev_dbg(&ihost->pdev->dev,
2415 "%s: SCIC Controller linkdown event from phy %d in "
2416 "unexpected state %d\n",
2417 __func__,
2418 iphy->phy_index,
2419 ihost->sm.current_state_id);
2420 }
2421}
2422
2423bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost)
2424{
2425 u32 index;
2426
2427 for (index = 0; index < ihost->remote_node_entries; index++) {
2428 if ((ihost->device_table[index] != NULL) &&
2429 (ihost->device_table[index]->sm.current_state_id == SCI_DEV_STOPPING))
2430 return true;
2431 }
2432
2433 return false;
2434}
2435
2436void sci_controller_remote_device_stopped(struct isci_host *ihost,
2437 struct isci_remote_device *idev)
2438{
2439 if (ihost->sm.current_state_id != SCIC_STOPPING) {
2440 dev_dbg(&ihost->pdev->dev,
2441 "SCIC Controller 0x%p remote device stopped event "
2442 "from device 0x%p in unexpected state %d\n",
2443 ihost, idev,
2444 ihost->sm.current_state_id);
2445 return;
2446 }
2447
2448 if (!sci_controller_has_remote_devices_stopping(ihost))
2449 isci_host_stop_complete(ihost);
2450}
2451
2452void sci_controller_post_request(struct isci_host *ihost, u32 request)
2453{
2454 dev_dbg(&ihost->pdev->dev, "%s[%d]: %#x\n",
2455 __func__, ihost->id, request);
2456
2457 writel(request, &ihost->smu_registers->post_context_port);
2458}
2459
2460struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag)
2461{
2462 u16 task_index;
2463 u16 task_sequence;
2464
2465 task_index = ISCI_TAG_TCI(io_tag);
2466
2467 if (task_index < ihost->task_context_entries) {
2468 struct isci_request *ireq = ihost->reqs[task_index];
2469
2470 if (test_bit(IREQ_ACTIVE, &ireq->flags)) {
2471 task_sequence = ISCI_TAG_SEQ(io_tag);
2472
2473 if (task_sequence == ihost->io_request_sequence[task_index])
2474 return ireq;
2475 }
2476 }
2477
2478 return NULL;
2479}
2480
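/**
 * sci_controller_allocate_remote_node_context() - reserve remote node
 *    context(s) for a remote device
 * @ihost: controller that owns the pool of free remote node indices
 * @idev: the remote device requesting a remote node id
 * @node_id: set to the allocated remote node index on success
 *
 * A device may require more than one contiguous context (see
 * sci_remote_device_node_count()).  Returns SCI_SUCCESS, or
 * SCI_FAILURE_INSUFFICIENT_RESOURCES if no suitable index is available.
 */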
2495enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
2496 struct isci_remote_device *idev,
2497 u16 *node_id)
2498{
2499 u16 node_index;
2500 u32 remote_node_count = sci_remote_device_node_count(idev);
2501
2502 node_index = sci_remote_node_table_allocate_remote_node(
2503 &ihost->available_remote_nodes, remote_node_count
2504 );
2505
2506 if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
2507 ihost->device_table[node_index] = idev;
2508
2509 *node_id = node_index;
2510
2511 return SCI_SUCCESS;
2512 }
2513
2514 return SCI_FAILURE_INSUFFICIENT_RESOURCES;
2515}
2516
2517void sci_controller_free_remote_node_context(struct isci_host *ihost,
2518 struct isci_remote_device *idev,
2519 u16 node_id)
2520{
2521 u32 remote_node_count = sci_remote_device_node_count(idev);
2522
2523 if (ihost->device_table[node_id] == idev) {
2524 ihost->device_table[node_id] = NULL;
2525
2526 sci_remote_node_table_release_remote_node_index(
2527 &ihost->available_remote_nodes, remote_node_count, node_id
2528 );
2529 }
2530}
2531
2532void sci_controller_copy_sata_response(void *response_buffer,
2533 void *frame_header,
2534 void *frame_buffer)
2535{
2536
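/*
 * Build the D2H FIS in the response buffer: the first dword comes from
 * the frame header, the remainder from the frame buffer.
 */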
2537 memcpy(response_buffer, frame_header, sizeof(u32));
2538
2539 memcpy(response_buffer + sizeof(u32),
2540 frame_buffer,
2541 sizeof(struct dev_to_host_fis) - sizeof(u32));
2542}
2543
2544void sci_controller_release_frame(struct isci_host *ihost, u32 frame_index)
2545{
2546 if (sci_unsolicited_frame_control_release_frame(&ihost->uf_control, frame_index))
2547 writel(ihost->uf_control.get,
2548 &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
2549}
2550
2551void isci_tci_free(struct isci_host *ihost, u16 tci)
2552{
2553 u16 tail = ihost->tci_tail & (SCI_MAX_IO_REQUESTS-1);
2554
2555 ihost->tci_pool[tail] = tci;
2556 ihost->tci_tail = tail + 1;
2557}
2558
2559static u16 isci_tci_alloc(struct isci_host *ihost)
2560{
2561 u16 head = ihost->tci_head & (SCI_MAX_IO_REQUESTS-1);
2562 u16 tci = ihost->tci_pool[head];
2563
2564 ihost->tci_head = head + 1;
2565 return tci;
2566}
2567
2568static u16 isci_tci_space(struct isci_host *ihost)
2569{
2570 return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
2571}
2572
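/*
 * An io_tag pairs a task context index (TCI) with a per-TCI sequence
 * number (see ISCI_TAG()).  The sequence number is advanced when a tag is
 * freed so that stale tags for a reused TCI are rejected by
 * sci_request_by_tag() and isci_free_tag().
 */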
2573u16 isci_alloc_tag(struct isci_host *ihost)
2574{
2575 if (isci_tci_space(ihost)) {
2576 u16 tci = isci_tci_alloc(ihost);
2577 u8 seq = ihost->io_request_sequence[tci];
2578
2579 return ISCI_TAG(seq, tci);
2580 }
2581
2582 return SCI_CONTROLLER_INVALID_IO_TAG;
2583}
2584
2585enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag)
2586{
2587 u16 tci = ISCI_TAG_TCI(io_tag);
2588 u16 seq = ISCI_TAG_SEQ(io_tag);
2589
2590
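/* prevent the tail from passing the head: no active tags means nothing to free */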
2591 if (isci_tci_active(ihost) == 0)
2592 return SCI_FAILURE_INVALID_IO_TAG;
2593
2594 if (seq == ihost->io_request_sequence[tci]) {
2595 ihost->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1);
2596
2597 isci_tci_free(ihost, tci);
2598
2599 return SCI_SUCCESS;
2600 }
2601 return SCI_FAILURE_INVALID_IO_TAG;
2602}
2603
2604enum sci_status sci_controller_start_io(struct isci_host *ihost,
2605 struct isci_remote_device *idev,
2606 struct isci_request *ireq)
2607{
2608 enum sci_status status;
2609
2610 if (ihost->sm.current_state_id != SCIC_READY) {
2611 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2612 __func__, ihost->sm.current_state_id);
2613 return SCI_FAILURE_INVALID_STATE;
2614 }
2615
2616 status = sci_remote_device_start_io(ihost, idev, ireq);
2617 if (status != SCI_SUCCESS)
2618 return status;
2619
2620 set_bit(IREQ_ACTIVE, &ireq->flags);
2621 sci_controller_post_request(ihost, ireq->post_context);
2622 return SCI_SUCCESS;
2623}
2624
2625enum sci_status sci_controller_terminate_request(struct isci_host *ihost,
2626 struct isci_remote_device *idev,
2627 struct isci_request *ireq)
2628{
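/*
 * Terminate an ongoing (i.e. started) core IO request.  This does not
 * abort the IO request at the target, but rather removes the IO request
 * from the host controller.
 */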
2633 enum sci_status status;
2634
2635 if (ihost->sm.current_state_id != SCIC_READY) {
2636 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2637 __func__, ihost->sm.current_state_id);
2638 return SCI_FAILURE_INVALID_STATE;
2639 }
2640 status = sci_io_request_terminate(ireq);
2641
2642 dev_dbg(&ihost->pdev->dev, "%s: status=%d; ireq=%p; flags=%lx\n",
2643 __func__, status, ireq, ireq->flags);
2644
2645 if ((status == SCI_SUCCESS) &&
2646 !test_bit(IREQ_PENDING_ABORT, &ireq->flags) &&
2647 !test_and_set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags)) {
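/*
 * Reuse the original post context command and or in the
 * POST_TC_ABORT request sub-type.
 */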
2651 sci_controller_post_request(
2652 ihost, ireq->post_context |
2653 SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
2654 }
2655 return status;
2656}
2657
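/**
 * sci_controller_complete_io() - complete core processing of an IO request
 * @ihost: the controller on which the request was started
 * @idev: the remote device to which the request was issued
 * @ireq: the request to complete
 *
 * On success the request is no longer active and must be considered invalid
 * until it is re-constructed for reuse.
 */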
2669enum sci_status sci_controller_complete_io(struct isci_host *ihost,
2670 struct isci_remote_device *idev,
2671 struct isci_request *ireq)
2672{
2673 enum sci_status status;
2674
2675 switch (ihost->sm.current_state_id) {
2676 case SCIC_STOPPING:
2677
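/* XXX: completions during a controller stop are simply failed */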
2678 return SCI_FAILURE;
2679 case SCIC_READY:
2680 status = sci_remote_device_complete_io(ihost, idev, ireq);
2681 if (status != SCI_SUCCESS)
2682 return status;
2683
2684 clear_bit(IREQ_ACTIVE, &ireq->flags);
2685 return SCI_SUCCESS;
2686 default:
2687 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2688 __func__, ihost->sm.current_state_id);
2689 return SCI_FAILURE_INVALID_STATE;
2690 }
2691
2692}
2693
2694enum sci_status sci_controller_continue_io(struct isci_request *ireq)
2695{
2696 struct isci_host *ihost = ireq->owning_controller;
2697
2698 if (ihost->sm.current_state_id != SCIC_READY) {
2699 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2700 __func__, ihost->sm.current_state_id);
2701 return SCI_FAILURE_INVALID_STATE;
2702 }
2703
2704 set_bit(IREQ_ACTIVE, &ireq->flags);
2705 sci_controller_post_request(ihost, ireq->post_context);
2706 return SCI_SUCCESS;
2707}
2708
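/**
 * sci_controller_start_task() - start a task management request
 * @ihost: the controller on which to start the task management request
 * @idev: the remote device to which the request is directed
 * @ireq: the task request to start
 */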
2718enum sci_status sci_controller_start_task(struct isci_host *ihost,
2719 struct isci_remote_device *idev,
2720 struct isci_request *ireq)
2721{
2722 enum sci_status status;
2723
2724 if (ihost->sm.current_state_id != SCIC_READY) {
2725 dev_warn(&ihost->pdev->dev,
2726 "%s: SCIC Controller starting task from invalid "
2727 "state\n",
2728 __func__);
2729 return SCI_FAILURE_INVALID_STATE;
2730 }
2731
2732 status = sci_remote_device_start_task(ihost, idev, ireq);
2733 switch (status) {
2734 case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS:
2735 set_bit(IREQ_ACTIVE, &ireq->flags);
2736
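/*
 * The device reset only partially succeeded; mark the request active
 * and report success, but do not post it to the hardware here.
 */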
2742 return SCI_SUCCESS;
2743 case SCI_SUCCESS:
2744 set_bit(IREQ_ACTIVE, &ireq->flags);
2745 sci_controller_post_request(ihost, ireq->post_context);
2746 break;
2747 default:
2748 break;
2749 }
2750
2751 return status;
2752}
2753
2754static int sci_write_gpio_tx_gp(struct isci_host *ihost, u8 reg_index, u8 reg_count, u8 *write_data)
2755{
2756 int d;
2757
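/* no support for TX_GP_CFG (register index 0) */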
2759 if (reg_index == 0)
2760 return -EINVAL;
2761
2762 for (d = 0; d < isci_gpio_count(ihost); d++) {
2763 u32 val = 0x444; /* default: 'invert' bit set for each output */
2764 int i;
2765
2766 for (i = 0; i < 3; i++) {
2767 int bit;
2768
2769 bit = try_test_sas_gpio_gp_bit(to_sas_gpio_od(d, i),
2770 write_data, reg_index,
2771 reg_count);
2772 if (bit < 0)
2773 break;
2774
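/* if the od bit for this output is set, clear the corresponding 'invert' bit */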
2776 val &= ~(bit << ((i << 2) + 2));
2777 }
2778
2779 if (i < 3)
2780 break;
2781 writel(val, &ihost->scu_registers->peg0.sgpio.output_data_select[d]);
2782 }
2783
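/* report whether at least one gpio register was written */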
2787 return d > 0;
2788}
2789
2790int isci_gpio_write(struct sas_ha_struct *sas_ha, u8 reg_type, u8 reg_index,
2791 u8 reg_count, u8 *write_data)
2792{
2793 struct isci_host *ihost = sas_ha->lldd_ha;
2794 int written;
2795
2796 switch (reg_type) {
2797 case SAS_GPIO_REG_TX_GP:
2798 written = sci_write_gpio_tx_gp(ihost, reg_index, reg_count, write_data);
2799 break;
2800 default:
2801 written = -EINVAL;
2802 }
2803
2804 return written;
2805}
2806