1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44#include "esas2r.h"
45
46void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq)
47{
48 struct esas2r_target *t = NULL;
49 struct esas2r_request *startrq = rq;
50 unsigned long flags;
51
52 if (unlikely(test_bit(AF_DEGRADED_MODE, &a->flags) ||
53 test_bit(AF_POWER_DOWN, &a->flags))) {
54 if (rq->vrq->scsi.function == VDA_FUNC_SCSI)
55 rq->req_stat = RS_SEL2;
56 else
57 rq->req_stat = RS_DEGRADED;
58 } else if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) {
59 t = a->targetdb + rq->target_id;
60
61 if (unlikely(t >= a->targetdb_end
62 || !(t->flags & TF_USED))) {
63 rq->req_stat = RS_SEL;
64 } else {
65
66 rq->vrq->scsi.target_id = cpu_to_le16(t->virt_targ_id);
67
68
69
70
71
72
73 if (unlikely(t->target_state != TS_PRESENT &&
74 !test_bit(AF_DISC_PENDING, &a->flags)))
75 rq->req_stat = RS_SEL;
76 }
77 }
78
79 if (unlikely(rq->req_stat != RS_PENDING)) {
80 esas2r_complete_request(a, rq);
81 return;
82 }
83
84 esas2r_trace("rq=%p", rq);
85 esas2r_trace("rq->vrq->scsi.handle=%x", rq->vrq->scsi.handle);
86
87 if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
88 esas2r_trace("rq->target_id=%d", rq->target_id);
89 esas2r_trace("rq->vrq->scsi.flags=%x", rq->vrq->scsi.flags);
90 }
91
92 spin_lock_irqsave(&a->queue_lock, flags);
93
94 if (likely(list_empty(&a->defer_list) &&
95 !test_bit(AF_CHPRST_PENDING, &a->flags) &&
96 !test_bit(AF_FLASHING, &a->flags) &&
97 !test_bit(AF_DISC_PENDING, &a->flags)))
98 esas2r_local_start_request(a, startrq);
99 else
100 list_add_tail(&startrq->req_list, &a->defer_list);
101
102 spin_unlock_irqrestore(&a->queue_lock, flags);
103}
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119void esas2r_local_start_request(struct esas2r_adapter *a,
120 struct esas2r_request *rq)
121{
122 esas2r_trace_enter();
123 esas2r_trace("rq=%p", rq);
124 esas2r_trace("rq->vrq:%p", rq->vrq);
125 esas2r_trace("rq->vrq_md->phys_addr:%x", rq->vrq_md->phys_addr);
126
127 if (unlikely(rq->vrq->scsi.function == VDA_FUNC_FLASH
128 && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT))
129 set_bit(AF_FLASHING, &a->flags);
130
131 list_add_tail(&rq->req_list, &a->active_list);
132 esas2r_start_vda_request(a, rq);
133 esas2r_trace_exit();
134 return;
135}
136
137void esas2r_start_vda_request(struct esas2r_adapter *a,
138 struct esas2r_request *rq)
139{
140 struct esas2r_inbound_list_source_entry *element;
141 u32 dw;
142
143 rq->req_stat = RS_STARTED;
144
145
146
147
148 a->last_write++;
149 if (a->last_write >= a->list_size) {
150 a->last_write = 0;
151
152 if (test_bit(AF_COMM_LIST_TOGGLE, &a->flags))
153 clear_bit(AF_COMM_LIST_TOGGLE, &a->flags);
154 else
155 set_bit(AF_COMM_LIST_TOGGLE, &a->flags);
156 }
157
158 element =
159 (struct esas2r_inbound_list_source_entry *)a->inbound_list_md.
160 virt_addr
161 + a->last_write;
162
163
164 if (rq->vda_req_sz == RQ_SIZE_DEFAULT)
165 rq->vda_req_sz = (u16)(a->max_vdareq_size / sizeof(u32));
166
167 element->address = cpu_to_le64(rq->vrq_md->phys_addr);
168 element->length = cpu_to_le32(rq->vda_req_sz);
169
170
171 dw = a->last_write;
172
173 if (test_bit(AF_COMM_LIST_TOGGLE, &a->flags))
174 dw |= MU_ILW_TOGGLE;
175
176 esas2r_trace("rq->vrq->scsi.handle:%x", rq->vrq->scsi.handle);
177 esas2r_trace("dw:%x", dw);
178 esas2r_trace("rq->vda_req_sz:%x", rq->vda_req_sz);
179 esas2r_write_register_dword(a, MU_IN_LIST_WRITE, dw);
180}
181
182
183
184
185
186
187bool esas2r_build_sg_list_sge(struct esas2r_adapter *a,
188 struct esas2r_sg_context *sgc)
189{
190 struct esas2r_request *rq = sgc->first_req;
191 union atto_vda_req *vrq = rq->vrq;
192
193 while (sgc->length) {
194 u32 rem = 0;
195 u64 addr;
196 u32 len;
197
198 len = (*sgc->get_phys_addr)(sgc, &addr);
199
200 if (unlikely(len == 0))
201 return false;
202
203
204 if (unlikely(len > sgc->length))
205 len = sgc->length;
206
207another_entry:
208
209 if (len > SGE_LEN_MAX) {
210
211
212
213
214
215
216
217 rem = len - SGE_LEN_MAX;
218 len = SGE_LEN_MAX;
219 }
220
221
222 if (unlikely(sgc->sge.a64.curr > sgc->sge.a64.limit)) {
223 u8 sgelen;
224 struct esas2r_mem_desc *sgl;
225
226
227
228
229
230
231 sgl = esas2r_alloc_sgl(a);
232
233 if (unlikely(sgl == NULL))
234 return false;
235
236
237 sgelen = (u8)((u8 *)sgc->sge.a64.curr
238 - (u8 *)sgc->sge.a64.last);
239
240
241
242
243
244 memcpy(sgl->virt_addr, sgc->sge.a64.last, sgelen);
245
246
247 sgc->sge.a64.curr =
248 (struct atto_vda_sge *)((u8 *)sgl->virt_addr +
249 sgelen);
250
251
252 sgc->sge.a64.limit =
253 (struct atto_vda_sge *)((u8 *)sgl->virt_addr
254 + sgl_page_size
255 - sizeof(struct
256 atto_vda_sge));
257 sgc->sge.a64.last->length = cpu_to_le32(
258 SGE_CHAIN | SGE_ADDR_64);
259 sgc->sge.a64.last->address =
260 cpu_to_le64(sgl->phys_addr);
261
262
263
264
265
266
267
268 if (sgc->sge.a64.chain) {
269 sgc->sge.a64.chain->length |=
270 cpu_to_le32(
271 ((u8 *)(sgc->sge.a64.
272 last + 1)
273 - (u8 *)rq->sg_table->
274 virt_addr)
275 + sizeof(struct atto_vda_sge) *
276 LOBIT(SGE_CHAIN_SZ));
277 } else {
278 vrq->scsi.chain_offset = (u8)
279 ((u8 *)sgc->
280 sge.a64.last -
281 (u8 *)vrq);
282
283
284
285
286
287
288 rq->vda_req_sz =
289 (vrq->scsi.chain_offset +
290 sizeof(struct atto_vda_sge) +
291 3)
292 / sizeof(u32);
293 }
294
295
296
297
298
299 sgc->sge.a64.chain = sgc->sge.a64.last;
300
301
302 list_add(&sgl->next_desc, &rq->sg_table_head);
303 }
304
305
306 sgc->sge.a64.last = sgc->sge.a64.curr;
307
308
309 sgc->sge.a64.curr->length = cpu_to_le32(SGE_ADDR_64 | len);
310 sgc->sge.a64.curr->address = cpu_to_le32(addr);
311 sgc->sge.a64.curr++;
312 sgc->cur_offset += len;
313 sgc->length -= len;
314
315
316
317
318
319 if (rem) {
320 addr += len;
321 len = rem;
322 rem = 0;
323 goto another_entry;
324 }
325 }
326
327
328 sgc->sge.a64.last->length |= cpu_to_le32(SGE_LAST);
329
330
331
332
333
334 if (sgc->sge.a64.chain) {
335 sgc->sge.a64.chain->length |= cpu_to_le32(
336 ((u8 *)(sgc->sge.a64.curr) -
337 (u8 *)rq->sg_table->virt_addr));
338 } else {
339 u16 reqsize;
340
341
342
343
344
345 reqsize =
346 ((u16)((u8 *)sgc->sge.a64.last - (u8 *)vrq)
347 + sizeof(struct atto_vda_sge) + 3) / sizeof(u32);
348
349
350
351
352
353
354 if (reqsize > rq->vda_req_sz)
355 rq->vda_req_sz = reqsize;
356 }
357 return true;
358}
359
360
361
362
363
364
365
366
367
368
369
/*
 * Build one PRD (physical region descriptor) list section for a request,
 * consuming sgc->length bytes from the scatter/gather source.  PRD pages
 * are allocated and chained as needed; each is tracked on
 * rq->sg_table_head so it can be freed with the request.  Returns false
 * if a physical address cannot be obtained or a page allocation fails.
 */
static bool esas2r_build_prd_iblk(struct esas2r_adapter *a,
				  struct esas2r_sg_context *sgc)
{
	struct esas2r_request *rq = sgc->first_req;
	u64 addr;
	u32 len;
	struct esas2r_mem_desc *sgl;
	u32 numchain = 1;	/* cleared if the final entry needs no chain */
	u32 rem = 0;

	while (sgc->length) {
		/* sanity check the current SGL */
		len = (*sgc->get_phys_addr)(sgc, &addr);

		if (unlikely(len == 0))
			return false;

		/* if current length is more than what's left, stop there */
		if (unlikely(len > sgc->length))
			len = sgc->length;

another_entry:
		/* limit to a round number less than the maximum length */
		if (len > PRD_LEN_MAX) {
			/*
			 * Save the remainder of the split.  Whenever we limit
			 * an entry we come back around to build entries out
			 * of the leftover so no single PRD exceeds
			 * PRD_LEN_MAX.
			 */
			rem = len - PRD_LEN_MAX;
			len = PRD_LEN_MAX;
		}

		/* the current PRD page is full; start a new one */
		if (sgc->sge.prd.sge_cnt == 0) {
			if (len == sgc->length) {
				/*
				 * We are about to exhaust the buffer, so the
				 * last remaining slot can hold this final
				 * data entry directly - no chain needed.
				 */
				sgc->sge.prd.curr->ctl_len = cpu_to_le32(
					PRD_DATA | len);
				sgc->sge.prd.curr->address = cpu_to_le64(addr);

				/* update the amount of data handled */
				sgc->cur_offset += len;
				sgc->length -= len;

				/* no chain entry was consumed */
				numchain = 0;

				break;
			}

			if (sgc->sge.prd.chain) {
				/*
				 * The previous page chained to this one and
				 * is now completely full; record its final
				 * entry count in the chain descriptor.
				 */
				sgc->sge.prd.chain->ctl_len |= cpu_to_le32(
					sgc->sge.prd.sgl_max_cnt);
			}

			/*
			 * If no SGL pages are available, return failure.
			 * The request will be retried when one frees up.
			 */
			sgl = esas2r_alloc_sgl(a);

			if (unlikely(sgl == NULL))
				return false;

			/* track the page so it is freed with the request */
			list_add(&sgl->next_desc, &rq->sg_table_head);

			/*
			 * The last slot of the current page becomes a chain
			 * descriptor pointing at the newly allocated page.
			 */
			sgc->sge.prd.chain = sgc->sge.prd.curr;

			sgc->sge.prd.chain->ctl_len = cpu_to_le32(PRD_CHAIN);
			sgc->sge.prd.chain->address =
				cpu_to_le64(sgl->phys_addr);

			/*
			 * Continue building entries in the new page, keeping
			 * one slot in reserve for a possible further chain.
			 */
			sgc->sge.prd.curr =
				(struct atto_physical_region_description *)sgl
				->
				virt_addr;
			sgc->sge.prd.sge_cnt = sgc->sge.prd.sgl_max_cnt - 1;
		}

		sgc->sge.prd.sge_cnt--;

		/* write the data entry */
		sgc->sge.prd.curr->ctl_len = cpu_to_le32(PRD_DATA | len);
		sgc->sge.prd.curr->address = cpu_to_le64(addr);

		/* advance to the next slot */
		sgc->sge.prd.curr++;

		/* update the amount of data handled */
		sgc->cur_offset += len;
		sgc->length -= len;

		/*
		 * Build another entry out of the leftover of a split, if any
		 * (see the PRD_LEN_MAX limiting above).
		 */
		if (rem) {
			addr += len;
			len = rem;
			rem = 0;
			goto another_entry;
		}
	}

	/* finalize the entry count recorded in the last chain descriptor */
	if (!list_empty(&rq->sg_table_head)) {
		if (sgc->sge.prd.chain) {
			sgc->sge.prd.chain->ctl_len |=
				cpu_to_le32(sgc->sge.prd.sgl_max_cnt
					    - sgc->sge.prd.sge_cnt
					    - numchain);
		}
	}

	return true;
}
523
/*
 * Build a PRD-format scatter/gather list for a request.  For READ/WRITE
 * commands to a present, non-passthrough target the buffer is carved
 * into inter-block sized sections, each described by its own PRD list
 * built via esas2r_build_prd_iblk(); anything else is built as a single
 * section.  Returns false if building any section fails.
 */
bool esas2r_build_sg_list_prd(struct esas2r_adapter *a,
			      struct esas2r_sg_context *sgc)
{
	struct esas2r_request *rq = sgc->first_req;
	u32 len = sgc->length;
	struct esas2r_target *t = a->targetdb + rq->target_id;
	u8 is_i_o = 0;		/* set when the CDB is a recognized R/W */
	u16 reqsize;
	struct atto_physical_region_description *curr_iblk_chn;
	u8 *cdb = (u8 *)&rq->vrq->scsi.cdb[0];

	/*
	 * Extract the LBA from the command so we can determine the
	 * inter-block boundary.  Only the low 32 bits of the LBA are used
	 * for the alignment computation below.
	 */
	if (rq->vrq->scsi.function == VDA_FUNC_SCSI
	    && t->target_state == TS_PRESENT
	    && !(t->flags & TF_PASS_THRU)) {
		u32 lbalo = 0;

		switch (rq->vrq->scsi.cdb[0]) {
		case READ_16:
		case WRITE_16:
		{
			/* 16-byte CDB: low LBA dword is big-endian in
			 * bytes 6-9 */
			lbalo =
				MAKEDWORD(MAKEWORD(cdb[9],
						   cdb[8]),
					  MAKEWORD(cdb[7],
						   cdb[6]));
			is_i_o = 1;
			break;
		}

		case READ_12:
		case WRITE_12:
		case READ_10:
		case WRITE_10:
		{
			/* 10/12-byte CDB: LBA is big-endian in bytes 2-5 */
			lbalo =
				MAKEDWORD(MAKEWORD(cdb[5],
						   cdb[4]),
					  MAKEWORD(cdb[3],
						   cdb[2]));
			is_i_o = 1;
			break;
		}

		case READ_6:
		case WRITE_6:
		{
			/* 6-byte CDB: 21-bit LBA in bytes 1-3 (high 5 bits
			 * of byte 1 are the obsolete LUN field) */
			lbalo =
				MAKEDWORD(MAKEWORD(cdb[3],
						   cdb[2]),
					  MAKEWORD(cdb[1] & 0x1F,
						   0));
			is_i_o = 1;
			break;
		}

		default:
			break;
		}

		if (is_i_o) {
			u32 startlba;

			rq->vrq->scsi.iblk_cnt_prd = 0;

			/* determine size of the first inter-block section;
			 * the mask assumes t->inter_block is a power of
			 * two -- NOTE(review): not verifiable from here */
			startlba = t->inter_block - (lbalo & (t->inter_block -
							      1));
			sgc->length = startlba * t->block_size;

			/* flag requests that start exactly on an
			 * inter-block boundary */
			if ((lbalo & (t->inter_block - 1)) == 0)
				rq->flags |= RF_1ST_IBLK_BASE;

			if (sgc->length > len)
				sgc->length = len;
		} else {
			sgc->length = len;
		}
	} else {
		sgc->length = len;
	}

	/* get our starting chain address (the prd/a64 members overlay the
	 * same storage in the context) */
	curr_iblk_chn =
		(struct atto_physical_region_description *)sgc->sge.a64.curr;

	sgc->sge.prd.sgl_max_cnt = sgl_page_size /
				   sizeof(struct
					  atto_physical_region_description);

	/* create all of the inter-block PRD lists */
	while (len) {
		sgc->sge.prd.sge_cnt = 0;
		sgc->sge.prd.chain = NULL;
		sgc->sge.prd.curr = curr_iblk_chn;

		/* account for the section about to be built */
		len -= sgc->length;

		/* go build the next inter-block PRD list */
		if (unlikely(!esas2r_build_prd_iblk(a, sgc)))
			return false;

		curr_iblk_chn++;

		if (is_i_o) {
			rq->vrq->scsi.iblk_cnt_prd++;

			/* subsequent sections are one inter-block long,
			 * or whatever remains */
			if (len > t->inter_byte)
				sgc->length = t->inter_byte;
			else
				sgc->length = len;
		}
	}

	/* figure out the size used of the VDA request */
	reqsize = ((u16)((u8 *)curr_iblk_chn - (u8 *)rq->vrq))
		  / sizeof(u32);

	/*
	 * Only update the request size if it is bigger than what is
	 * already there; we can come through here more than once for the
	 * same request.
	 */
	if (reqsize > rq->vda_req_sz)
		rq->vda_req_sz = reqsize;

	return true;
}
664
665static void esas2r_handle_pending_reset(struct esas2r_adapter *a, u32 currtime)
666{
667 u32 delta = currtime - a->chip_init_time;
668
669 if (delta <= ESAS2R_CHPRST_WAIT_TIME) {
670
671 } else if (delta >= ESAS2R_CHPRST_TIME) {
672
673
674
675
676 esas2r_local_reset_adapter(a);
677 } else {
678
679 u32 doorbell;
680
681 doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
682 if (doorbell == 0xFFFFFFFF || !(doorbell & DRBL_FORCE_INT)) {
683 esas2r_force_interrupt(a);
684 } else {
685 u32 ver = (doorbell & DRBL_FW_VER_MSK);
686
687
688 esas2r_write_register_dword(a, MU_DOORBELL_OUT,
689 doorbell);
690 if (ver == DRBL_FW_VER_0) {
691 set_bit(AF_CHPRST_DETECTED, &a->flags);
692 set_bit(AF_LEGACY_SGE_MODE, &a->flags);
693
694 a->max_vdareq_size = 128;
695 a->build_sgl = esas2r_build_sg_list_sge;
696 } else if (ver == DRBL_FW_VER_1) {
697 set_bit(AF_CHPRST_DETECTED, &a->flags);
698 clear_bit(AF_LEGACY_SGE_MODE, &a->flags);
699
700 a->max_vdareq_size = 1024;
701 a->build_sgl = esas2r_build_sg_list_prd;
702 } else {
703 esas2r_local_reset_adapter(a);
704 }
705 }
706 }
707}
708
709
710
/*
 * Periodic timer callback: maintains the chip up-time countdown, drives
 * a pending chip reset forward, checks discovery completion, performs
 * the firmware heartbeat handshake and kicks deferred processing.
 */
void esas2r_timer_tick(struct esas2r_adapter *a)
{
	u32 currtime = jiffies_to_msecs(jiffies);
	u32 deltatime = currtime - a->last_tick_time;

	a->last_tick_time = currtime;

	/* count down the chip up-time (paused during reset/discovery) */
	if (a->chip_uptime &&
	    !test_bit(AF_CHPRST_PENDING, &a->flags) &&
	    !test_bit(AF_DISC_PENDING, &a->flags)) {
		if (deltatime >= a->chip_uptime)
			a->chip_uptime = 0;
		else
			a->chip_uptime -= deltatime;
	}

	if (test_bit(AF_CHPRST_PENDING, &a->flags)) {
		/* progress the reset only while both NEEDED and DETECTED
		 * are clear -- NOTE(review): presumably the window between
		 * the reset being issued and the firmware acknowledging */
		if (!test_bit(AF_CHPRST_NEEDED, &a->flags) &&
		    !test_bit(AF_CHPRST_DETECTED, &a->flags))
			esas2r_handle_pending_reset(a, currtime);
	} else {
		if (test_bit(AF_DISC_PENDING, &a->flags))
			esas2r_disc_check_complete(a);
		if (test_bit(AF_HEARTBEAT_ENB, &a->flags)) {
			if (test_bit(AF_HEARTBEAT, &a->flags)) {
				/* a heartbeat is outstanding; if it has not
				 * been answered in time, reset the adapter */
				if ((currtime - a->heartbeat_time) >=
				    ESAS2R_HEARTBEAT_TIME) {
					clear_bit(AF_HEARTBEAT, &a->flags);
					esas2r_hdebug("heartbeat failed");
					esas2r_log(ESAS2R_LOG_CRIT,
						   "heartbeat failed");
					esas2r_bugon();
					esas2r_local_reset_adapter(a);
				}
			} else {
				/* start a new heartbeat cycle */
				set_bit(AF_HEARTBEAT, &a->flags);
				a->heartbeat_time = currtime;
				esas2r_force_interrupt(a);
			}
		}
	}

	/* run deferred work unless processing is disabled */
	if (atomic_read(&a->disable_cnt) == 0)
		esas2r_do_deferred_processes(a);
}
757
758
759
760
761
762
763
764bool esas2r_send_task_mgmt(struct esas2r_adapter *a,
765 struct esas2r_request *rqaux, u8 task_mgt_func)
766{
767 u16 targetid = rqaux->target_id;
768 u8 lun = (u8)le32_to_cpu(rqaux->vrq->scsi.flags);
769 bool ret = false;
770 struct esas2r_request *rq;
771 struct list_head *next, *element;
772 unsigned long flags;
773
774 LIST_HEAD(comp_list);
775
776 esas2r_trace_enter();
777 esas2r_trace("rqaux:%p", rqaux);
778 esas2r_trace("task_mgt_func:%x", task_mgt_func);
779 spin_lock_irqsave(&a->queue_lock, flags);
780
781
782 list_for_each_safe(element, next, &a->defer_list) {
783 rq = list_entry(element, struct esas2r_request, req_list);
784
785 if (rq->vrq->scsi.function == VDA_FUNC_SCSI
786 && rq->target_id == targetid
787 && (((u8)le32_to_cpu(rq->vrq->scsi.flags)) == lun
788 || task_mgt_func == 0x20)) {
789
790 if (rq->req_stat == RS_PENDING) {
791
792
793
794
795 if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
796 list_add_tail(&rq->comp_list,
797 &comp_list);
798 }
799 }
800 }
801
802
803 rqaux->sense_len = 0;
804 rqaux->vrq->scsi.length = 0;
805 rqaux->target_id = targetid;
806 rqaux->vrq->scsi.flags |= cpu_to_le32(lun);
807 memset(rqaux->vrq->scsi.cdb, 0, sizeof(rqaux->vrq->scsi.cdb));
808 rqaux->vrq->scsi.flags |=
809 cpu_to_le16(task_mgt_func * LOBIT(FCP_CMND_TM_MASK));
810
811 if (test_bit(AF_FLASHING, &a->flags)) {
812
813 rqaux->req_stat = RS_SUCCESS;
814
815 list_for_each_safe(element, next, &a->active_list) {
816 rq = list_entry(element, struct esas2r_request,
817 req_list);
818 if (rq->vrq->scsi.function == VDA_FUNC_SCSI
819 && rq->target_id == targetid
820 && (((u8)le32_to_cpu(rq->vrq->scsi.flags)) == lun
821 || task_mgt_func == 0x20))
822 rqaux->req_stat = RS_BUSY;
823 }
824
825 ret = true;
826 }
827
828 spin_unlock_irqrestore(&a->queue_lock, flags);
829
830 if (!test_bit(AF_FLASHING, &a->flags))
831 esas2r_start_request(a, rqaux);
832
833 esas2r_comp_list_drain(a, &comp_list);
834
835 if (atomic_read(&a->disable_cnt) == 0)
836 esas2r_do_deferred_processes(a);
837
838 esas2r_trace_exit();
839
840 return ret;
841}
842
843void esas2r_reset_bus(struct esas2r_adapter *a)
844{
845 esas2r_log(ESAS2R_LOG_INFO, "performing a bus reset");
846
847 if (!test_bit(AF_DEGRADED_MODE, &a->flags) &&
848 !test_bit(AF_CHPRST_PENDING, &a->flags) &&
849 !test_bit(AF_DISC_PENDING, &a->flags)) {
850 set_bit(AF_BUSRST_NEEDED, &a->flags);
851 set_bit(AF_BUSRST_PENDING, &a->flags);
852 set_bit(AF_OS_RESET, &a->flags);
853
854 esas2r_schedule_tasklet(a);
855 }
856}
857
858bool esas2r_ioreq_aborted(struct esas2r_adapter *a, struct esas2r_request *rq,
859 u8 status)
860{
861 esas2r_trace_enter();
862 esas2r_trace("rq:%p", rq);
863 list_del_init(&rq->req_list);
864 if (rq->timeout > RQ_MAX_TIMEOUT) {
865
866
867
868
869 rq->req_stat = RS_BUSY;
870 esas2r_trace_exit();
871 return true;
872 }
873
874 rq->req_stat = status;
875 esas2r_trace_exit();
876 return true;
877}
878