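/*
 * QLogic QLA1280/QLA12160 SCSI host adapter driver for the ISP1020/1040,
 * ISP1x80 and ISP1x160 family of controllers.
 */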
#define QLA1280_VERSION "3.27.1"

#include <linux/module.h>

#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/pci_ids.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
#include <asm/processor.h>
#include <asm/types.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#define DEBUG_QLA1280_INTR	0
#define DEBUG_PRINT_NVRAM	0
#define DEBUG_QLA1280		0

#define MEMORY_MAPPED_IO	1

#include "qla1280.h"

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
#define QLA_64BIT_PTR	1
#endif

#define NVRAM_DELAY()	udelay(500)

#define IS_ISP1040(ha)	(ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020)
#define IS_ISP1x40(ha)	(ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020 || \
			 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1240)
#define IS_ISP1x160(ha)	(ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP10160 || \
			 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP12160)

static int qla1280_probe_one(struct pci_dev *, const struct pci_device_id *);
static void qla1280_remove_one(struct pci_dev *);

static void qla1280_done(struct scsi_qla_host *);
static int qla1280_get_token(char *);
static int qla1280_setup(char *s) __init;

static int qla1280_load_firmware(struct scsi_qla_host *);
static int qla1280_init_rings(struct scsi_qla_host *);
static int qla1280_nvram_config(struct scsi_qla_host *);
static int qla1280_mailbox_command(struct scsi_qla_host *,
				   uint8_t, uint16_t *);
static int qla1280_bus_reset(struct scsi_qla_host *, int);
static int qla1280_device_reset(struct scsi_qla_host *, int, int);
static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int);
static int qla1280_abort_isp(struct scsi_qla_host *);
#ifdef QLA_64BIT_PTR
static int qla1280_64bit_start_scsi(struct scsi_qla_host *, struct srb *);
#else
static int qla1280_32bit_start_scsi(struct scsi_qla_host *, struct srb *);
#endif
static void qla1280_nv_write(struct scsi_qla_host *, uint16_t);
static void qla1280_poll(struct scsi_qla_host *);
static void qla1280_reset_adapter(struct scsi_qla_host *);
static void qla1280_marker(struct scsi_qla_host *, int, int, int, u8);
static void qla1280_isp_cmd(struct scsi_qla_host *);
static void qla1280_isr(struct scsi_qla_host *, struct list_head *);
static void qla1280_rst_aen(struct scsi_qla_host *);
static void qla1280_status_entry(struct scsi_qla_host *, struct response *,
				 struct list_head *);
static void qla1280_error_entry(struct scsi_qla_host *, struct response *,
				struct list_head *);
static uint16_t qla1280_get_nvram_word(struct scsi_qla_host *, uint32_t);
static uint16_t qla1280_nvram_request(struct scsi_qla_host *, uint32_t);
static uint16_t qla1280_debounce_register(volatile uint16_t __iomem *);
static request_t *qla1280_req_pkt(struct scsi_qla_host *);
static int qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *,
					   unsigned int);
static void qla1280_get_target_parameters(struct scsi_qla_host *,
					  struct scsi_device *);
static int qla1280_set_target_parameters(struct scsi_qla_host *, int, int);

static struct qla_driver_setup driver_setup;

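/*
 * Translate the Linux DMA data direction of a command into the
 * data-direction bits (BIT_5 = read, BIT_6 = write) used in the
 * ISP command IOCB control flags.
 */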
static inline uint16_t
qla1280_data_direction(struct scsi_cmnd *cmnd)
{
	switch (cmnd->sc_data_direction) {
	case DMA_FROM_DEVICE:
		return BIT_5;
	case DMA_TO_DEVICE:
		return BIT_6;
	case DMA_BIDIRECTIONAL:
		return BIT_5 | BIT_6;
	case DMA_NONE:
	default:
		return 0;
	}
}

#if DEBUG_QLA1280
static void __qla1280_print_scsi_cmd(struct scsi_cmnd *cmd);
static void __qla1280_dump_buffer(char *, int);
#endif

#ifdef MODULE
static char *qla1280;

module_param(qla1280, charp, 0);
#else
__setup("qla1280=", qla1280_setup);
#endif

#define CMD_SP(Cmnd)		&Cmnd->SCp
#define CMD_CDBLEN(Cmnd)	Cmnd->cmd_len
#define CMD_CDBP(Cmnd)		Cmnd->cmnd
#define CMD_SNSP(Cmnd)		Cmnd->sense_buffer
#define CMD_SNSLEN(Cmnd)	SCSI_SENSE_BUFFERSIZE
#define CMD_RESULT(Cmnd)	Cmnd->result
#define CMD_HANDLE(Cmnd)	Cmnd->host_scribble
#define CMD_REQUEST(Cmnd)	Cmnd->request->cmd

#define CMD_HOST(Cmnd)		Cmnd->device->host
#define SCSI_BUS_32(Cmnd)	Cmnd->device->channel
#define SCSI_TCN_32(Cmnd)	Cmnd->device->id
#define SCSI_LUN_32(Cmnd)	Cmnd->device->lun

struct qla_boards {
	char *name;
	int numPorts;
	int fw_index;
};

static struct pci_device_id qla1280_pci_tbl[] = {
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP12160,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1020,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1080,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1240,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3},
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1280,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP10160,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
	{0,}
};
MODULE_DEVICE_TABLE(pci, qla1280_pci_tbl);

DEFINE_MUTEX(qla1280_firmware_mutex);

struct qla_fw {
	char *fwname;
	const struct firmware *fw;
};

#define QL_NUM_FW_IMAGES 3

struct qla_fw qla1280_fw_tbl[QL_NUM_FW_IMAGES] = {
	{"qlogic/1040.bin", NULL},
	{"qlogic/1280.bin", NULL},
	{"qlogic/12160.bin", NULL},
};

static struct qla_boards ql1280_board_tbl[] = {
	{.name = "QLA12160", .numPorts = 2, .fw_index = 2},
	{.name = "QLA1040" , .numPorts = 1, .fw_index = 0},
	{.name = "QLA1080" , .numPorts = 1, .fw_index = 1},
	{.name = "QLA1240" , .numPorts = 2, .fw_index = 1},
	{.name = "QLA1280" , .numPorts = 2, .fw_index = 1},
	{.name = "QLA10160", .numPorts = 1, .fw_index = 2},
	{.name = " ",        .numPorts = 0, .fw_index = -1},
};

static int qla1280_verbose = 1;

#if DEBUG_QLA1280
static int ql_debug_level = 1;
#define dprintk(level, format, a...)	\
	do { if (ql_debug_level >= level) printk(KERN_ERR format, ##a); } while(0)
#define qla1280_dump_buffer(level, buf, size)	\
	if (ql_debug_level >= level) __qla1280_dump_buffer(buf, size)
#define qla1280_print_scsi_cmd(level, cmd)	\
	if (ql_debug_level >= level) __qla1280_print_scsi_cmd(cmd)
#else
#define ql_debug_level			0
#define dprintk(level, format, a...)	do{}while(0)
#define qla1280_dump_buffer(a, b, c)	do{}while(0)
#define qla1280_print_scsi_cmd(a, b)	do{}while(0)
#endif

#define ENTER(x)	dprintk(3, "qla1280 : Entering %s()\n", x);
#define LEAVE(x)	dprintk(3, "qla1280 : Leaving %s()\n", x);
#define ENTER_INTR(x)	dprintk(4, "qla1280 : Entering %s()\n", x);
#define LEAVE_INTR(x)	dprintk(4, "qla1280 : Leaving %s()\n", x);

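/*
 * qla1280_read_nvram
 *	Read the adapter NVRAM into ha->nvram, verify the "ISP " signature
 *	and checksum, and convert selected 16-bit fields to the
 *	little-endian layout expected by the firmware interface.
 *
 * Returns:
 *	0 = NVRAM contents are valid, non-zero = use default settings.
 */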
static int qla1280_read_nvram(struct scsi_qla_host *ha)
{
	uint16_t *wptr;
	uint8_t chksum;
	int cnt, i;
	struct nvram *nv;

	ENTER("qla1280_read_nvram");

	if (driver_setup.no_nvram)
		return 1;

	printk(KERN_INFO "scsi(%ld): Reading NVRAM\n", ha->host_no);

	wptr = (uint16_t *)&ha->nvram;
	nv = &ha->nvram;
	chksum = 0;
	for (cnt = 0; cnt < 3; cnt++) {
		*wptr = qla1280_get_nvram_word(ha, cnt);
		chksum += *wptr & 0xff;
		chksum += (*wptr >> 8) & 0xff;
		wptr++;
	}

	if (nv->id0 != 'I' || nv->id1 != 'S' ||
	    nv->id2 != 'P' || nv->id3 != ' ' || nv->version < 1) {
		dprintk(2, "Invalid nvram ID or version!\n");
		chksum = 1;
	} else {
		for (; cnt < sizeof(struct nvram); cnt++) {
			*wptr = qla1280_get_nvram_word(ha, cnt);
			chksum += *wptr & 0xff;
			chksum += (*wptr >> 8) & 0xff;
			wptr++;
		}
	}

	dprintk(3, "qla1280_read_nvram: NVRAM Magic ID= %c %c %c %02x"
		" version %i\n", nv->id0, nv->id1, nv->id2, nv->id3,
		nv->version);

	if (chksum) {
		if (!driver_setup.no_nvram)
			printk(KERN_WARNING "scsi(%ld): Unable to identify or "
			       "validate NVRAM checksum, using default "
			       "settings\n", ha->host_no);
		ha->nvram_valid = 0;
	} else
		ha->nvram_valid = 1;

	nv->isp_parameter = cpu_to_le16(nv->isp_parameter);
	nv->firmware_feature.w = cpu_to_le16(nv->firmware_feature.w);
	for (i = 0; i < MAX_BUSES; i++) {
		nv->bus[i].selection_timeout = cpu_to_le16(nv->bus[i].selection_timeout);
		nv->bus[i].max_queue_depth = cpu_to_le16(nv->bus[i].max_queue_depth);
	}

	dprintk(1, "qla1280_read_nvram: Completed Reading NVRAM\n");
	LEAVE("qla1280_read_nvram");

	return chksum;
}

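/*
 * qla1280_info
 *	Return a formatted description of the host adapter, including the
 *	board name and the firmware and driver version numbers.
 */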
static const char *
qla1280_info(struct Scsi_Host *host)
{
	static char qla1280_scsi_name_buffer[125];
	char *bp;
	struct scsi_qla_host *ha;
	struct qla_boards *bdp;

	bp = &qla1280_scsi_name_buffer[0];
	ha = (struct scsi_qla_host *)host->hostdata;
	bdp = &ql1280_board_tbl[ha->devnum];
	memset(bp, 0, sizeof(qla1280_scsi_name_buffer));

	sprintf(bp,
		"QLogic %s PCI to SCSI Host Adapter\n"
		" Firmware version: %2d.%02d.%02d, Driver version %s",
		&bdp->name[0], ha->fwver1, ha->fwver2, ha->fwver3,
		QLA1280_VERSION);
	return bp;
}

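/*
 * qla1280_queuecommand
 *	Queue a SCSI command to the adapter: remember the midlayer done
 *	callback in the srb and hand the command to the 64-bit or 32-bit
 *	start_scsi routine, depending on QLA_64BIT_PTR.
 */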
static int
qla1280_queuecommand_lck(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
	struct srb *sp = (struct srb *)CMD_SP(cmd);
	int status;

	cmd->scsi_done = fn;
	sp->cmd = cmd;
	sp->flags = 0;
	sp->wait = NULL;
	CMD_HANDLE(cmd) = (unsigned char *)NULL;

	qla1280_print_scsi_cmd(5, cmd);

#ifdef QLA_64BIT_PTR
	status = qla1280_64bit_start_scsi(ha, sp);
#else
	status = qla1280_32bit_start_scsi(ha, sp);
#endif
	return status;
}

static DEF_SCSI_QCMD(qla1280_queuecommand)

enum action {
	ABORT_COMMAND,
	DEVICE_RESET,
	BUS_RESET,
	ADAPTER_RESET,
};

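/*
 * qla1280_mailbox_timeout
 *	Timer callback fired when a mailbox command does not complete in
 *	time; it logs the current mailbox/interrupt state and completes the
 *	waiter so qla1280_mailbox_command() can report the failure.
 */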
static void qla1280_mailbox_timeout(struct timer_list *t)
{
	struct scsi_qla_host *ha = from_timer(ha, t, mailbox_timer);
	struct device_reg __iomem *reg;

	reg = ha->iobase;

	ha->mailbox_out[0] = RD_REG_WORD(&reg->mailbox0);
	printk(KERN_ERR "scsi(%ld): mailbox timed out, mailbox0 %04x, "
	       "ictrl %04x, istatus %04x\n", ha->host_no, ha->mailbox_out[0],
	       RD_REG_WORD(&reg->ictrl), RD_REG_WORD(&reg->istatus));
	complete(ha->mailbox_wait);
}

static int
_qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp,
				 struct completion *wait)
{
	int status = FAILED;
	struct scsi_cmnd *cmd = sp->cmd;

	spin_unlock_irq(ha->host->host_lock);
	wait_for_completion_timeout(wait, 4*HZ);
	spin_lock_irq(ha->host->host_lock);
	sp->wait = NULL;
	if (CMD_HANDLE(cmd) == COMPLETED_HANDLE) {
		status = SUCCESS;
		(*cmd->scsi_done)(cmd);
	}
	return status;
}

static int
qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	sp->wait = &wait;
	return _qla1280_wait_for_single_command(ha, sp, &wait);
}

static int
qla1280_wait_for_pending_commands(struct scsi_qla_host *ha, int bus, int target)
{
	int cnt;
	int status;
	struct srb *sp;
	struct scsi_cmnd *cmd;

	status = SUCCESS;

	for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
		sp = ha->outstanding_cmds[cnt];
		if (sp) {
			cmd = sp->cmd;

			if (bus >= 0 && SCSI_BUS_32(cmd) != bus)
				continue;
			if (target >= 0 && SCSI_TCN_32(cmd) != target)
				continue;

			status = qla1280_wait_for_single_command(ha, sp);
			if (status == FAILED)
				break;
		}
	}
	return status;
}

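/*
 * qla1280_error_action
 *	Common backend for the SCSI error-handler entry points. Depending on
 *	'action' it aborts a single command, resets a device, resets a SCSI
 *	bus, or resets the whole adapter, then waits for the affected
 *	command(s) to complete.
 *
 * Returns:
 *	SUCCESS or FAILED.
 */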
static int
qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
{
	struct scsi_qla_host *ha;
	int bus, target, lun;
	struct srb *sp;
	int i, found;
	int result = FAILED;
	int wait_for_bus = -1;
	int wait_for_target = -1;
	DECLARE_COMPLETION_ONSTACK(wait);

	ENTER("qla1280_error_action");

	ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata);
	sp = (struct srb *)CMD_SP(cmd);
	bus = SCSI_BUS_32(cmd);
	target = SCSI_TCN_32(cmd);
	lun = SCSI_LUN_32(cmd);

	dprintk(4, "error_action %i, istatus 0x%04x\n", action,
		RD_REG_WORD(&ha->iobase->istatus));

	dprintk(4, "host_cmd 0x%04x, ictrl 0x%04x, jiffies %li\n",
		RD_REG_WORD(&ha->iobase->host_cmd),
		RD_REG_WORD(&ha->iobase->ictrl), jiffies);

	if (qla1280_verbose)
		printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, "
		       "Handle=0x%p, action=0x%x\n",
		       ha->host_no, cmd, CMD_HANDLE(cmd), action);

	found = -1;
	for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
		if (sp == ha->outstanding_cmds[i]) {
			found = i;
			sp->wait = &wait;
			break;
		}
	}

	if (found < 0) {
		result = SUCCESS;
		if (qla1280_verbose) {
			printk(KERN_INFO
			       "scsi(%ld:%d:%d:%d): specified command has "
			       "already completed.\n", ha->host_no, bus,
			       target, lun);
		}
	}

	switch (action) {

	case ABORT_COMMAND:
		dprintk(1, "qla1280: RISC aborting command\n");
		if (found >= 0)
			qla1280_abort_command(ha, sp, found);
		break;

	case DEVICE_RESET:
		if (qla1280_verbose)
			printk(KERN_INFO
			       "scsi(%ld:%d:%d:%d): Queueing device reset "
			       "command.\n", ha->host_no, bus, target, lun);
		if (qla1280_device_reset(ha, bus, target) == 0) {
			wait_for_bus = bus;
			wait_for_target = target;
		}
		break;

	case BUS_RESET:
		if (qla1280_verbose)
			printk(KERN_INFO "qla1280(%ld:%d): Issued bus "
			       "reset.\n", ha->host_no, bus);
		if (qla1280_bus_reset(ha, bus) == 0) {
			wait_for_bus = bus;
		}
		break;

	case ADAPTER_RESET:
	default:
		if (qla1280_verbose) {
			printk(KERN_INFO
			       "scsi(%ld): Issued ADAPTER RESET\n",
			       ha->host_no);
			printk(KERN_INFO "scsi(%ld): I/O processing will "
			       "continue automatically\n", ha->host_no);
		}
		ha->flags.reset_active = 1;

		if (qla1280_abort_isp(ha) != 0) {
			result = FAILED;
		}

		ha->flags.reset_active = 0;
	}

	if (found >= 0)
		result = _qla1280_wait_for_single_command(ha, sp, &wait);

	if (action == ABORT_COMMAND && result != SUCCESS) {
		printk(KERN_WARNING
		       "scsi(%li:%i:%i:%i): "
		       "Unable to abort command!\n",
		       ha->host_no, bus, target, lun);
	}

	if (result == SUCCESS && wait_for_bus >= 0) {
		result = qla1280_wait_for_pending_commands(ha,
				wait_for_bus, wait_for_target);
	}

	dprintk(1, "RESET returning %d\n", result);

	LEAVE("qla1280_error_action");
	return result;
}

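/*
 * SCSI error-handler entry points. Each wrapper takes the host lock and
 * delegates to qla1280_error_action() with the corresponding action code.
 */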
static int
qla1280_eh_abort(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = qla1280_error_action(cmd, ABORT_COMMAND);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

static int
qla1280_eh_device_reset(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = qla1280_error_action(cmd, DEVICE_RESET);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

static int
qla1280_eh_bus_reset(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = qla1280_error_action(cmd, BUS_RESET);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

static int
qla1280_eh_adapter_reset(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = qla1280_error_action(cmd, ADAPTER_RESET);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

static int
qla1280_biosparam(struct scsi_device *sdev, struct block_device *bdev,
		  sector_t capacity, int geom[])
{
	int heads, sectors, cylinders;

	heads = 64;
	sectors = 32;
	cylinders = (unsigned long)capacity / (heads * sectors);
	if (cylinders > 1024) {
		heads = 255;
		sectors = 63;
		cylinders = (unsigned long)capacity / (heads * sectors);
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	return 0;
}

static inline void
qla1280_disable_intrs(struct scsi_qla_host *ha)
{
	WRT_REG_WORD(&ha->iobase->ictrl, 0);
	RD_REG_WORD(&ha->iobase->ictrl);
}

static inline void
qla1280_enable_intrs(struct scsi_qla_host *ha)
{
	WRT_REG_WORD(&ha->iobase->ictrl, (ISP_EN_INT | ISP_EN_RISC));
	RD_REG_WORD(&ha->iobase->ictrl);
}

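/*
 * qla1280_intr_handler
 *	Hardware interrupt handler. Disables chip interrupts, processes any
 *	pending RISC interrupt and the done queue, then re-enables
 *	interrupts.
 */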
static irqreturn_t
qla1280_intr_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha;
	struct device_reg __iomem *reg;
	u16 data;
	int handled = 0;

	ENTER_INTR("qla1280_intr_handler");
	ha = (struct scsi_qla_host *)dev_id;

	spin_lock(ha->host->host_lock);

	ha->isr_count++;
	reg = ha->iobase;

	qla1280_disable_intrs(ha);

	data = qla1280_debounce_register(&reg->istatus);

	if (data & RISC_INT) {
		qla1280_isr(ha, &ha->done_q);
		handled = 1;
	}
	if (!list_empty(&ha->done_q))
		qla1280_done(ha);

	spin_unlock(ha->host->host_lock);

	qla1280_enable_intrs(ha);

	LEAVE_INTR("qla1280_intr_handler");
	return IRQ_RETVAL(handled);
}

static int
qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target)
{
	uint8_t mr;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct nvram *nv;
	int status, lun;

	nv = &ha->nvram;

	mr = BIT_3 | BIT_2 | BIT_1 | BIT_0;

	mb[0] = MBC_SET_TARGET_PARAMETERS;
	mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
	mb[2] = nv->bus[bus].target[target].parameter.renegotiate_on_error << 8;
	mb[2] |= nv->bus[bus].target[target].parameter.stop_queue_on_check << 9;
	mb[2] |= nv->bus[bus].target[target].parameter.auto_request_sense << 10;
	mb[2] |= nv->bus[bus].target[target].parameter.tag_queuing << 11;
	mb[2] |= nv->bus[bus].target[target].parameter.enable_sync << 12;
	mb[2] |= nv->bus[bus].target[target].parameter.enable_wide << 13;
	mb[2] |= nv->bus[bus].target[target].parameter.parity_checking << 14;
	mb[2] |= nv->bus[bus].target[target].parameter.disconnect_allowed << 15;

	if (IS_ISP1x160(ha)) {
		mb[2] |= nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr << 5;
		mb[3] = (nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8);
		mb[6] = (nv->bus[bus].target[target].ppr_1x160.flags.ppr_options << 8) |
			 nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width;
		mr |= BIT_6;
	} else {
		mb[3] = (nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8);
	}
	mb[3] |= nv->bus[bus].target[target].sync_period;

	status = qla1280_mailbox_command(ha, mr, mb);

	for (lun = 0; lun < MAX_LUNS; lun++) {
		mb[0] = MBC_SET_DEVICE_QUEUE;
		mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
		mb[1] |= lun;
		mb[2] = nv->bus[bus].max_queue_depth;
		mb[3] = nv->bus[bus].target[target].execution_throttle;
		status |= qla1280_mailbox_command(ha, 0x0f, mb);
	}

	if (status)
		printk(KERN_WARNING "scsi(%ld:%i:%i): "
		       "qla1280_set_target_parameters() failed\n",
		       ha->host_no, bus, target);
	return status;
}

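/*
 * qla1280_slave_configure
 *	Midlayer callback invoked once per discovered SCSI device. Sets the
 *	queue depth and programs the negotiated sync/wide/PPR settings,
 *	honouring any restrictions given on the driver command line.
 */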
static int
qla1280_slave_configure(struct scsi_device *device)
{
	struct scsi_qla_host *ha;
	int default_depth = 3;
	int bus = device->channel;
	int target = device->id;
	int status = 0;
	struct nvram *nv;
	unsigned long flags;

	ha = (struct scsi_qla_host *)device->host->hostdata;
	nv = &ha->nvram;

	if (qla1280_check_for_dead_scsi_bus(ha, bus))
		return 1;

	if (device->tagged_supported &&
	    (ha->bus_settings[bus].qtag_enables & (BIT_0 << target))) {
		scsi_change_queue_depth(device, ha->bus_settings[bus].hiwat);
	} else {
		scsi_change_queue_depth(device, default_depth);
	}

	nv->bus[bus].target[target].parameter.enable_sync = device->sdtr;
	nv->bus[bus].target[target].parameter.enable_wide = device->wdtr;
	nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = device->ppr;

	if (driver_setup.no_sync ||
	    (driver_setup.sync_mask &&
	     (~driver_setup.sync_mask & (1 << target))))
		nv->bus[bus].target[target].parameter.enable_sync = 0;
	if (driver_setup.no_wide ||
	    (driver_setup.wide_mask &&
	     (~driver_setup.wide_mask & (1 << target))))
		nv->bus[bus].target[target].parameter.enable_wide = 0;
	if (IS_ISP1x160(ha)) {
		if (driver_setup.no_ppr ||
		    (driver_setup.ppr_mask &&
		     (~driver_setup.ppr_mask & (1 << target))))
			nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 0;
	}

	spin_lock_irqsave(ha->host->host_lock, flags);
	if (nv->bus[bus].target[target].parameter.enable_sync)
		status = qla1280_set_target_parameters(ha, bus, target);
	qla1280_get_target_parameters(ha, device);
	spin_unlock_irqrestore(ha->host->host_lock, flags);
	return status;
}

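/*
 * qla1280_done
 *	Process the list of completed commands: unmap their DMA buffers,
 *	post a marker after a reset completion, and hand each command back
 *	to the midlayer (or wake up a waiter from the error handler).
 */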
static void
qla1280_done(struct scsi_qla_host *ha)
{
	struct srb *sp;
	struct list_head *done_q;
	int bus, target, lun;
	struct scsi_cmnd *cmd;

	ENTER("qla1280_done");

	done_q = &ha->done_q;

	while (!list_empty(done_q)) {
		sp = list_entry(done_q->next, struct srb, list);

		list_del(&sp->list);

		cmd = sp->cmd;
		bus = SCSI_BUS_32(cmd);
		target = SCSI_TCN_32(cmd);
		lun = SCSI_LUN_32(cmd);

		switch ((CMD_RESULT(cmd) >> 16)) {
		case DID_RESET:
			if (!ha->flags.abort_isp_active)
				qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
			break;
		case DID_ABORT:
			sp->flags &= ~SRB_ABORT_PENDING;
			sp->flags |= SRB_ABORTED;
			break;
		default:
			break;
		}

		scsi_dma_unmap(cmd);

		ha->actthreads--;

		if (sp->wait == NULL)
			(*(cmd)->scsi_done)(cmd);
		else
			complete(sp->wait);
	}
	LEAVE("qla1280_done");
}

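/*
 * qla1280_return_status
 *	Translate the ISP completion status of a finished command into the
 *	standard SCSI midlayer result word (host byte | status byte).
 */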
static int
qla1280_return_status(struct response *sts, struct scsi_cmnd *cp)
{
	int host_status = DID_ERROR;
	uint16_t comp_status = le16_to_cpu(sts->comp_status);
	uint16_t state_flags = le16_to_cpu(sts->state_flags);
	uint32_t residual_length = le32_to_cpu(sts->residual_length);
	uint16_t scsi_status = le16_to_cpu(sts->scsi_status);
#if DEBUG_QLA1280_INTR
	static char *reason[] = {
		"DID_OK",
		"DID_NO_CONNECT",
		"DID_BUS_BUSY",
		"DID_TIME_OUT",
		"DID_BAD_TARGET",
		"DID_ABORT",
		"DID_PARITY",
		"DID_ERROR",
		"DID_RESET",
		"DID_BAD_INTR"
	};
#endif

	ENTER("qla1280_return_status");

	switch (comp_status) {
	case CS_COMPLETE:
		host_status = DID_OK;
		break;

	case CS_INCOMPLETE:
		if (!(state_flags & SF_GOT_BUS))
			host_status = DID_NO_CONNECT;
		else if (!(state_flags & SF_GOT_TARGET))
			host_status = DID_BAD_TARGET;
		else if (!(state_flags & SF_SENT_CDB))
			host_status = DID_ERROR;
		else if (!(state_flags & SF_TRANSFERRED_DATA))
			host_status = DID_ERROR;
		else if (!(state_flags & SF_GOT_STATUS))
			host_status = DID_ERROR;
		else if (!(state_flags & SF_GOT_SENSE))
			host_status = DID_ERROR;
		break;

	case CS_RESET:
		host_status = DID_RESET;
		break;

	case CS_ABORTED:
		host_status = DID_ABORT;
		break;

	case CS_TIMEOUT:
		host_status = DID_TIME_OUT;
		break;

	case CS_DATA_OVERRUN:
		dprintk(2, "Data overrun 0x%x\n", residual_length);
		dprintk(2, "qla1280_return_status: response packet data\n");
		qla1280_dump_buffer(2, (char *)sts, RESPONSE_ENTRY_SIZE);
		host_status = DID_ERROR;
		break;

	case CS_DATA_UNDERRUN:
		if ((scsi_bufflen(cp) - residual_length) <
		    cp->underflow) {
			printk(KERN_WARNING
			       "scsi: Underflow detected - retrying "
			       "command.\n");
			host_status = DID_ERROR;
		} else {
			scsi_set_resid(cp, residual_length);
			host_status = DID_OK;
		}
		break;

	default:
		host_status = DID_ERROR;
		break;
	}

#if DEBUG_QLA1280_INTR
	dprintk(1, "qla1280 ISP status: host status (%s) scsi status %x\n",
		reason[host_status], scsi_status);
#endif

	LEAVE("qla1280_return_status");

	return (scsi_status & 0xff) | (host_status << 16);
}

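/*
 * qla1280_initialize_adapter
 *	Bring the ISP up from scratch: clear pending interrupts, read the
 *	NVRAM, load and start the RISC firmware, configure the NVRAM
 *	parameters, initialize the request/response rings and reset the
 *	SCSI buses.
 *
 * Returns:
 *	0 on success, non-zero on failure.
 */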
static int
qla1280_initialize_adapter(struct scsi_qla_host *ha)
{
	struct device_reg __iomem *reg;
	int status;
	int bus;
	unsigned long flags;

	ENTER("qla1280_initialize_adapter");

	ha->flags.online = 0;
	ha->flags.disable_host_adapter = 0;
	ha->flags.reset_active = 0;
	ha->flags.abort_isp_active = 0;

	if (IS_ISP1040(ha))
		driver_setup.no_nvram = 1;

	dprintk(1, "Configure PCI space for adapter...\n");

	reg = ha->iobase;

	WRT_REG_WORD(&reg->semaphore, 0);
	WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
	WRT_REG_WORD(&reg->host_cmd, HC_CLR_HOST_INT);
	RD_REG_WORD(&reg->host_cmd);

	if (qla1280_read_nvram(ha)) {
		dprintk(2, "qla1280_initialize_adapter: failed to read "
			"NVRAM\n");
	}

	spin_lock_irqsave(ha->host->host_lock, flags);

	status = qla1280_load_firmware(ha);
	if (status) {
		printk(KERN_ERR "scsi(%li): initialize: pci probe failed!\n",
		       ha->host_no);
		goto out;
	}

	dprintk(1, "scsi(%ld): Configure NVRAM parameters\n", ha->host_no);
	qla1280_nvram_config(ha);

	if (ha->flags.disable_host_adapter) {
		status = 1;
		goto out;
	}

	status = qla1280_init_rings(ha);
	if (status)
		goto out;

	for (bus = 0; bus < ha->ports; bus++) {
		if (!ha->bus_settings[bus].disable_scsi_reset &&
		    qla1280_bus_reset(ha, bus) &&
		    qla1280_bus_reset(ha, bus))
			ha->bus_settings[bus].scsi_bus_dead = 1;
	}

	ha->flags.online = 1;
 out:
	spin_unlock_irqrestore(ha->host->host_lock, flags);

	if (status)
		dprintk(2, "qla1280_initialize_adapter: **** FAILED ****\n");

	LEAVE("qla1280_initialize_adapter");
	return status;
}

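/*
 * qla1280_request_firmware
 *	Look up (and cache) the firmware image for this board type via the
 *	kernel firmware loader. The host lock is dropped around the sleeping
 *	request_firmware() call and a mutex serializes access to the cache.
 *
 * Returns:
 *	Pointer to the firmware image, or an ERR_PTR() value on failure.
 */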
static const struct firmware *
qla1280_request_firmware(struct scsi_qla_host *ha)
{
	const struct firmware *fw;
	int err;
	int index;
	char *fwname;

	spin_unlock_irq(ha->host->host_lock);
	mutex_lock(&qla1280_firmware_mutex);

	index = ql1280_board_tbl[ha->devnum].fw_index;
	fw = qla1280_fw_tbl[index].fw;
	if (fw)
		goto out;

	fwname = qla1280_fw_tbl[index].fwname;
	err = request_firmware(&fw, fwname, &ha->pdev->dev);

	if (err) {
		printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
		       fwname, err);
		fw = ERR_PTR(err);
		goto unlock;
	}
	if ((fw->size % 2) || (fw->size < 6)) {
		printk(KERN_ERR "Invalid firmware length %zu in image \"%s\"\n",
		       fw->size, fwname);
		release_firmware(fw);
		fw = ERR_PTR(-EINVAL);
		goto unlock;
	}

	qla1280_fw_tbl[index].fw = fw;

 out:
	ha->fwver1 = fw->data[0];
	ha->fwver2 = fw->data[1];
	ha->fwver3 = fw->data[2];
 unlock:
	mutex_unlock(&qla1280_firmware_mutex);
	spin_lock_irq(ha->host->host_lock);
	return fw;
}

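/*
 * qla1280_chip_diag
 *	Reset the ISP chip, verify its product ID registers and run the
 *	mailbox register test before any firmware is loaded.
 */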
static int
qla1280_chip_diag(struct scsi_qla_host *ha)
{
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct device_reg __iomem *reg = ha->iobase;
	int status = 0;
	int cnt;
	uint16_t data;

	dprintk(3, "qla1280_chip_diag: testing device at 0x%p \n", &reg->id_l);

	dprintk(1, "scsi(%ld): Verifying chip\n", ha->host_no);

	WRT_REG_WORD(&reg->ictrl, ISP_RESET);

	udelay(20);
	data = qla1280_debounce_register(&reg->ictrl);

	for (cnt = 1000000; cnt && data & ISP_RESET; cnt--) {
		udelay(5);
		data = RD_REG_WORD(&reg->ictrl);
	}

	if (!cnt)
		goto fail;

	dprintk(3, "qla1280_chip_diag: reset register cleared by chip reset\n");

	WRT_REG_WORD(&reg->cfg_1, 0);

	WRT_REG_WORD(&reg->host_cmd, HC_RESET_RISC |
		     HC_RELEASE_RISC | HC_DISABLE_BIOS);

	RD_REG_WORD(&reg->id_l);
	data = qla1280_debounce_register(&reg->mailbox0);

	for (cnt = 1000000; cnt && data == MBS_BUSY; cnt--) {
		udelay(5);
		data = RD_REG_WORD(&reg->mailbox0);
	}

	if (!cnt)
		goto fail;

	dprintk(3, "qla1280_chip_diag: Checking product ID of chip\n");

	if (RD_REG_WORD(&reg->mailbox1) != PROD_ID_1 ||
	    (RD_REG_WORD(&reg->mailbox2) != PROD_ID_2 &&
	     RD_REG_WORD(&reg->mailbox2) != PROD_ID_2a) ||
	    RD_REG_WORD(&reg->mailbox3) != PROD_ID_3 ||
	    RD_REG_WORD(&reg->mailbox4) != PROD_ID_4) {
		printk(KERN_INFO "qla1280: Wrong product ID = "
		       "0x%x,0x%x,0x%x,0x%x\n",
		       RD_REG_WORD(&reg->mailbox1),
		       RD_REG_WORD(&reg->mailbox2),
		       RD_REG_WORD(&reg->mailbox3),
		       RD_REG_WORD(&reg->mailbox4));
		goto fail;
	}

	qla1280_enable_intrs(ha);

	dprintk(1, "qla1280_chip_diag: Checking mailboxes of chip\n");

	mb[0] = MBC_MAILBOX_REGISTER_TEST;
	mb[1] = 0xAAAA;
	mb[2] = 0x5555;
	mb[3] = 0xAA55;
	mb[4] = 0x55AA;
	mb[5] = 0xA5A5;
	mb[6] = 0x5A5A;
	mb[7] = 0x2525;

	status = qla1280_mailbox_command(ha, 0xff, mb);
	if (status)
		goto fail;

	if (mb[1] != 0xAAAA || mb[2] != 0x5555 || mb[3] != 0xAA55 ||
	    mb[4] != 0x55AA || mb[5] != 0xA5A5 || mb[6] != 0x5A5A ||
	    mb[7] != 0x2525) {
		printk(KERN_INFO "qla1280: Failed mbox check\n");
		goto fail;
	}

	dprintk(3, "qla1280_chip_diag: exiting normally\n");
	return 0;
 fail:
	dprintk(2, "qla1280_chip_diag: **** FAILED ****\n");
	return status;
}

static int
qla1280_load_firmware_pio(struct scsi_qla_host *ha)
{
	const struct firmware *fw;
	const __le16 *fw_data;
	uint16_t risc_address, risc_code_size;
	uint16_t mb[MAILBOX_REGISTER_COUNT], i;
	int err = 0;

	fw = qla1280_request_firmware(ha);
	if (IS_ERR(fw))
		return PTR_ERR(fw);

	fw_data = (const __le16 *)&fw->data[0];
	ha->fwstart = __le16_to_cpu(fw_data[2]);

	risc_address = ha->fwstart;
	fw_data = (const __le16 *)&fw->data[6];
	risc_code_size = (fw->size - 6) / 2;

	for (i = 0; i < risc_code_size; i++) {
		mb[0] = MBC_WRITE_RAM_WORD;
		mb[1] = risc_address + i;
		mb[2] = __le16_to_cpu(fw_data[i]);

		err = qla1280_mailbox_command(ha, BIT_0 | BIT_1 | BIT_2, mb);
		if (err) {
			printk(KERN_ERR "scsi(%li): Failed to load firmware\n",
			       ha->host_no);
			break;
		}
	}

	return err;
}

#ifdef QLA_64BIT_PTR
#define LOAD_CMD	MBC_LOAD_RAM_A64_ROM
#define DUMP_CMD	MBC_DUMP_RAM_A64_ROM
#define CMD_ARGS	(BIT_7 | BIT_6 | BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0)
#else
#define LOAD_CMD	MBC_LOAD_RAM
#define DUMP_CMD	MBC_DUMP_RAM
#define CMD_ARGS	(BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0)
#endif

#define DUMP_IT_BACK 0
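/*
 * qla1280_load_firmware_dma
 *	Download the RISC firmware in 2000-byte chunks by copying each chunk
 *	into the request ring buffer and issuing a LOAD_CMD mailbox command.
 *	When DUMP_IT_BACK is enabled, each chunk is read back and compared
 *	as a sanity check.
 */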
static int
qla1280_load_firmware_dma(struct scsi_qla_host *ha)
{
	const struct firmware *fw;
	const __le16 *fw_data;
	uint16_t risc_address, risc_code_size;
	uint16_t mb[MAILBOX_REGISTER_COUNT], cnt;
	int err = 0, num, i;
#if DUMP_IT_BACK
	uint8_t *sp, *tbuf;
	dma_addr_t p_tbuf;

	tbuf = dma_alloc_coherent(&ha->pdev->dev, 8000, &p_tbuf, GFP_KERNEL);
	if (!tbuf)
		return -ENOMEM;
#endif

	fw = qla1280_request_firmware(ha);
	if (IS_ERR(fw))
		return PTR_ERR(fw);

	fw_data = (const __le16 *)&fw->data[0];
	ha->fwstart = __le16_to_cpu(fw_data[2]);

	risc_address = ha->fwstart;
	fw_data = (const __le16 *)&fw->data[6];
	risc_code_size = (fw->size - 6) / 2;

	dprintk(1, "%s: DMA RISC code (%i) words\n",
		__func__, risc_code_size);

	num = 0;
	while (risc_code_size > 0) {
		int warn __attribute__((unused)) = 0;

		cnt = 2000 >> 1;

		if (cnt > risc_code_size)
			cnt = risc_code_size;

		dprintk(2, "qla1280_setup_chip: loading risc @ =(0x%p),"
			"%d,%d(0x%x)\n",
			fw_data, cnt, num, risc_address);
		for (i = 0; i < cnt; i++)
			((__le16 *)ha->request_ring)[i] = fw_data[i];

		mb[0] = LOAD_CMD;
		mb[1] = risc_address;
		mb[4] = cnt;
		mb[3] = ha->request_dma & 0xffff;
		mb[2] = (ha->request_dma >> 16) & 0xffff;
		mb[7] = upper_32_bits(ha->request_dma) & 0xffff;
		mb[6] = upper_32_bits(ha->request_dma) >> 16;
		dprintk(2, "%s: op=%d 0x%p = 0x%4x,0x%4x,0x%4x,0x%4x\n",
			__func__, mb[0],
			(void *)(long)ha->request_dma,
			mb[6], mb[7], mb[2], mb[3]);
		err = qla1280_mailbox_command(ha, CMD_ARGS, mb);
		if (err) {
			printk(KERN_ERR "scsi(%li): Failed to load partial "
			       "segment of f/w\n", ha->host_no);
			goto out;
		}

#if DUMP_IT_BACK
		mb[0] = DUMP_CMD;
		mb[1] = risc_address;
		mb[4] = cnt;
		mb[3] = p_tbuf & 0xffff;
		mb[2] = (p_tbuf >> 16) & 0xffff;
		mb[7] = upper_32_bits(p_tbuf) & 0xffff;
		mb[6] = upper_32_bits(p_tbuf) >> 16;

		err = qla1280_mailbox_command(ha, CMD_ARGS, mb);
		if (err) {
			printk(KERN_ERR
			       "Failed to dump partial segment of f/w\n");
			goto out;
		}
		sp = (uint8_t *)ha->request_ring;
		for (i = 0; i < (cnt << 1); i++) {
			if (tbuf[i] != sp[i] && warn++ < 10) {
				printk(KERN_ERR "%s: FW compare error @ "
				       "byte(0x%x) loop#=%x\n",
				       __func__, i, num);
				printk(KERN_ERR "%s: FWbyte=%x "
				       "FWfromChip=%x\n",
				       __func__, sp[i], tbuf[i]);
			}
		}
#endif
		risc_address += cnt;
		risc_code_size = risc_code_size - cnt;
		fw_data = fw_data + cnt;
		num++;
	}

 out:
#if DUMP_IT_BACK
	dma_free_coherent(&ha->pdev->dev, 8000, tbuf, p_tbuf);
#endif
	return err;
}

static int
qla1280_start_firmware(struct scsi_qla_host *ha)
{
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	int err;

	dprintk(1, "%s: Verifying checksum of loaded RISC code.\n",
		__func__);

	mb[0] = MBC_VERIFY_CHECKSUM;
	mb[1] = ha->fwstart;
	err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
	if (err) {
		printk(KERN_ERR "scsi(%li): RISC checksum failed.\n", ha->host_no);
		return err;
	}

	dprintk(1, "%s: start firmware running.\n", __func__);
	mb[0] = MBC_EXECUTE_FIRMWARE;
	mb[1] = ha->fwstart;
	err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
	if (err) {
		printk(KERN_ERR "scsi(%li): Failed to start firmware\n",
		       ha->host_no);
	}

	return err;
}

static int
qla1280_load_firmware(struct scsi_qla_host *ha)
{
	int err;

	err = qla1280_chip_diag(ha);
	if (err)
		goto out;
	if (IS_ISP1040(ha))
		err = qla1280_load_firmware_pio(ha);
	else
		err = qla1280_load_firmware_dma(ha);
	if (err)
		goto out;
	err = qla1280_start_firmware(ha);
 out:
	return err;
}

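/*
 * qla1280_init_rings
 *	Clear the outstanding-command array and tell the firmware where the
 *	request and response rings live (64-bit DMA addresses) via the
 *	INIT_REQUEST_QUEUE_A64 / INIT_RESPONSE_QUEUE_A64 mailbox commands.
 */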
static int
qla1280_init_rings(struct scsi_qla_host *ha)
{
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	int status = 0;

	ENTER("qla1280_init_rings");

	memset(ha->outstanding_cmds, 0,
	       sizeof(struct srb *) * MAX_OUTSTANDING_COMMANDS);

	ha->request_ring_ptr = ha->request_ring;
	ha->req_ring_index = 0;
	ha->req_q_cnt = REQUEST_ENTRY_CNT;

	mb[0] = MBC_INIT_REQUEST_QUEUE_A64;
	mb[1] = REQUEST_ENTRY_CNT;
	mb[3] = ha->request_dma & 0xffff;
	mb[2] = (ha->request_dma >> 16) & 0xffff;
	mb[4] = 0;
	mb[7] = upper_32_bits(ha->request_dma) & 0xffff;
	mb[6] = upper_32_bits(ha->request_dma) >> 16;
	if (!(status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_4 |
					       BIT_3 | BIT_2 | BIT_1 | BIT_0,
					       &mb[0]))) {
		ha->response_ring_ptr = ha->response_ring;
		ha->rsp_ring_index = 0;

		mb[0] = MBC_INIT_RESPONSE_QUEUE_A64;
		mb[1] = RESPONSE_ENTRY_CNT;
		mb[3] = ha->response_dma & 0xffff;
		mb[2] = (ha->response_dma >> 16) & 0xffff;
		mb[5] = 0;
		mb[7] = upper_32_bits(ha->response_dma) & 0xffff;
		mb[6] = upper_32_bits(ha->response_dma) >> 16;
		status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_5 |
						 BIT_3 | BIT_2 | BIT_1 | BIT_0,
						 &mb[0]);
	}

	if (status)
		dprintk(2, "qla1280_init_rings: **** FAILED ****\n");

	LEAVE("qla1280_init_rings");
	return status;
}

static void
qla1280_print_settings(struct nvram *nv)
{
	dprintk(1, "qla1280 : initiator scsi id bus[0]=%d\n",
		nv->bus[0].config_1.initiator_id);
	dprintk(1, "qla1280 : initiator scsi id bus[1]=%d\n",
		nv->bus[1].config_1.initiator_id);

	dprintk(1, "qla1280 : bus reset delay[0]=%d\n",
		nv->bus[0].bus_reset_delay);
	dprintk(1, "qla1280 : bus reset delay[1]=%d\n",
		nv->bus[1].bus_reset_delay);

	dprintk(1, "qla1280 : retry count[0]=%d\n", nv->bus[0].retry_count);
	dprintk(1, "qla1280 : retry delay[0]=%d\n", nv->bus[0].retry_delay);
	dprintk(1, "qla1280 : retry count[1]=%d\n", nv->bus[1].retry_count);
	dprintk(1, "qla1280 : retry delay[1]=%d\n", nv->bus[1].retry_delay);

	dprintk(1, "qla1280 : async data setup time[0]=%d\n",
		nv->bus[0].config_2.async_data_setup_time);
	dprintk(1, "qla1280 : async data setup time[1]=%d\n",
		nv->bus[1].config_2.async_data_setup_time);

	dprintk(1, "qla1280 : req/ack active negation[0]=%d\n",
		nv->bus[0].config_2.req_ack_active_negation);
	dprintk(1, "qla1280 : req/ack active negation[1]=%d\n",
		nv->bus[1].config_2.req_ack_active_negation);

	dprintk(1, "qla1280 : data line active negation[0]=%d\n",
		nv->bus[0].config_2.data_line_active_negation);
	dprintk(1, "qla1280 : data line active negation[1]=%d\n",
		nv->bus[1].config_2.data_line_active_negation);

	dprintk(1, "qla1280 : disable loading risc code=%d\n",
		nv->cntr_flags_1.disable_loading_risc_code);

	dprintk(1, "qla1280 : enable 64bit addressing=%d\n",
		nv->cntr_flags_1.enable_64bit_addressing);

	dprintk(1, "qla1280 : selection timeout limit[0]=%d\n",
		nv->bus[0].selection_timeout);
	dprintk(1, "qla1280 : selection timeout limit[1]=%d\n",
		nv->bus[1].selection_timeout);

	dprintk(1, "qla1280 : max queue depth[0]=%d\n",
		nv->bus[0].max_queue_depth);
	dprintk(1, "qla1280 : max queue depth[1]=%d\n",
		nv->bus[1].max_queue_depth);
}

static void
qla1280_set_target_defaults(struct scsi_qla_host *ha, int bus, int target)
{
	struct nvram *nv = &ha->nvram;

	nv->bus[bus].target[target].parameter.renegotiate_on_error = 1;
	nv->bus[bus].target[target].parameter.auto_request_sense = 1;
	nv->bus[bus].target[target].parameter.tag_queuing = 1;
	nv->bus[bus].target[target].parameter.enable_sync = 1;
#if 1
	nv->bus[bus].target[target].parameter.enable_wide = 1;
#endif
	nv->bus[bus].target[target].execution_throttle =
		nv->bus[bus].max_queue_depth - 1;
	nv->bus[bus].target[target].parameter.parity_checking = 1;
	nv->bus[bus].target[target].parameter.disconnect_allowed = 1;

	if (IS_ISP1x160(ha)) {
		nv->bus[bus].target[target].flags.flags1x160.device_enable = 1;
		nv->bus[bus].target[target].flags.flags1x160.sync_offset = 0x0e;
		nv->bus[bus].target[target].sync_period = 9;
		nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 1;
		nv->bus[bus].target[target].ppr_1x160.flags.ppr_options = 2;
		nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width = 1;
	} else {
		nv->bus[bus].target[target].flags.flags1x80.device_enable = 1;
		nv->bus[bus].target[target].flags.flags1x80.sync_offset = 12;
		nv->bus[bus].target[target].sync_period = 10;
	}
}

static void
qla1280_set_defaults(struct scsi_qla_host *ha)
{
	struct nvram *nv = &ha->nvram;
	int bus, target;

	dprintk(1, "Using defaults for NVRAM: \n");
	memset(nv, 0, sizeof(struct nvram));

	nv->firmware_feature.f.enable_fast_posting = 1;
	nv->firmware_feature.f.disable_synchronous_backoff = 1;
	nv->termination.scsi_bus_0_control = 3;
	nv->termination.scsi_bus_1_control = 3;
	nv->termination.auto_term_support = 1;

	nv->isp_config.burst_enable = 1;
	if (IS_ISP1040(ha))
		nv->isp_config.fifo_threshold |= 3;
	else
		nv->isp_config.fifo_threshold |= 4;

	if (IS_ISP1x160(ha))
		nv->isp_parameter = 0x01;

	for (bus = 0; bus < MAX_BUSES; bus++) {
		nv->bus[bus].config_1.initiator_id = 7;
		nv->bus[bus].config_2.req_ack_active_negation = 1;
		nv->bus[bus].config_2.data_line_active_negation = 1;
		nv->bus[bus].selection_timeout = 250;
		nv->bus[bus].max_queue_depth = 32;

		if (IS_ISP1040(ha)) {
			nv->bus[bus].bus_reset_delay = 3;
			nv->bus[bus].config_2.async_data_setup_time = 6;
			nv->bus[bus].retry_delay = 1;
		} else {
			nv->bus[bus].bus_reset_delay = 5;
			nv->bus[bus].config_2.async_data_setup_time = 8;
		}

		for (target = 0; target < MAX_TARGETS; target++)
			qla1280_set_target_defaults(ha, bus, target);
	}
}

static int
qla1280_config_target(struct scsi_qla_host *ha, int bus, int target)
{
	struct nvram *nv = &ha->nvram;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	int status, lun;
	uint16_t flag;

	mb[0] = MBC_SET_TARGET_PARAMETERS;
	mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);

	mb[2] = (TP_RENEGOTIATE | TP_AUTO_REQUEST_SENSE | TP_TAGGED_QUEUE
		 | TP_WIDE | TP_PARITY | TP_DISCONNECT);

	if (IS_ISP1x160(ha))
		mb[3] = nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8;
	else
		mb[3] = nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8;
	mb[3] |= nv->bus[bus].target[target].sync_period;
	status = qla1280_mailbox_command(ha, 0x0f, mb);

	flag = (BIT_0 << target);
	if (nv->bus[bus].target[target].parameter.tag_queuing)
		ha->bus_settings[bus].qtag_enables |= flag;

	if (IS_ISP1x160(ha)) {
		if (nv->bus[bus].target[target].flags.flags1x160.device_enable)
			ha->bus_settings[bus].device_enables |= flag;
		ha->bus_settings[bus].lun_disables |= 0;
	} else {
		if (nv->bus[bus].target[target].flags.flags1x80.device_enable)
			ha->bus_settings[bus].device_enables |= flag;

		if (nv->bus[bus].target[target].flags.flags1x80.lun_disable)
			ha->bus_settings[bus].lun_disables |= flag;
	}

	for (lun = 0; lun < MAX_LUNS; lun++) {
		mb[0] = MBC_SET_DEVICE_QUEUE;
		mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
		mb[1] |= lun;
		mb[2] = nv->bus[bus].max_queue_depth;
		mb[3] = nv->bus[bus].target[target].execution_throttle;
		status |= qla1280_mailbox_command(ha, 0x0f, mb);
	}

	return status;
}

static int
qla1280_config_bus(struct scsi_qla_host *ha, int bus)
{
	struct nvram *nv = &ha->nvram;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	int target, status;

	ha->bus_settings[bus].disable_scsi_reset =
		nv->bus[bus].config_1.scsi_reset_disable;

	ha->bus_settings[bus].id = nv->bus[bus].config_1.initiator_id;
	mb[0] = MBC_SET_INITIATOR_ID;
	mb[1] = bus ? ha->bus_settings[bus].id | BIT_7 :
		ha->bus_settings[bus].id;
	status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);

	ha->bus_settings[bus].bus_reset_delay =
		nv->bus[bus].bus_reset_delay;

	ha->bus_settings[bus].hiwat = nv->bus[bus].max_queue_depth - 1;

	for (target = 0; target < MAX_TARGETS; target++)
		status |= qla1280_config_target(ha, bus, target);

	return status;
}

static int
qla1280_nvram_config(struct scsi_qla_host *ha)
{
	struct device_reg __iomem *reg = ha->iobase;
	struct nvram *nv = &ha->nvram;
	int bus, target, status = 0;
	uint16_t mb[MAILBOX_REGISTER_COUNT];

	ENTER("qla1280_nvram_config");

	if (ha->nvram_valid) {
		for (bus = 0; bus < MAX_BUSES; bus++)
			for (target = 0; target < MAX_TARGETS; target++) {
				nv->bus[bus].target[target].parameter.
					auto_request_sense = 1;
			}
	} else {
		qla1280_set_defaults(ha);
	}

	qla1280_print_settings(nv);

	ha->flags.disable_risc_code_load =
		nv->cntr_flags_1.disable_loading_risc_code;

	if (IS_ISP1040(ha)) {
		uint16_t hwrev, cfg1, cdma_conf, ddma_conf;

		hwrev = RD_REG_WORD(&reg->cfg_0) & ISP_CFG0_HWMSK;

		cfg1 = RD_REG_WORD(&reg->cfg_1) & ~(BIT_4 | BIT_5 | BIT_6);
		cdma_conf = RD_REG_WORD(&reg->cdma_cfg);
		ddma_conf = RD_REG_WORD(&reg->ddma_cfg);

		if (hwrev != ISP_CFG0_1040A)
			cfg1 |= nv->isp_config.fifo_threshold << 4;

		cfg1 |= nv->isp_config.burst_enable << 2;
		WRT_REG_WORD(&reg->cfg_1, cfg1);

		WRT_REG_WORD(&reg->cdma_cfg, cdma_conf | CDMA_CONF_BENAB);
		WRT_REG_WORD(&reg->ddma_cfg, ddma_conf | DDMA_CONF_BENAB);
	} else {
		uint16_t cfg1, term;

		cfg1 = nv->isp_config.fifo_threshold << 4;
		cfg1 |= nv->isp_config.burst_enable << 2;

		if (ha->ports > 1)
			cfg1 |= BIT_13;
		WRT_REG_WORD(&reg->cfg_1, cfg1);

		WRT_REG_WORD(&reg->gpio_enable,
			     BIT_7 | BIT_3 | BIT_2 | BIT_1 | BIT_0);
		term = nv->termination.scsi_bus_1_control;
		term |= nv->termination.scsi_bus_0_control << 2;
		term |= nv->termination.auto_term_support << 7;
		RD_REG_WORD(&reg->id_l);
		WRT_REG_WORD(&reg->gpio_data, term);
	}
	RD_REG_WORD(&reg->id_l);

	mb[0] = MBC_SET_SYSTEM_PARAMETER;
	mb[1] = nv->isp_parameter;
	status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);

	if (IS_ISP1x40(ha)) {
		mb[0] = MBC_SET_CLOCK_RATE;
		mb[1] = 40;
		status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
	}

	mb[0] = MBC_SET_FIRMWARE_FEATURES;
	mb[1] = nv->firmware_feature.f.enable_fast_posting;
	mb[1] |= nv->firmware_feature.f.report_lvd_bus_transition << 1;
	mb[1] |= nv->firmware_feature.f.disable_synchronous_backoff << 5;
	status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);

	mb[0] = MBC_SET_RETRY_COUNT;
	mb[1] = nv->bus[0].retry_count;
	mb[2] = nv->bus[0].retry_delay;
	mb[6] = nv->bus[1].retry_count;
	mb[7] = nv->bus[1].retry_delay;
	status |= qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_2 |
					  BIT_1 | BIT_0, &mb[0]);

	mb[0] = MBC_SET_ASYNC_DATA_SETUP;
	mb[1] = nv->bus[0].config_2.async_data_setup_time;
	mb[2] = nv->bus[1].config_2.async_data_setup_time;
	status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);

	mb[0] = MBC_SET_ACTIVE_NEGATION;
	mb[1] = 0;
	if (nv->bus[0].config_2.req_ack_active_negation)
		mb[1] |= BIT_5;
	if (nv->bus[0].config_2.data_line_active_negation)
		mb[1] |= BIT_4;
	mb[2] = 0;
	if (nv->bus[1].config_2.req_ack_active_negation)
		mb[2] |= BIT_5;
	if (nv->bus[1].config_2.data_line_active_negation)
		mb[2] |= BIT_4;
	status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);

	mb[0] = MBC_SET_DATA_OVERRUN_RECOVERY;
	mb[1] = 2;
	status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);

	mb[0] = MBC_SET_PCI_CONTROL;
	mb[1] = BIT_1;
	mb[2] = BIT_1;
	status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);

	mb[0] = MBC_SET_TAG_AGE_LIMIT;
	mb[1] = 8;
	status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);

	mb[0] = MBC_SET_SELECTION_TIMEOUT;
	mb[1] = nv->bus[0].selection_timeout;
	mb[2] = nv->bus[1].selection_timeout;
	status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);

	for (bus = 0; bus < ha->ports; bus++)
		status |= qla1280_config_bus(ha, bus);

	if (status)
		dprintk(2, "qla1280_nvram_config: **** FAILED ****\n");

	LEAVE("qla1280_nvram_config");
	return status;
}

static uint16_t
qla1280_get_nvram_word(struct scsi_qla_host *ha, uint32_t address)
{
	uint32_t nv_cmd;
	uint16_t data;

	nv_cmd = address << 16;
	nv_cmd |= NV_READ_OP;

	data = le16_to_cpu(qla1280_nvram_request(ha, nv_cmd));

	dprintk(8, "qla1280_get_nvram_word: exiting normally NVRAM data = "
		"0x%x", data);

	return data;
}

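/*
 * qla1280_nvram_request
 *	Bit-bang a command out to the serial NVRAM part and clock back the
 *	16 data bits of the response, then deselect the chip.
 */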
static uint16_t
qla1280_nvram_request(struct scsi_qla_host *ha, uint32_t nv_cmd)
{
	struct device_reg __iomem *reg = ha->iobase;
	int cnt;
	uint16_t data = 0;
	uint16_t reg_data;

	nv_cmd <<= 5;
	for (cnt = 0; cnt < 11; cnt++) {
		if (nv_cmd & BIT_31)
			qla1280_nv_write(ha, NV_DATA_OUT);
		else
			qla1280_nv_write(ha, 0);
		nv_cmd <<= 1;
	}

	for (cnt = 0; cnt < 16; cnt++) {
		WRT_REG_WORD(&reg->nvram, (NV_SELECT | NV_CLOCK));
		RD_REG_WORD(&reg->id_l);
		NVRAM_DELAY();
		data <<= 1;
		reg_data = RD_REG_WORD(&reg->nvram);
		if (reg_data & NV_DATA_IN)
			data |= BIT_0;
		WRT_REG_WORD(&reg->nvram, NV_SELECT);
		RD_REG_WORD(&reg->id_l);
		NVRAM_DELAY();
	}

	WRT_REG_WORD(&reg->nvram, NV_DESELECT);
	RD_REG_WORD(&reg->id_l);
	NVRAM_DELAY();

	return data;
}

static void
qla1280_nv_write(struct scsi_qla_host *ha, uint16_t data)
{
	struct device_reg __iomem *reg = ha->iobase;

	WRT_REG_WORD(&reg->nvram, data | NV_SELECT);
	RD_REG_WORD(&reg->id_l);
	NVRAM_DELAY();
	WRT_REG_WORD(&reg->nvram, data | NV_SELECT | NV_CLOCK);
	RD_REG_WORD(&reg->id_l);
	NVRAM_DELAY();
	WRT_REG_WORD(&reg->nvram, data | NV_SELECT);
	RD_REG_WORD(&reg->id_l);
	NVRAM_DELAY();
}

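/*
 * qla1280_mailbox_command
 *	Issue a mailbox command to the ISP and wait for it to complete.
 *	'mr' is a bitmask of which mailbox registers to load from mb[]; the
 *	firmware's reply is copied back into mb[] on return.
 *
 * Returns:
 *	0 = command completed, 1 = command failed or timed out.
 */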
static int
qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
{
	struct device_reg __iomem *reg = ha->iobase;
	int status = 0;
	int cnt;
	uint16_t *optr, *iptr;
	uint16_t __iomem *mptr;
	uint16_t data;
	DECLARE_COMPLETION_ONSTACK(wait);

	ENTER("qla1280_mailbox_command");

	if (ha->mailbox_wait) {
		printk(KERN_ERR "Warning mailbox wait already in use!\n");
	}
	ha->mailbox_wait = &wait;

	mptr = (uint16_t __iomem *) &reg->mailbox0;
	iptr = mb;
	for (cnt = 0; cnt < MAILBOX_REGISTER_COUNT; cnt++) {
		if (mr & BIT_0) {
			WRT_REG_WORD(mptr, (*iptr));
		}

		mr >>= 1;
		mptr++;
		iptr++;
	}

	timer_setup(&ha->mailbox_timer, qla1280_mailbox_timeout, 0);
	mod_timer(&ha->mailbox_timer, jiffies + 20 * HZ);

	spin_unlock_irq(ha->host->host_lock);
	WRT_REG_WORD(&reg->host_cmd, HC_SET_HOST_INT);
	data = qla1280_debounce_register(&reg->istatus);

	wait_for_completion(&wait);
	del_timer_sync(&ha->mailbox_timer);

	spin_lock_irq(ha->host->host_lock);

	ha->mailbox_wait = NULL;

	if (ha->mailbox_out[0] != MBS_CMD_CMP) {
		printk(KERN_WARNING "qla1280_mailbox_command: Command failed, "
		       "mailbox0 = 0x%04x, mailbox_out0 = 0x%04x, istatus = "
		       "0x%04x\n",
		       mb[0], ha->mailbox_out[0], RD_REG_WORD(&reg->istatus));
		printk(KERN_WARNING "m0 %04x, m1 %04x, m2 %04x, m3 %04x\n",
		       RD_REG_WORD(&reg->mailbox0), RD_REG_WORD(&reg->mailbox1),
		       RD_REG_WORD(&reg->mailbox2), RD_REG_WORD(&reg->mailbox3));
		printk(KERN_WARNING "m4 %04x, m5 %04x, m6 %04x, m7 %04x\n",
		       RD_REG_WORD(&reg->mailbox4), RD_REG_WORD(&reg->mailbox5),
		       RD_REG_WORD(&reg->mailbox6), RD_REG_WORD(&reg->mailbox7));
		status = 1;
	}

	optr = mb;
	iptr = (uint16_t *) &ha->mailbox_out[0];
	mr = MAILBOX_REGISTER_COUNT;
	memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t));

	if (ha->flags.reset_marker)
		qla1280_rst_aen(ha);

	if (status)
		dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = "
			"0x%x ****\n", mb[0]);

	LEAVE("qla1280_mailbox_command");
	return status;
}

static void
qla1280_poll(struct scsi_qla_host *ha)
{
	struct device_reg __iomem *reg = ha->iobase;
	uint16_t data;
	LIST_HEAD(done_q);

	data = RD_REG_WORD(&reg->istatus);
	if (data & RISC_INT)
		qla1280_isr(ha, &done_q);

	if (!ha->mailbox_wait) {
		if (ha->flags.reset_marker)
			qla1280_rst_aen(ha);
	}

	if (!list_empty(&done_q))
		qla1280_done(ha);
}

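/*
 * qla1280_bus_reset
 *	Issue a SCSI bus reset through the firmware, wait out the configured
 *	reset delay and post a "sync all" marker. Repeated failures mark the
 *	bus as dead.
 */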
2549static int
2550qla1280_bus_reset(struct scsi_qla_host *ha, int bus)
2551{
2552 uint16_t mb[MAILBOX_REGISTER_COUNT];
2553 uint16_t reset_delay;
2554 int status;
2555
2556 dprintk(3, "qla1280_bus_reset: entered\n");
2557
2558 if (qla1280_verbose)
2559 printk(KERN_INFO "scsi(%li:%i): Resetting SCSI BUS\n",
2560 ha->host_no, bus);
2561
2562 reset_delay = ha->bus_settings[bus].bus_reset_delay;
2563 mb[0] = MBC_BUS_RESET;
2564 mb[1] = reset_delay;
2565 mb[2] = (uint16_t) bus;
2566 status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2567
2568 if (status) {
2569 if (ha->bus_settings[bus].failed_reset_count > 2)
2570 ha->bus_settings[bus].scsi_bus_dead = 1;
2571 ha->bus_settings[bus].failed_reset_count++;
2572 } else {
2573 spin_unlock_irq(ha->host->host_lock);
2574 ssleep(reset_delay);
2575 spin_lock_irq(ha->host->host_lock);
2576
2577 ha->bus_settings[bus].scsi_bus_dead = 0;
2578 ha->bus_settings[bus].failed_reset_count = 0;
2579 ha->bus_settings[bus].reset_marker = 0;
2580
2581 qla1280_marker(ha, bus, 0, 0, MK_SYNC_ALL);
2582 }
2583
2584
2585
2586
2587
2588
2589 if (status)
2590 dprintk(2, "qla1280_bus_reset: **** FAILED ****\n");
2591 else
2592 dprintk(3, "qla1280_bus_reset: exiting normally\n");
2593
2594 return status;
2595}
2596
2597
2598
2599
2600
2601
2602
2603
2604
2605
2606
2607
2608
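/*
 * qla1280_device_reset
 *	Reset a single target by issuing the MBC_ABORT_TARGET mailbox
 *	command, followed by a MK_SYNC_ID marker for that target.
 *
 * Returns the mailbox command status (0 = success).
 */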
2609static int
2610qla1280_device_reset(struct scsi_qla_host *ha, int bus, int target)
2611{
2612 uint16_t mb[MAILBOX_REGISTER_COUNT];
2613 int status;
2614
2615 ENTER("qla1280_device_reset");
2616
2617 mb[0] = MBC_ABORT_TARGET;
2618 mb[1] = (bus ? (target | BIT_7) : target) << 8;
2619 mb[2] = 1;
2620 status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2621
2622
2623 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
2624
2625 if (status)
2626 dprintk(2, "qla1280_device_reset: **** FAILED ****\n");
2627
2628 LEAVE("qla1280_device_reset");
2629 return status;
2630}
2631
2632
2633
2634
2635
2636
2637
2638
2639
2640
2641
2642
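/*
 * qla1280_abort_command
 *	Ask the firmware to abort one outstanding command, identified by
 *	its request handle, using the MBC_ABORT_COMMAND mailbox command.
 *	SRB_ABORT_PENDING is set for the duration of the attempt and
 *	cleared again if the abort fails.
 */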
2643static int
2644qla1280_abort_command(struct scsi_qla_host *ha, struct srb * sp, int handle)
2645{
2646 uint16_t mb[MAILBOX_REGISTER_COUNT];
2647 unsigned int bus, target, lun;
2648 int status;
2649
2650 ENTER("qla1280_abort_command");
2651
2652 bus = SCSI_BUS_32(sp->cmd);
2653 target = SCSI_TCN_32(sp->cmd);
2654 lun = SCSI_LUN_32(sp->cmd);
2655
2656 sp->flags |= SRB_ABORT_PENDING;
2657
2658 mb[0] = MBC_ABORT_COMMAND;
2659 mb[1] = (bus ? target | BIT_7 : target) << 8 | lun;
2660 mb[2] = handle >> 16;
2661 mb[3] = handle & 0xffff;
2662 status = qla1280_mailbox_command(ha, 0x0f, &mb[0]);
2663
2664 if (status) {
2665 dprintk(2, "qla1280_abort_command: **** FAILED ****\n");
2666 sp->flags &= ~SRB_ABORT_PENDING;
2667 }
2668
2669
2670 LEAVE("qla1280_abort_command");
2671 return status;
2672}
2673
2674
2675
2676
2677
2678
2679
2680
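/*
 * qla1280_reset_adapter
 *	Hard-reset the ISP: mark the adapter offline, assert ISP_RESET in
 *	the interrupt control register and reset/release the RISC with the
 *	BIOS disabled.
 */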
2681static void
2682qla1280_reset_adapter(struct scsi_qla_host *ha)
2683{
2684 struct device_reg __iomem *reg = ha->iobase;
2685
2686 ENTER("qla1280_reset_adapter");
2687
2688
2689 ha->flags.online = 0;
	WRT_REG_WORD(&reg->ictrl, ISP_RESET);
	WRT_REG_WORD(&reg->host_cmd,
		     HC_RESET_RISC | HC_RELEASE_RISC | HC_DISABLE_BIOS);
	RD_REG_WORD(&reg->id_l);
2694
2695 LEAVE("qla1280_reset_adapter");
2696}
2697
2698
2699
2700
2701
2702
2703
2704
2705
2706
2707
2708
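/*
 * qla1280_marker
 *	Queue a marker IOCB so the firmware re-synchronizes its view of
 *	the given bus/target/lun after a reset ('type' selects the scope,
 *	e.g. MK_SYNC_ID or MK_SYNC_ALL).  Silently does nothing if no
 *	request queue entry is available.
 */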
2709static void
2710qla1280_marker(struct scsi_qla_host *ha, int bus, int id, int lun, u8 type)
2711{
2712 struct mrk_entry *pkt;
2713
2714 ENTER("qla1280_marker");
2715
2716
2717 if ((pkt = (struct mrk_entry *) qla1280_req_pkt(ha))) {
2718 pkt->entry_type = MARKER_TYPE;
2719 pkt->lun = (uint8_t) lun;
2720 pkt->target = (uint8_t) (bus ? (id | BIT_7) : id);
2721 pkt->modifier = type;
2722 pkt->entry_status = 0;
2723
2724
2725 qla1280_isp_cmd(ha);
2726 }
2727
2728 LEAVE("qla1280_marker");
2729}
2730
2731
2732
2733
2734
2735
2736
2737
2738
2739
2740
2741
2742
2743
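/*
 * qla1280_64bit_start_scsi
 *	Build and issue a 64-bit (COMMAND_A64_TYPE) command IOCB for the
 *	given SRB: map the scatter/gather list, reserve request queue
 *	entries (two address/length pairs fit in the command entry, five
 *	more in each continuation entry), fill in CDB, LUN/target and data
 *	direction, then ring the request-queue in pointer (mailbox4).
 *
 * Returns 0 on success, SCSI_MLQUEUE_HOST_BUSY if the request queue or
 * the outstanding-command array is full, or 1 on a DMA mapping failure.
 */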
2744#ifdef QLA_64BIT_PTR
2745static int
2746qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2747{
2748 struct device_reg __iomem *reg = ha->iobase;
2749 struct scsi_cmnd *cmd = sp->cmd;
2750 cmd_a64_entry_t *pkt;
2751 __le32 *dword_ptr;
2752 dma_addr_t dma_handle;
2753 int status = 0;
2754 int cnt;
2755 int req_cnt;
2756 int seg_cnt;
2757 u8 dir;
2758
2759 ENTER("qla1280_64bit_start_scsi:");
2760
2761
2762 req_cnt = 1;
2763 seg_cnt = scsi_dma_map(cmd);
2764 if (seg_cnt > 0) {
2765 if (seg_cnt > 2) {
2766 req_cnt += (seg_cnt - 2) / 5;
2767 if ((seg_cnt - 2) % 5)
2768 req_cnt++;
2769 }
2770 } else if (seg_cnt < 0) {
2771 status = 1;
2772 goto out;
2773 }
2774
2775 if ((req_cnt + 2) >= ha->req_q_cnt) {
2776
		cnt = RD_REG_WORD(&reg->mailbox4);
2778 if (ha->req_ring_index < cnt)
2779 ha->req_q_cnt = cnt - ha->req_ring_index;
2780 else
2781 ha->req_q_cnt =
2782 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
2783 }
2784
2785 dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
2786 ha->req_q_cnt, seg_cnt);
2787
2788
2789 if ((req_cnt + 2) >= ha->req_q_cnt) {
2790 status = SCSI_MLQUEUE_HOST_BUSY;
		dprintk(2, "qla1280_start_scsi: in-ptr=0x%x, req_q_cnt="
			"0x%x, req_cnt=0x%x\n", ha->req_ring_index, ha->req_q_cnt,
			req_cnt);
2794 goto out;
2795 }
2796
2797
2798 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
2799 ha->outstanding_cmds[cnt] != NULL; cnt++);
2800
2801 if (cnt >= MAX_OUTSTANDING_COMMANDS) {
2802 status = SCSI_MLQUEUE_HOST_BUSY;
2803 dprintk(2, "qla1280_start_scsi: NO ROOM IN "
2804 "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt);
2805 goto out;
2806 }
2807
2808 ha->outstanding_cmds[cnt] = sp;
2809 ha->req_q_cnt -= req_cnt;
2810 CMD_HANDLE(sp->cmd) = (unsigned char *)(unsigned long)(cnt + 1);
2811
	dprintk(2, "start: cmd=%p sp=%p CDB=%x, handle %lx\n", cmd, sp,
2813 cmd->cmnd[0], (long)CMD_HANDLE(sp->cmd));
2814 dprintk(2, " bus %i, target %i, lun %i\n",
2815 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
2816 qla1280_dump_buffer(2, cmd->cmnd, MAX_COMMAND_SIZE);
2817
2818
2819
2820
2821 pkt = (cmd_a64_entry_t *) ha->request_ring_ptr;
2822
2823 pkt->entry_type = COMMAND_A64_TYPE;
2824 pkt->entry_count = (uint8_t) req_cnt;
2825 pkt->sys_define = (uint8_t) ha->req_ring_index;
2826 pkt->entry_status = 0;
2827 pkt->handle = cpu_to_le32(cnt);
2828
2829
2830 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
2831
2832
2833 pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
2834
2835
2836 pkt->lun = SCSI_LUN_32(cmd);
2837 pkt->target = SCSI_BUS_32(cmd) ?
2838 (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
2839
2840
2841 if (cmd->device->simple_tags)
2842 pkt->control_flags |= cpu_to_le16(BIT_3);
2843
2844
2845 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
2846 memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
2847
2848
2849
2850 dir = qla1280_data_direction(cmd);
2851 pkt->control_flags |= cpu_to_le16(dir);
2852
2853
2854 pkt->dseg_count = cpu_to_le16(seg_cnt);
2855
2856
2857
2858
2859 if (seg_cnt) {
2860 struct scatterlist *sg, *s;
2861 int remseg = seg_cnt;
2862
2863 sg = scsi_sglist(cmd);
2864
2865
2866 dword_ptr = (u32 *)&pkt->dseg_0_address;
2867
2868
2869 for_each_sg(sg, s, seg_cnt, cnt) {
2870 if (cnt == 2)
2871 break;
2872
2873 dma_handle = sg_dma_address(s);
2874 *dword_ptr++ =
2875 cpu_to_le32(lower_32_bits(dma_handle));
2876 *dword_ptr++ =
2877 cpu_to_le32(upper_32_bits(dma_handle));
2878 *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
			dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n",
				cpu_to_le32(upper_32_bits(dma_handle)),
				cpu_to_le32(lower_32_bits(dma_handle)),
				cpu_to_le32(sg_dma_len(s)));
2883 remseg--;
2884 }
2885 dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather "
2886 "command packet data - b %i, t %i, l %i \n",
2887 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
2888 SCSI_LUN_32(cmd));
2889 qla1280_dump_buffer(5, (char *)pkt,
2890 REQUEST_ENTRY_SIZE);
2891
2892
2893
2894
2895 dprintk(3, "S/G Building Continuation...seg_cnt=0x%x "
2896 "remains\n", seg_cnt);
2897
2898 while (remseg > 0) {
2899
2900 sg = s;
2901
2902 ha->req_ring_index++;
2903 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
2904 ha->req_ring_index = 0;
2905 ha->request_ring_ptr =
2906 ha->request_ring;
2907 } else
2908 ha->request_ring_ptr++;
2909
2910 pkt = (cmd_a64_entry_t *)ha->request_ring_ptr;
2911
2912
2913 memset(pkt, 0, REQUEST_ENTRY_SIZE);
2914
2915
2916 ((struct cont_a64_entry *) pkt)->entry_type =
2917 CONTINUE_A64_TYPE;
2918 ((struct cont_a64_entry *) pkt)->entry_count = 1;
2919 ((struct cont_a64_entry *) pkt)->sys_define =
2920 (uint8_t)ha->req_ring_index;
2921
2922 dword_ptr =
2923 (u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address;
2924
2925
2926 for_each_sg(sg, s, remseg, cnt) {
2927 if (cnt == 5)
2928 break;
2929 dma_handle = sg_dma_address(s);
2930 *dword_ptr++ =
2931 cpu_to_le32(lower_32_bits(dma_handle));
2932 *dword_ptr++ =
2933 cpu_to_le32(upper_32_bits(dma_handle));
2934 *dword_ptr++ =
2935 cpu_to_le32(sg_dma_len(s));
2936 dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n",
2937 cpu_to_le32(upper_32_bits(dma_handle)),
2938 cpu_to_le32(lower_32_bits(dma_handle)),
2939 cpu_to_le32(sg_dma_len(s)));
2940 }
2941 remseg -= cnt;
2942 dprintk(5, "qla1280_64bit_start_scsi: "
2943 "continuation packet data - b %i, t "
2944 "%i, l %i \n", SCSI_BUS_32(cmd),
2945 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
2946 qla1280_dump_buffer(5, (char *)pkt,
2947 REQUEST_ENTRY_SIZE);
2948 }
2949 } else {
2950 dprintk(5, "qla1280_64bit_start_scsi: No data, command "
2951 "packet data - b %i, t %i, l %i \n",
2952 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
2953 qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
2954 }
2955
2956 ha->req_ring_index++;
2957 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
2958 ha->req_ring_index = 0;
2959 ha->request_ring_ptr = ha->request_ring;
2960 } else
2961 ha->request_ring_ptr++;
2962
2963
2964 dprintk(2,
2965 "qla1280_64bit_start_scsi: Wakeup RISC for pending command\n");
2966 sp->flags |= SRB_SENT;
2967 ha->actthreads++;
	WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
2969
2970 out:
2971 if (status)
2972 dprintk(2, "qla1280_64bit_start_scsi: **** FAILED ****\n");
2973 else
2974 dprintk(3, "qla1280_64bit_start_scsi: exiting normally\n");
2975
2976 return status;
2977}
2978#else
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988
2989
2990
2991
2992
2993
2994
2995
2996
2997
2998
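/*
 * qla1280_32bit_start_scsi
 *	32-bit DMA variant of the command-start path.  Same flow as the
 *	64-bit version, but uses COMMAND_TYPE/CONTINUE_TYPE IOCBs with
 *	32-bit data segment addresses: four address/length pairs fit in
 *	the command entry and seven in each continuation entry.
 */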
2999static int
3000qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3001{
3002 struct device_reg __iomem *reg = ha->iobase;
3003 struct scsi_cmnd *cmd = sp->cmd;
3004 struct cmd_entry *pkt;
3005 __le32 *dword_ptr;
3006 int status = 0;
3007 int cnt;
3008 int req_cnt;
3009 int seg_cnt;
3010 u8 dir;
3011
3012 ENTER("qla1280_32bit_start_scsi");
3013
3014 dprintk(1, "32bit_start: cmd=%p sp=%p CDB=%x\n", cmd, sp,
3015 cmd->cmnd[0]);
3016
3017
3018 req_cnt = 1;
3019 seg_cnt = scsi_dma_map(cmd);
3020 if (seg_cnt) {
3021
3022
3023
3024
3025 if (seg_cnt > 4) {
3026 req_cnt += (seg_cnt - 4) / 7;
3027 if ((seg_cnt - 4) % 7)
3028 req_cnt++;
3029 }
3030 dprintk(3, "S/G Transfer cmd=%p seg_cnt=0x%x, req_cnt=%x\n",
3031 cmd, seg_cnt, req_cnt);
3032 } else if (seg_cnt < 0) {
3033 status = 1;
3034 goto out;
3035 }
3036
3037 if ((req_cnt + 2) >= ha->req_q_cnt) {
3038
		cnt = RD_REG_WORD(&reg->mailbox4);
3040 if (ha->req_ring_index < cnt)
3041 ha->req_q_cnt = cnt - ha->req_ring_index;
3042 else
3043 ha->req_q_cnt =
3044 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
3045 }
3046
3047 dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
3048 ha->req_q_cnt, seg_cnt);
3049
3050 if ((req_cnt + 2) >= ha->req_q_cnt) {
3051 status = SCSI_MLQUEUE_HOST_BUSY;
3052 dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, "
3053 "req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index,
3054 ha->req_q_cnt, req_cnt);
3055 goto out;
3056 }
3057
3058
	for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
		     ha->outstanding_cmds[cnt] != NULL; cnt++);
3061
3062 if (cnt >= MAX_OUTSTANDING_COMMANDS) {
3063 status = SCSI_MLQUEUE_HOST_BUSY;
3064 dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING "
3065 "ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt);
3066 goto out;
3067 }
3068
3069 CMD_HANDLE(sp->cmd) = (unsigned char *) (unsigned long)(cnt + 1);
3070 ha->outstanding_cmds[cnt] = sp;
3071 ha->req_q_cnt -= req_cnt;
3072
3073
3074
3075
3076 pkt = (struct cmd_entry *) ha->request_ring_ptr;
3077
3078 pkt->entry_type = COMMAND_TYPE;
3079 pkt->entry_count = (uint8_t) req_cnt;
3080 pkt->sys_define = (uint8_t) ha->req_ring_index;
3081 pkt->entry_status = 0;
3082 pkt->handle = cpu_to_le32(cnt);
3083
3084
3085 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
3086
3087
3088 pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
3089
3090
3091 pkt->lun = SCSI_LUN_32(cmd);
3092 pkt->target = SCSI_BUS_32(cmd) ?
3093 (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
3094
3095
3096 if (cmd->device->simple_tags)
3097 pkt->control_flags |= cpu_to_le16(BIT_3);
3098
3099
3100 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
3101 memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
3102
3103
3104
3105 dir = qla1280_data_direction(cmd);
3106 pkt->control_flags |= cpu_to_le16(dir);
3107
3108
3109 pkt->dseg_count = cpu_to_le16(seg_cnt);
3110
3111
3112
3113
3114 if (seg_cnt) {
3115 struct scatterlist *sg, *s;
3116 int remseg = seg_cnt;
3117
3118 sg = scsi_sglist(cmd);
3119
3120
3121 dword_ptr = &pkt->dseg_0_address;
3122
3123 dprintk(3, "Building S/G data segments..\n");
3124 qla1280_dump_buffer(1, (char *)sg, 4 * 16);
3125
3126
3127 for_each_sg(sg, s, seg_cnt, cnt) {
3128 if (cnt == 4)
3129 break;
3130 *dword_ptr++ =
3131 cpu_to_le32(lower_32_bits(sg_dma_address(s)));
3132 *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
3133 dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n",
3134 (lower_32_bits(sg_dma_address(s))),
3135 (sg_dma_len(s)));
3136 remseg--;
3137 }
3138
3139
3140
3141 dprintk(3, "S/G Building Continuation"
3142 "...seg_cnt=0x%x remains\n", seg_cnt);
3143 while (remseg > 0) {
3144
3145 sg = s;
3146
3147 ha->req_ring_index++;
3148 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3149 ha->req_ring_index = 0;
3150 ha->request_ring_ptr =
3151 ha->request_ring;
3152 } else
3153 ha->request_ring_ptr++;
3154
3155 pkt = (struct cmd_entry *)ha->request_ring_ptr;
3156
3157
3158 memset(pkt, 0, REQUEST_ENTRY_SIZE);
3159
3160
3161 ((struct cont_entry *) pkt)->
3162 entry_type = CONTINUE_TYPE;
3163 ((struct cont_entry *) pkt)->entry_count = 1;
3164
3165 ((struct cont_entry *) pkt)->sys_define =
3166 (uint8_t) ha->req_ring_index;
3167
3168
3169 dword_ptr =
3170 &((struct cont_entry *) pkt)->dseg_0_address;
3171
3172
3173 for_each_sg(sg, s, remseg, cnt) {
3174 if (cnt == 7)
3175 break;
3176 *dword_ptr++ =
3177 cpu_to_le32(lower_32_bits(sg_dma_address(s)));
3178 *dword_ptr++ =
3179 cpu_to_le32(sg_dma_len(s));
3180 dprintk(1,
3181 "S/G Segment Cont. phys_addr=0x%x, "
3182 "len=0x%x\n",
3183 cpu_to_le32(lower_32_bits(sg_dma_address(s))),
3184 cpu_to_le32(sg_dma_len(s)));
3185 }
3186 remseg -= cnt;
3187 dprintk(5, "qla1280_32bit_start_scsi: "
3188 "continuation packet data - "
3189 "scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd),
3190 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
3191 qla1280_dump_buffer(5, (char *)pkt,
3192 REQUEST_ENTRY_SIZE);
3193 }
3194 } else {
3195 dprintk(5, "qla1280_32bit_start_scsi: No data, command "
3196 "packet data - \n");
3197 qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
3198 }
3199 dprintk(5, "qla1280_32bit_start_scsi: First IOCB block:\n");
3200 qla1280_dump_buffer(5, (char *)ha->request_ring_ptr,
3201 REQUEST_ENTRY_SIZE);
3202
3203
3204 ha->req_ring_index++;
3205 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3206 ha->req_ring_index = 0;
3207 ha->request_ring_ptr = ha->request_ring;
3208 } else
3209 ha->request_ring_ptr++;
3210
3211
3212 dprintk(2, "qla1280_32bit_start_scsi: Wakeup RISC "
3213 "for pending command\n");
3214 sp->flags |= SRB_SENT;
3215 ha->actthreads++;
	WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
3217
3218out:
3219 if (status)
3220 dprintk(2, "qla1280_32bit_start_scsi: **** FAILED ****\n");
3221
3222 LEAVE("qla1280_32bit_start_scsi");
3223
3224 return status;
3225}
3226#endif
3227
3228
3229
3230
3231
3232
3233
3234
3235
3236
3237
3238
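/*
 * qla1280_req_pkt
 *	Wait for a free request queue entry and return a pointer to it,
 *	zeroed and with entry_count preset to 1.  Polls the ISP (via
 *	qla1280_poll) while waiting, giving up after a bounded number of
 *	iterations and returning NULL if the queue never drains.
 */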
3239static request_t *
3240qla1280_req_pkt(struct scsi_qla_host *ha)
3241{
3242 struct device_reg __iomem *reg = ha->iobase;
3243 request_t *pkt = NULL;
3244 int cnt;
3245 uint32_t timer;
3246
3247 ENTER("qla1280_req_pkt");
3248
3249
3250
3251
3252
3253 for (timer = 15000000; timer; timer--) {
3254 if (ha->req_q_cnt > 0) {
3255
			cnt = RD_REG_WORD(&reg->mailbox4);
3257 if (ha->req_ring_index < cnt)
3258 ha->req_q_cnt = cnt - ha->req_ring_index;
3259 else
3260 ha->req_q_cnt =
3261 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
3262 }
3263
3264
3265 if (ha->req_q_cnt > 0) {
3266 ha->req_q_cnt--;
3267 pkt = ha->request_ring_ptr;
3268
3269
3270 memset(pkt, 0, REQUEST_ENTRY_SIZE);
3271
3272
3273
3274
3275
3276
3277 pkt->sys_define = (uint8_t) ha->req_ring_index;
3278
3279
3280 pkt->entry_count = 1;
3281
3282 break;
3283 }
3284
3285 udelay(2);
3286
3287
3288 qla1280_poll(ha);
3289 }
3290
3291 if (!pkt)
3292 dprintk(2, "qla1280_req_pkt: **** FAILED ****\n");
3293 else
3294 dprintk(3, "qla1280_req_pkt: exiting normally\n");
3295
3296 return pkt;
3297}
3298
3299
3300
3301
3302
3303
3304
3305
3306
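/*
 * qla1280_isp_cmd
 *	Hand the IOCB currently at request_ring_ptr to the ISP: advance
 *	the ring index (wrapping at REQUEST_ENTRY_CNT) and write the new
 *	in-pointer to mailbox register 4 to notify the RISC.
 */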
3307static void
3308qla1280_isp_cmd(struct scsi_qla_host *ha)
3309{
3310 struct device_reg __iomem *reg = ha->iobase;
3311
3312 ENTER("qla1280_isp_cmd");
3313
3314 dprintk(5, "qla1280_isp_cmd: IOCB data:\n");
3315 qla1280_dump_buffer(5, (char *)ha->request_ring_ptr,
3316 REQUEST_ENTRY_SIZE);
3317
3318
3319 ha->req_ring_index++;
3320 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3321 ha->req_ring_index = 0;
3322 ha->request_ring_ptr = ha->request_ring;
3323 } else
3324 ha->request_ring_ptr++;
3325
3326
3327
3328
	WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
3330
3331 LEAVE("qla1280_isp_cmd");
3332}
3333
3334
3335
3336
3337
3338
3339
3340
3341
3342
3343
3344
3345
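/*
 * qla1280_isr
 *	Interrupt service body.  Handles mailbox and asynchronous events
 *	signalled through the semaphore register (SCSI completions,
 *	bus/device resets, firmware errors), completes pending mailbox
 *	commands, then walks the response ring and queues finished
 *	commands on 'done_q'.
 */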
3346static void
3347qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q)
3348{
3349 struct device_reg __iomem *reg = ha->iobase;
3350 struct response *pkt;
3351 struct srb *sp = NULL;
3352 uint16_t mailbox[MAILBOX_REGISTER_COUNT];
3353 uint16_t *wptr;
3354 uint32_t index;
3355 u16 istatus;
3356
3357 ENTER("qla1280_isr");
3358
	istatus = RD_REG_WORD(&reg->istatus);
3360 if (!(istatus & (RISC_INT | PCI_INT)))
3361 return;
3362
3363
	mailbox[5] = RD_REG_WORD(&reg->mailbox5);
3365
3366
3367
	mailbox[0] = RD_REG_WORD_dmasync(&reg->semaphore);
3369
3370 if (mailbox[0] & BIT_0) {
3371
3372
3373
		wptr = &mailbox[0];
		*wptr++ = RD_REG_WORD(&reg->mailbox0);
		*wptr++ = RD_REG_WORD(&reg->mailbox1);
		*wptr = RD_REG_WORD(&reg->mailbox2);
		if (mailbox[0] != MBA_SCSI_COMPLETION) {
			wptr++;
			*wptr++ = RD_REG_WORD(&reg->mailbox3);
			*wptr++ = RD_REG_WORD(&reg->mailbox4);
			wptr++;
			*wptr++ = RD_REG_WORD(&reg->mailbox6);
			*wptr = RD_REG_WORD(&reg->mailbox7);
		}
3386
3387
3388
		WRT_REG_WORD(&reg->semaphore, 0);
		WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
3391
3392 dprintk(5, "qla1280_isr: mailbox interrupt mailbox[0] = 0x%x",
3393 mailbox[0]);
3394
3395
3396 switch (mailbox[0]) {
3397 case MBA_SCSI_COMPLETION:
3398 dprintk(5, "qla1280_isr: mailbox SCSI response "
3399 "completion\n");
3400
3401 if (ha->flags.online) {
3402
3403 index = mailbox[2] << 16 | mailbox[1];
3404
3405
3406 if (index < MAX_OUTSTANDING_COMMANDS)
3407 sp = ha->outstanding_cmds[index];
3408 else
3409 sp = NULL;
3410
3411 if (sp) {
3412
3413 ha->outstanding_cmds[index] = NULL;
3414
3415
3416 CMD_RESULT(sp->cmd) = 0;
3417 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3418
3419
3420 list_add_tail(&sp->list, done_q);
3421 } else {
3422
3423
3424
3425 printk(KERN_WARNING
3426 "qla1280: ISP invalid handle\n");
3427 }
3428 }
3429 break;
3430
3431 case MBA_BUS_RESET:
3432 ha->flags.reset_marker = 1;
3433 index = mailbox[6] & BIT_0;
3434 ha->bus_settings[index].reset_marker = 1;
3435
3436 printk(KERN_DEBUG "qla1280_isr(): index %i "
3437 "asynchronous BUS_RESET\n", index);
3438 break;
3439
3440 case MBA_SYSTEM_ERR:
3441 printk(KERN_WARNING
3442 "qla1280: ISP System Error - mbx1=%xh, mbx2="
3443 "%xh, mbx3=%xh\n", mailbox[1], mailbox[2],
3444 mailbox[3]);
3445 break;
3446
3447 case MBA_REQ_TRANSFER_ERR:
3448 printk(KERN_WARNING
3449 "qla1280: ISP Request Transfer Error\n");
3450 break;
3451
3452 case MBA_RSP_TRANSFER_ERR:
3453 printk(KERN_WARNING
3454 "qla1280: ISP Response Transfer Error\n");
3455 break;
3456
3457 case MBA_WAKEUP_THRES:
3458 dprintk(2, "qla1280_isr: asynchronous WAKEUP_THRES\n");
3459 break;
3460
3461 case MBA_TIMEOUT_RESET:
3462 dprintk(2,
3463 "qla1280_isr: asynchronous TIMEOUT_RESET\n");
3464 break;
3465
3466 case MBA_DEVICE_RESET:
3467 printk(KERN_INFO "qla1280_isr(): asynchronous "
3468 "BUS_DEVICE_RESET\n");
3469
3470 ha->flags.reset_marker = 1;
3471 index = mailbox[6] & BIT_0;
3472 ha->bus_settings[index].reset_marker = 1;
3473 break;
3474
3475 case MBA_BUS_MODE_CHANGE:
3476 dprintk(2,
3477 "qla1280_isr: asynchronous BUS_MODE_CHANGE\n");
3478 break;
3479
3480 default:
3481
3482 if (mailbox[0] < MBA_ASYNC_EVENT) {
3483 wptr = &mailbox[0];
3484 memcpy((uint16_t *) ha->mailbox_out, wptr,
3485 MAILBOX_REGISTER_COUNT *
3486 sizeof(uint16_t));
3487
3488 if(ha->mailbox_wait != NULL)
3489 complete(ha->mailbox_wait);
3490 }
3491 break;
3492 }
3493 } else {
		WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
3495 }
3496
3497
3498
3499
3500
3501 if (!(ha->flags.online && !ha->mailbox_wait)) {
3502 dprintk(2, "qla1280_isr: Response pointer Error\n");
3503 goto out;
3504 }
3505
3506 if (mailbox[5] >= RESPONSE_ENTRY_CNT)
3507 goto out;
3508
3509 while (ha->rsp_ring_index != mailbox[5]) {
3510 pkt = ha->response_ring_ptr;
3511
3512 dprintk(5, "qla1280_isr: ha->rsp_ring_index = 0x%x, mailbox[5]"
3513 " = 0x%x\n", ha->rsp_ring_index, mailbox[5]);
3514 dprintk(5,"qla1280_isr: response packet data\n");
3515 qla1280_dump_buffer(5, (char *)pkt, RESPONSE_ENTRY_SIZE);
3516
3517 if (pkt->entry_type == STATUS_TYPE) {
3518 if ((le16_to_cpu(pkt->scsi_status) & 0xff)
3519 || pkt->comp_status || pkt->entry_status) {
3520 dprintk(2, "qla1280_isr: ha->rsp_ring_index = "
3521 "0x%x mailbox[5] = 0x%x, comp_status "
3522 "= 0x%x, scsi_status = 0x%x\n",
3523 ha->rsp_ring_index, mailbox[5],
3524 le16_to_cpu(pkt->comp_status),
3525 le16_to_cpu(pkt->scsi_status));
3526 }
3527 } else {
3528 dprintk(2, "qla1280_isr: ha->rsp_ring_index = "
3529 "0x%x, mailbox[5] = 0x%x\n",
3530 ha->rsp_ring_index, mailbox[5]);
3531 dprintk(2, "qla1280_isr: response packet data\n");
3532 qla1280_dump_buffer(2, (char *)pkt,
3533 RESPONSE_ENTRY_SIZE);
3534 }
3535
3536 if (pkt->entry_type == STATUS_TYPE || pkt->entry_status) {
3537 dprintk(2, "status: Cmd %p, handle %i\n",
3538 ha->outstanding_cmds[pkt->handle]->cmd,
3539 pkt->handle);
3540 if (pkt->entry_type == STATUS_TYPE)
3541 qla1280_status_entry(ha, pkt, done_q);
3542 else
3543 qla1280_error_entry(ha, pkt, done_q);
3544
3545 ha->rsp_ring_index++;
3546 if (ha->rsp_ring_index == RESPONSE_ENTRY_CNT) {
3547 ha->rsp_ring_index = 0;
3548 ha->response_ring_ptr = ha->response_ring;
3549 } else
3550 ha->response_ring_ptr++;
			WRT_REG_WORD(&reg->mailbox5, ha->rsp_ring_index);
3552 }
3553 }
3554
3555 out:
3556 LEAVE("qla1280_isr");
3557}
3558
3559
3560
3561
3562
3563
3564
3565
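/*
 * qla1280_rst_aen
 *	Process pending reset asynchronous-event notifications: while any
 *	bus has its reset_marker set, issue a MK_SYNC_ALL marker for it so
 *	outstanding state is re-synchronized after the reset.
 */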
3566static void
3567qla1280_rst_aen(struct scsi_qla_host *ha)
3568{
3569 uint8_t bus;
3570
3571 ENTER("qla1280_rst_aen");
3572
3573 if (ha->flags.online && !ha->flags.reset_active &&
3574 !ha->flags.abort_isp_active) {
3575 ha->flags.reset_active = 1;
3576 while (ha->flags.reset_marker) {
3577
3578 ha->flags.reset_marker = 0;
3579 for (bus = 0; bus < ha->ports &&
3580 !ha->flags.reset_marker; bus++) {
3581 if (ha->bus_settings[bus].reset_marker) {
3582 ha->bus_settings[bus].reset_marker = 0;
3583 qla1280_marker(ha, bus, 0, 0,
3584 MK_SYNC_ALL);
3585 }
3586 }
3587 }
3588 }
3589
3590 LEAVE("qla1280_rst_aen");
3591}
3592
3593
3594
3595
3596
3597
3598
3599
3600
3601
3602
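/*
 * qla1280_status_entry
 *	Translate a STATUS_TYPE response IOCB into a SCSI midlayer result:
 *	look up the owning SRB by handle, copy any request sense data into
 *	cmd->sense_buffer, set CMD_RESULT() and move the SRB onto the done
 *	queue.
 */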
3603static void
3604qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
3605 struct list_head *done_q)
3606{
3607 unsigned int bus, target, lun;
3608 int sense_sz;
3609 struct srb *sp;
3610 struct scsi_cmnd *cmd;
3611 uint32_t handle = le32_to_cpu(pkt->handle);
3612 uint16_t scsi_status = le16_to_cpu(pkt->scsi_status);
3613 uint16_t comp_status = le16_to_cpu(pkt->comp_status);
3614
3615 ENTER("qla1280_status_entry");
3616
3617
3618 if (handle < MAX_OUTSTANDING_COMMANDS)
3619 sp = ha->outstanding_cmds[handle];
3620 else
3621 sp = NULL;
3622
3623 if (!sp) {
3624 printk(KERN_WARNING "qla1280: Status Entry invalid handle\n");
3625 goto out;
3626 }
3627
3628
3629 ha->outstanding_cmds[handle] = NULL;
3630
3631 cmd = sp->cmd;
3632
3633
3634 bus = SCSI_BUS_32(cmd);
3635 target = SCSI_TCN_32(cmd);
3636 lun = SCSI_LUN_32(cmd);
3637
3638 if (comp_status || scsi_status) {
3639 dprintk(3, "scsi: comp_status = 0x%x, scsi_status = "
3640 "0x%x, handle = 0x%x\n", comp_status,
3641 scsi_status, handle);
3642 }
3643
3644
3645 if ((scsi_status & 0xFF) == SAM_STAT_TASK_SET_FULL ||
3646 (scsi_status & 0xFF) == SAM_STAT_BUSY) {
3647 CMD_RESULT(cmd) = scsi_status & 0xff;
3648 } else {
3649
3650
3651 CMD_RESULT(cmd) = qla1280_return_status(pkt, cmd);
3652
3653 if (scsi_status & SAM_STAT_CHECK_CONDITION) {
3654 if (comp_status != CS_ARS_FAILED) {
3655 uint16_t req_sense_length =
3656 le16_to_cpu(pkt->req_sense_length);
3657 if (req_sense_length < CMD_SNSLEN(cmd))
3658 sense_sz = req_sense_length;
3659 else
3660
3661
3662
3663
3664
3665 sense_sz = CMD_SNSLEN(cmd) - 1;
3666
3667 memcpy(cmd->sense_buffer,
3668 &pkt->req_sense_data, sense_sz);
3669 } else
3670 sense_sz = 0;
3671 memset(cmd->sense_buffer + sense_sz, 0,
3672 SCSI_SENSE_BUFFERSIZE - sense_sz);
3673
3674 dprintk(2, "qla1280_status_entry: Check "
3675 "condition Sense data, b %i, t %i, "
3676 "l %i\n", bus, target, lun);
3677 if (sense_sz)
3678 qla1280_dump_buffer(2,
3679 (char *)cmd->sense_buffer,
3680 sense_sz);
3681 }
3682 }
3683
3684 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3685
3686
3687 list_add_tail(&sp->list, done_q);
3688 out:
3689 LEAVE("qla1280_status_entry");
3690}
3691
3692
3693
3694
3695
3696
3697
3698
3699
3700
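/*
 * qla1280_error_entry
 *	Handle a response IOCB with a non-zero entry_status.  The owning
 *	command, if the handle is valid, is completed with DID_ERROR (or
 *	DID_BUS_BUSY for a "full" error) and placed on the done queue.
 */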
3701static void
3702qla1280_error_entry(struct scsi_qla_host *ha, struct response *pkt,
3703 struct list_head *done_q)
3704{
3705 struct srb *sp;
3706 uint32_t handle = le32_to_cpu(pkt->handle);
3707
3708 ENTER("qla1280_error_entry");
3709
3710 if (pkt->entry_status & BIT_3)
3711 dprintk(2, "qla1280_error_entry: BAD PAYLOAD flag error\n");
3712 else if (pkt->entry_status & BIT_2)
3713 dprintk(2, "qla1280_error_entry: BAD HEADER flag error\n");
3714 else if (pkt->entry_status & BIT_1)
3715 dprintk(2, "qla1280_error_entry: FULL flag error\n");
3716 else
3717 dprintk(2, "qla1280_error_entry: UNKNOWN flag error\n");
3718
3719
3720 if (handle < MAX_OUTSTANDING_COMMANDS)
3721 sp = ha->outstanding_cmds[handle];
3722 else
3723 sp = NULL;
3724
3725 if (sp) {
3726
3727 ha->outstanding_cmds[handle] = NULL;
3728
3729
3730 if (pkt->entry_status & (BIT_3 + BIT_2)) {
3731
3732
3733 CMD_RESULT(sp->cmd) = DID_ERROR << 16;
3734 } else if (pkt->entry_status & BIT_1) {
3735 CMD_RESULT(sp->cmd) = DID_BUS_BUSY << 16;
3736 } else {
3737
3738 CMD_RESULT(sp->cmd) = DID_ERROR << 16;
3739 }
3740
3741 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3742
3743
3744 list_add_tail(&sp->list, done_q);
3745 }
3746#ifdef QLA_64BIT_PTR
3747 else if (pkt->entry_type == COMMAND_A64_TYPE) {
		printk(KERN_WARNING "qla1280: Error Entry invalid handle\n");
3749 }
3750#endif
3751
3752 LEAVE("qla1280_error_entry");
3753}
3754
3755
3756
3757
3758
3759
3760
3761
3762
3763
3764
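/*
 * qla1280_abort_isp
 *	Full ISP error recovery: pause the RISC, fail all outstanding
 *	commands with DID_RESET, reload the firmware, re-read the NVRAM
 *	configuration, re-initialize the rings and reset every bus.  If
 *	any step fails the adapter is reset and left disabled.
 */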
3765static int
3766qla1280_abort_isp(struct scsi_qla_host *ha)
3767{
3768 struct device_reg __iomem *reg = ha->iobase;
3769 struct srb *sp;
3770 int status = 0;
3771 int cnt;
3772 int bus;
3773
3774 ENTER("qla1280_abort_isp");
3775
3776 if (ha->flags.abort_isp_active || !ha->flags.online)
3777 goto out;
3778
3779 ha->flags.abort_isp_active = 1;
3780
3781
3782 qla1280_disable_intrs(ha);
	WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
	RD_REG_WORD(&reg->id_l);
3785
3786 printk(KERN_INFO "scsi(%li): dequeuing outstanding commands\n",
3787 ha->host_no);
3788
3789 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
3790 struct scsi_cmnd *cmd;
3791 sp = ha->outstanding_cmds[cnt];
3792 if (sp) {
3793 cmd = sp->cmd;
3794 CMD_RESULT(cmd) = DID_RESET << 16;
3795 CMD_HANDLE(cmd) = COMPLETED_HANDLE;
3796 ha->outstanding_cmds[cnt] = NULL;
3797 list_add_tail(&sp->list, &ha->done_q);
3798 }
3799 }
3800
3801 qla1280_done(ha);
3802
3803 status = qla1280_load_firmware(ha);
3804 if (status)
3805 goto out;
3806
3807
3808 qla1280_nvram_config (ha);
3809
3810 status = qla1280_init_rings(ha);
3811 if (status)
3812 goto out;
3813
3814
3815 for (bus = 0; bus < ha->ports; bus++)
3816 qla1280_bus_reset(ha, bus);
3817
3818 ha->flags.abort_isp_active = 0;
3819 out:
3820 if (status) {
		printk(KERN_WARNING
		       "qla1280: ISP error recovery failed, board disabled\n");
3823 qla1280_reset_adapter(ha);
3824 dprintk(2, "qla1280_abort_isp: **** FAILED ****\n");
3825 }
3826
3827 LEAVE("qla1280_abort_isp");
3828 return status;
3829}
3830
3831
3832
3833
3834
3835
3836
3837
3838
3839
3840
3841
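/*
 * qla1280_debounce_register
 *	Read a 16-bit register repeatedly until two consecutive reads
 *	return the same value, guarding against catching the register
 *	mid-update.
 */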
3842static u16
3843qla1280_debounce_register(volatile u16 __iomem * addr)
3844{
3845 volatile u16 ret;
3846 volatile u16 ret2;
3847
3848 ret = RD_REG_WORD(addr);
3849 ret2 = RD_REG_WORD(addr);
3850
3851 if (ret == ret2)
3852 return ret;
3853
3854 do {
3855 cpu_relax();
3856 ret = RD_REG_WORD(addr);
3857 ret2 = RD_REG_WORD(addr);
3858 } while (ret != ret2);
3859
3860 return ret;
3861}
3862
3863
3864
3865
3866
3867
3868
3869#define SET_SXP_BANK 0x0100
3870#define SCSI_PHASE_INVALID 0x87FF
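/*
 * qla1280_check_for_dead_scsi_bus
 *	If a bus has previously been marked dead, peek at the SXP SCSI
 *	control pins (with the RISC paused) to see whether the bus is
 *	still stuck in an invalid phase.  Returns 1 if the bus is (still)
 *	dead, 0 otherwise.
 */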
3871static int
3872qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *ha, unsigned int bus)
3873{
3874 uint16_t config_reg, scsi_control;
3875 struct device_reg __iomem *reg = ha->iobase;
3876
3877 if (ha->bus_settings[bus].scsi_bus_dead) {
		WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
		config_reg = RD_REG_WORD(&reg->cfg_1);
		WRT_REG_WORD(&reg->cfg_1, SET_SXP_BANK);
		scsi_control = RD_REG_WORD(&reg->scsiControlPins);
		WRT_REG_WORD(&reg->cfg_1, config_reg);
		WRT_REG_WORD(&reg->host_cmd, HC_RELEASE_RISC);
3884
3885 if (scsi_control == SCSI_PHASE_INVALID) {
3886 ha->bus_settings[bus].scsi_bus_dead = 1;
3887 return 1;
3888 } else {
3889 ha->bus_settings[bus].scsi_bus_dead = 0;
3890 ha->bus_settings[bus].failed_reset_count = 0;
3891 }
3892 }
3893 return 0;
3894}
3895
3896static void
3897qla1280_get_target_parameters(struct scsi_qla_host *ha,
3898 struct scsi_device *device)
3899{
3900 uint16_t mb[MAILBOX_REGISTER_COUNT];
3901 int bus, target, lun;
3902
3903 bus = device->channel;
3904 target = device->id;
3905 lun = device->lun;
3906
3907
3908 mb[0] = MBC_GET_TARGET_PARAMETERS;
3909 mb[1] = (uint16_t) (bus ? target | BIT_7 : target);
3910 mb[1] <<= 8;
3911 qla1280_mailbox_command(ha, BIT_6 | BIT_3 | BIT_2 | BIT_1 | BIT_0,
3912 &mb[0]);
3913
3914 printk(KERN_INFO "scsi(%li:%d:%d:%d):", ha->host_no, bus, target, lun);
3915
3916 if (mb[3] != 0) {
3917 printk(" Sync: period %d, offset %d",
3918 (mb[3] & 0xff), (mb[3] >> 8));
3919 if (mb[2] & BIT_13)
3920 printk(", Wide");
3921 if ((mb[2] & BIT_5) && ((mb[6] >> 8) & 0xff) >= 2)
3922 printk(", DT");
3923 } else
3924 printk(" Async");
3925
3926 if (device->simple_tags)
3927 printk(", Tagged queuing: depth %d", device->queue_depth);
3928 printk("\n");
3929}
3930
3931
3932#if DEBUG_QLA1280
3933static void
3934__qla1280_dump_buffer(char *b, int size)
3935{
3936 int cnt;
3937 u8 c;
3938
3939 printk(KERN_DEBUG " 0 1 2 3 4 5 6 7 8 9 Ah "
3940 "Bh Ch Dh Eh Fh\n");
3941 printk(KERN_DEBUG "---------------------------------------------"
3942 "------------------\n");
3943
3944 for (cnt = 0; cnt < size;) {
3945 c = *b++;
3946
3947 printk("0x%02x", c);
3948 cnt++;
3949 if (!(cnt % 16))
3950 printk("\n");
3951 else
3952 printk(" ");
3953 }
3954 if (cnt % 16)
3955 printk("\n");
3956}
3957
3958
3959
3960
3961
3962static void
3963__qla1280_print_scsi_cmd(struct scsi_cmnd *cmd)
3964{
3965 struct scsi_qla_host *ha;
3966 struct Scsi_Host *host = CMD_HOST(cmd);
3967 struct srb *sp;
3968
3969
3970 int i;
3971 ha = (struct scsi_qla_host *)host->hostdata;
3972
3973 sp = (struct srb *)CMD_SP(cmd);
3974 printk("SCSI Command @= 0x%p, Handle=0x%p\n", cmd, CMD_HANDLE(cmd));
3975 printk(" chan=%d, target = 0x%02x, lun = 0x%02x, cmd_len = 0x%02x\n",
3976 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd),
3977 CMD_CDBLEN(cmd));
3978 printk(" CDB = ");
3979 for (i = 0; i < cmd->cmd_len; i++) {
3980 printk("0x%02x ", cmd->cmnd[i]);
3981 }
3982 printk(" seg_cnt =%d\n", scsi_sg_count(cmd));
3983 printk(" request buffer=0x%p, request buffer len=0x%x\n",
3984 scsi_sglist(cmd), scsi_bufflen(cmd));
3985
3986
3987
3988
3989
3990
3991 printk(" tag=%d, transfersize=0x%x \n",
3992 cmd->tag, cmd->transfersize);
3993 printk(" SP=0x%p\n", CMD_SP(cmd));
3994 printk(" underflow size = 0x%x, direction=0x%x\n",
3995 cmd->underflow, cmd->sc_data_direction);
3996}
3997
3998
3999
4000
4001
4002static void
4003ql1280_dump_device(struct scsi_qla_host *ha)
4004{
4005
4006 struct scsi_cmnd *cp;
4007 struct srb *sp;
4008 int i;
4009
4010 printk(KERN_DEBUG "Outstanding Commands on controller:\n");
4011
4012 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
4013 if ((sp = ha->outstanding_cmds[i]) == NULL)
4014 continue;
4015 if ((cp = sp->cmd) == NULL)
4016 continue;
4017 qla1280_print_scsi_cmd(1, cp);
4018 }
4019}
4020#endif
4021
4022
4023enum tokens {
4024 TOKEN_NVRAM,
4025 TOKEN_SYNC,
4026 TOKEN_WIDE,
4027 TOKEN_PPR,
4028 TOKEN_VERBOSE,
4029 TOKEN_DEBUG,
4030};
4031
4032struct setup_tokens {
4033 char *token;
4034 int val;
4035};
4036
4037static struct setup_tokens setup_token[] __initdata =
4038{
4039 { "nvram", TOKEN_NVRAM },
4040 { "sync", TOKEN_SYNC },
4041 { "wide", TOKEN_WIDE },
4042 { "ppr", TOKEN_PPR },
4043 { "verbose", TOKEN_VERBOSE },
4044 { "debug", TOKEN_DEBUG },
4045};
4046
4047
4048
4049
4050
4051
4052
4053
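/*
 * qla1280_setup
 *	Parse the "qla1280=..." boot/module option string: a ';'-separated
 *	list of "token:value" pairs ("yes"/"no" are accepted in place of
 *	numeric values) used to fill in the driver_setup defaults.
 */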
4054static int __init
4055qla1280_setup(char *s)
4056{
4057 char *cp, *ptr;
4058 unsigned long val;
4059 int toke;
4060
4061 cp = s;
4062
4063 while (cp && (ptr = strchr(cp, ':'))) {
4064 ptr++;
4065 if (!strcmp(ptr, "yes")) {
4066 val = 0x10000;
4067 ptr += 3;
4068 } else if (!strcmp(ptr, "no")) {
4069 val = 0;
4070 ptr += 2;
4071 } else
4072 val = simple_strtoul(ptr, &ptr, 0);
4073
4074 switch ((toke = qla1280_get_token(cp))) {
4075 case TOKEN_NVRAM:
4076 if (!val)
4077 driver_setup.no_nvram = 1;
4078 break;
4079 case TOKEN_SYNC:
4080 if (!val)
4081 driver_setup.no_sync = 1;
4082 else if (val != 0x10000)
4083 driver_setup.sync_mask = val;
4084 break;
4085 case TOKEN_WIDE:
4086 if (!val)
4087 driver_setup.no_wide = 1;
4088 else if (val != 0x10000)
4089 driver_setup.wide_mask = val;
4090 break;
4091 case TOKEN_PPR:
4092 if (!val)
4093 driver_setup.no_ppr = 1;
4094 else if (val != 0x10000)
4095 driver_setup.ppr_mask = val;
4096 break;
4097 case TOKEN_VERBOSE:
4098 qla1280_verbose = val;
4099 break;
4100 default:
4101 printk(KERN_INFO "qla1280: unknown boot option %s\n",
4102 cp);
4103 }
4104
4105 cp = strchr(ptr, ';');
4106 if (cp)
4107 cp++;
4108 else {
4109 break;
4110 }
4111 }
4112 return 1;
4113}
4114
4115
4116static int __init
4117qla1280_get_token(char *str)
4118{
4119 char *sep;
4120 long ret = -1;
4121 int i;
4122
4123 sep = strchr(str, ':');
4124
4125 if (sep) {
4126 for (i = 0; i < ARRAY_SIZE(setup_token); i++) {
4127 if (!strncmp(setup_token[i].token, str, (sep - str))) {
4128 ret = setup_token[i].val;
4129 break;
4130 }
4131 }
4132 }
4133
4134 return ret;
4135}
4136
4137
4138static struct scsi_host_template qla1280_driver_template = {
4139 .module = THIS_MODULE,
4140 .proc_name = "qla1280",
4141 .name = "Qlogic ISP 1280/12160",
4142 .info = qla1280_info,
4143 .slave_configure = qla1280_slave_configure,
4144 .queuecommand = qla1280_queuecommand,
4145 .eh_abort_handler = qla1280_eh_abort,
4146 .eh_device_reset_handler= qla1280_eh_device_reset,
4147 .eh_bus_reset_handler = qla1280_eh_bus_reset,
4148 .eh_host_reset_handler = qla1280_eh_adapter_reset,
4149 .bios_param = qla1280_biosparam,
4150 .can_queue = MAX_OUTSTANDING_COMMANDS,
4151 .this_id = -1,
4152 .sg_tablesize = SG_ALL,
4153};
4154
4155
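/*
 * qla1280_probe_one
 *	PCI probe entry point: enable the device, set the DMA mask,
 *	allocate the request/response rings and the Scsi_Host, map the
 *	register BAR (or reserve the I/O port region), hook up the
 *	interrupt, initialize the adapter and register with the SCSI
 *	midlayer.
 */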
4156static int
4157qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4158{
4159 int devnum = id->driver_data;
4160 struct qla_boards *bdp = &ql1280_board_tbl[devnum];
4161 struct Scsi_Host *host;
4162 struct scsi_qla_host *ha;
4163 int error = -ENODEV;
4164
4165
4166 if (pdev->subsystem_vendor == PCI_VENDOR_ID_AMI) {
4167 printk(KERN_INFO
4168 "qla1280: Skipping AMI SubSys Vendor ID Chip\n");
4169 goto error;
4170 }
4171
4172 printk(KERN_INFO "qla1280: %s found on PCI bus %i, dev %i\n",
4173 bdp->name, pdev->bus->number, PCI_SLOT(pdev->devfn));
4174
4175 if (pci_enable_device(pdev)) {
		printk(KERN_WARNING
		       "qla1280: Failed to enable PCI device, aborting.\n");
4178 goto error;
4179 }
4180
4181 pci_set_master(pdev);
4182
4183 error = -ENOMEM;
4184 host = scsi_host_alloc(&qla1280_driver_template, sizeof(*ha));
4185 if (!host) {
4186 printk(KERN_WARNING
4187 "qla1280: Failed to register host, aborting.\n");
4188 goto error_disable_device;
4189 }
4190
4191 ha = (struct scsi_qla_host *)host->hostdata;
4192 memset(ha, 0, sizeof(struct scsi_qla_host));
4193
4194 ha->pdev = pdev;
4195 ha->devnum = devnum;
4196
4197#ifdef QLA_64BIT_PTR
4198 if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64))) {
4199 if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) {
4200 printk(KERN_WARNING "scsi(%li): Unable to set a "
4201 "suitable DMA mask - aborting\n", ha->host_no);
4202 error = -ENODEV;
4203 goto error_put_host;
4204 }
4205 } else
4206 dprintk(2, "scsi(%li): 64 Bit PCI Addressing Enabled\n",
4207 ha->host_no);
4208#else
4209 if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) {
4210 printk(KERN_WARNING "scsi(%li): Unable to set a "
4211 "suitable DMA mask - aborting\n", ha->host_no);
4212 error = -ENODEV;
4213 goto error_put_host;
4214 }
4215#endif
4216
4217 ha->request_ring = dma_alloc_coherent(&ha->pdev->dev,
4218 ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
4219 &ha->request_dma, GFP_KERNEL);
4220 if (!ha->request_ring) {
4221 printk(KERN_INFO "qla1280: Failed to get request memory\n");
4222 goto error_put_host;
4223 }
4224
4225 ha->response_ring = dma_alloc_coherent(&ha->pdev->dev,
4226 ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
4227 &ha->response_dma, GFP_KERNEL);
4228 if (!ha->response_ring) {
4229 printk(KERN_INFO "qla1280: Failed to get response memory\n");
4230 goto error_free_request_ring;
4231 }
4232
4233 ha->ports = bdp->numPorts;
4234
4235 ha->host = host;
4236 ha->host_no = host->host_no;
4237
4238 host->irq = pdev->irq;
4239 host->max_channel = bdp->numPorts - 1;
4240 host->max_lun = MAX_LUNS - 1;
4241 host->max_id = MAX_TARGETS;
4242 host->max_sectors = 1024;
4243 host->unique_id = host->host_no;
4244
4245 error = -ENODEV;
4246
4247#if MEMORY_MAPPED_IO
4248 ha->mmpbase = pci_ioremap_bar(ha->pdev, 1);
4249 if (!ha->mmpbase) {
4250 printk(KERN_INFO "qla1280: Unable to map I/O memory\n");
4251 goto error_free_response_ring;
4252 }
4253
4254 host->base = (unsigned long)ha->mmpbase;
4255 ha->iobase = (struct device_reg __iomem *)ha->mmpbase;
4256#else
4257 host->io_port = pci_resource_start(ha->pdev, 0);
4258 if (!request_region(host->io_port, 0xff, "qla1280")) {
4259 printk(KERN_INFO "qla1280: Failed to reserve i/o region "
4260 "0x%04lx-0x%04lx - already in use\n",
4261 host->io_port, host->io_port + 0xff);
4262 goto error_free_response_ring;
4263 }
4264
4265 ha->iobase = (struct device_reg *)host->io_port;
4266#endif
4267
4268 INIT_LIST_HEAD(&ha->done_q);
4269
4270
4271 qla1280_disable_intrs(ha);
4272
4273 if (request_irq(pdev->irq, qla1280_intr_handler, IRQF_SHARED,
4274 "qla1280", ha)) {
		printk(KERN_WARNING "qla1280: Failed to reserve interrupt %d,"
		       " already in use\n", pdev->irq);
4277 goto error_release_region;
4278 }
4279
4280
4281 if (qla1280_initialize_adapter(ha)) {
4282 printk(KERN_INFO "qla1x160: Failed to initialize adapter\n");
4283 goto error_free_irq;
4284 }
4285
4286
4287 host->this_id = ha->bus_settings[0].id;
4288
4289 pci_set_drvdata(pdev, host);
4290
4291 error = scsi_add_host(host, &pdev->dev);
4292 if (error)
4293 goto error_disable_adapter;
4294 scsi_scan_host(host);
4295
4296 return 0;
4297
4298 error_disable_adapter:
4299 qla1280_disable_intrs(ha);
4300 error_free_irq:
4301 free_irq(pdev->irq, ha);
4302 error_release_region:
4303#if MEMORY_MAPPED_IO
4304 iounmap(ha->mmpbase);
4305#else
4306 release_region(host->io_port, 0xff);
4307#endif
4308 error_free_response_ring:
4309 dma_free_coherent(&ha->pdev->dev,
4310 ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
4311 ha->response_ring, ha->response_dma);
4312 error_free_request_ring:
4313 dma_free_coherent(&ha->pdev->dev,
4314 ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
4315 ha->request_ring, ha->request_dma);
4316 error_put_host:
4317 scsi_host_put(host);
4318 error_disable_device:
4319 pci_disable_device(pdev);
4320 error:
4321 return error;
4322}
4323
4324
4325static void
4326qla1280_remove_one(struct pci_dev *pdev)
4327{
4328 struct Scsi_Host *host = pci_get_drvdata(pdev);
4329 struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
4330
4331 scsi_remove_host(host);
4332
4333 qla1280_disable_intrs(ha);
4334
4335 free_irq(pdev->irq, ha);
4336
4337#if MEMORY_MAPPED_IO
4338 iounmap(ha->mmpbase);
4339#else
4340 release_region(host->io_port, 0xff);
4341#endif
4342
4343 dma_free_coherent(&ha->pdev->dev,
4344 ((REQUEST_ENTRY_CNT + 1) * (sizeof(request_t))),
4345 ha->request_ring, ha->request_dma);
4346 dma_free_coherent(&ha->pdev->dev,
4347 ((RESPONSE_ENTRY_CNT + 1) * (sizeof(struct response))),
4348 ha->response_ring, ha->response_dma);
4349
4350 pci_disable_device(pdev);
4351
4352 scsi_host_put(host);
4353}
4354
4355static struct pci_driver qla1280_pci_driver = {
4356 .name = "qla1280",
4357 .id_table = qla1280_pci_tbl,
4358 .probe = qla1280_probe_one,
4359 .remove = qla1280_remove_one,
4360};
4361
4362static int __init
4363qla1280_init(void)
4364{
4365 if (sizeof(struct srb) > sizeof(struct scsi_pointer)) {
4366 printk(KERN_WARNING
4367 "qla1280: struct srb too big, aborting\n");
4368 return -EINVAL;
4369 }
4370
4371#ifdef MODULE
4372
4373
4374
4375
4376
4377
4378
4379
4380
4381
4382
4383
4384 if (qla1280)
4385 qla1280_setup(qla1280);
4386#endif
4387
4388 return pci_register_driver(&qla1280_pci_driver);
4389}
4390
4391static void __exit
4392qla1280_exit(void)
4393{
4394 int i;
4395
4396 pci_unregister_driver(&qla1280_pci_driver);
4397
4398 for (i = 0; i < QL_NUM_FW_IMAGES; i++) {
4399 release_firmware(qla1280_fw_tbl[i].fw);
4400 qla1280_fw_tbl[i].fw = NULL;
4401 }
4402}
4403
4404module_init(qla1280_init);
4405module_exit(qla1280_exit);
4406
4407MODULE_AUTHOR("Qlogic & Jes Sorensen");
4408MODULE_DESCRIPTION("Qlogic ISP SCSI (qla1x80/qla1x160) driver");
4409MODULE_LICENSE("GPL");
4410MODULE_FIRMWARE("qlogic/1040.bin");
4411MODULE_FIRMWARE("qlogic/1280.bin");
4412MODULE_FIRMWARE("qlogic/12160.bin");
4413MODULE_VERSION(QLA1280_VERSION);
4414
4415
4416
4417
4418
4419
4420
4421
4422
4423
4424
4425
4426