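/*
 * QLogic QLA1280 SCSI host adapter driver
 * (ISP1020/1040/1080/1240/1280/10160/12160 based controllers).
 */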
#define QLA1280_VERSION		"3.27.1"
#include <linux/module.h>

#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/pci_ids.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
#include <asm/processor.h>
#include <asm/types.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#define DEBUG_QLA1280_INTR	0
#define DEBUG_PRINT_NVRAM	0
#define DEBUG_QLA1280		0

#define MEMORY_MAPPED_IO	1

#include "qla1280.h"

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
#define QLA_64BIT_PTR	1
#endif

#define NVRAM_DELAY()	udelay(500)

#define IS_ISP1040(ha)	(ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020)
#define IS_ISP1x40(ha)	(ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020 || \
			 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1240)
#define IS_ISP1x160(ha)	(ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP10160 || \
			 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP12160)
385
386static int qla1280_probe_one(struct pci_dev *, const struct pci_device_id *);
387static void qla1280_remove_one(struct pci_dev *);
388
389
390
391
392static void qla1280_done(struct scsi_qla_host *);
393static int qla1280_get_token(char *);
394static int qla1280_setup(char *s) __init;
395
396
397
398
399static int qla1280_load_firmware(struct scsi_qla_host *);
400static int qla1280_init_rings(struct scsi_qla_host *);
401static int qla1280_nvram_config(struct scsi_qla_host *);
402static int qla1280_mailbox_command(struct scsi_qla_host *,
403 uint8_t, uint16_t *);
404static int qla1280_bus_reset(struct scsi_qla_host *, int);
405static int qla1280_device_reset(struct scsi_qla_host *, int, int);
406static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int);
407static int qla1280_abort_isp(struct scsi_qla_host *);
408#ifdef QLA_64BIT_PTR
409static int qla1280_64bit_start_scsi(struct scsi_qla_host *, struct srb *);
410#else
411static int qla1280_32bit_start_scsi(struct scsi_qla_host *, struct srb *);
412#endif
413static void qla1280_nv_write(struct scsi_qla_host *, uint16_t);
414static void qla1280_poll(struct scsi_qla_host *);
415static void qla1280_reset_adapter(struct scsi_qla_host *);
416static void qla1280_marker(struct scsi_qla_host *, int, int, int, u8);
417static void qla1280_isp_cmd(struct scsi_qla_host *);
418static void qla1280_isr(struct scsi_qla_host *, struct list_head *);
419static void qla1280_rst_aen(struct scsi_qla_host *);
420static void qla1280_status_entry(struct scsi_qla_host *, struct response *,
421 struct list_head *);
422static void qla1280_error_entry(struct scsi_qla_host *, struct response *,
423 struct list_head *);
424static uint16_t qla1280_get_nvram_word(struct scsi_qla_host *, uint32_t);
425static uint16_t qla1280_nvram_request(struct scsi_qla_host *, uint32_t);
426static uint16_t qla1280_debounce_register(volatile uint16_t __iomem *);
427static request_t *qla1280_req_pkt(struct scsi_qla_host *);
428static int qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *,
429 unsigned int);
430static void qla1280_get_target_parameters(struct scsi_qla_host *,
431 struct scsi_device *);
432static int qla1280_set_target_parameters(struct scsi_qla_host *, int, int);
433
434
435static struct qla_driver_setup driver_setup;
436
437
438
439
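/*
 * Translate the midlayer DMA direction into the ISP command flag bits
 * (BIT_5 = data in from the device, BIT_6 = data out to the device).
 */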
static inline uint16_t
qla1280_data_direction(struct scsi_cmnd *cmnd)
{
	switch (cmnd->sc_data_direction) {
	case DMA_FROM_DEVICE:
		return BIT_5;
	case DMA_TO_DEVICE:
		return BIT_6;
	case DMA_BIDIRECTIONAL:
		return BIT_5 | BIT_6;
	case DMA_NONE:
	default:
		return 0;
	}
}
460
461#if DEBUG_QLA1280
462static void __qla1280_print_scsi_cmd(struct scsi_cmnd * cmd);
463static void __qla1280_dump_buffer(char *, int);
464#endif
465
466
467
468
469
470#ifdef MODULE
471static char *qla1280;
472
473
474module_param(qla1280, charp, 0);
475#else
476__setup("qla1280=", qla1280_setup);
477#endif
478
479
480
481
482
483
484
485
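/* Shorthand accessors for the struct scsi_cmnd fields used by this driver. */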
486#define CMD_SP(Cmnd) &Cmnd->SCp
487#define CMD_CDBLEN(Cmnd) Cmnd->cmd_len
488#define CMD_CDBP(Cmnd) Cmnd->cmnd
489#define CMD_SNSP(Cmnd) Cmnd->sense_buffer
490#define CMD_SNSLEN(Cmnd) SCSI_SENSE_BUFFERSIZE
491#define CMD_RESULT(Cmnd) Cmnd->result
492#define CMD_HANDLE(Cmnd) Cmnd->host_scribble
493
494#define CMD_HOST(Cmnd) Cmnd->device->host
495#define SCSI_BUS_32(Cmnd) Cmnd->device->channel
496#define SCSI_TCN_32(Cmnd) Cmnd->device->id
497#define SCSI_LUN_32(Cmnd) Cmnd->device->lun
498
499
500
501
502
503
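/*
 * Static description of each supported board: printable name, number of
 * SCSI buses (ports) and the index of its firmware image in qla1280_fw_tbl[].
 */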
504struct qla_boards {
505 char *name;
506 int numPorts;
507 int fw_index;
508};
509
510
511static struct pci_device_id qla1280_pci_tbl[] = {
512 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP12160,
513 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
514 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1020,
515 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
516 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1080,
517 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
518 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1240,
519 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3},
520 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1280,
521 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
522 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP10160,
523 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
524 {0,}
525};
526MODULE_DEVICE_TABLE(pci, qla1280_pci_tbl);
527
528static DEFINE_MUTEX(qla1280_firmware_mutex);
529
530struct qla_fw {
531 char *fwname;
532 const struct firmware *fw;
533};
534
535#define QL_NUM_FW_IMAGES 3
536
537static struct qla_fw qla1280_fw_tbl[QL_NUM_FW_IMAGES] = {
538 {"qlogic/1040.bin", NULL},
539 {"qlogic/1280.bin", NULL},
540 {"qlogic/12160.bin", NULL},
541};
542
543
544static struct qla_boards ql1280_board_tbl[] = {
545 {.name = "QLA12160", .numPorts = 2, .fw_index = 2},
546 {.name = "QLA1040" , .numPorts = 1, .fw_index = 0},
547 {.name = "QLA1080" , .numPorts = 1, .fw_index = 1},
548 {.name = "QLA1240" , .numPorts = 2, .fw_index = 1},
549 {.name = "QLA1280" , .numPorts = 2, .fw_index = 1},
550 {.name = "QLA10160", .numPorts = 1, .fw_index = 2},
551 {.name = " ", .numPorts = 0, .fw_index = -1},
552};
553
554static int qla1280_verbose = 1;
555
556#if DEBUG_QLA1280
557static int ql_debug_level = 1;
558#define dprintk(level, format, a...) \
559 do { if (ql_debug_level >= level) printk(KERN_ERR format, ##a); } while(0)
560#define qla1280_dump_buffer(level, buf, size) \
561 if (ql_debug_level >= level) __qla1280_dump_buffer(buf, size)
562#define qla1280_print_scsi_cmd(level, cmd) \
563 if (ql_debug_level >= level) __qla1280_print_scsi_cmd(cmd)
564#else
565#define ql_debug_level 0
566#define dprintk(level, format, a...) do{}while(0)
567#define qla1280_dump_buffer(a, b, c) do{}while(0)
568#define qla1280_print_scsi_cmd(a, b) do{}while(0)
569#endif
570
571#define ENTER(x) dprintk(3, "qla1280 : Entering %s()\n", x);
572#define LEAVE(x) dprintk(3, "qla1280 : Leaving %s()\n", x);
573#define ENTER_INTR(x) dprintk(4, "qla1280 : Entering %s()\n", x);
574#define LEAVE_INTR(x) dprintk(4, "qla1280 : Leaving %s()\n", x);
575
576
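/*
 * qla1280_read_nvram
 *	Read the adapter NVRAM one word at a time, verify the "ISP " ID and
 *	version, accumulate the checksum and fix up the endianness of the
 *	multi-byte fields.
 *
 * Returns 0 if the NVRAM contents were read and validated, non-zero otherwise.
 */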
577static int qla1280_read_nvram(struct scsi_qla_host *ha)
578{
579 uint16_t *wptr;
580 uint8_t chksum;
581 int cnt, i;
582 struct nvram *nv;
583
584 ENTER("qla1280_read_nvram");
585
586 if (driver_setup.no_nvram)
587 return 1;
588
589 printk(KERN_INFO "scsi(%ld): Reading NVRAM\n", ha->host_no);
590
591 wptr = (uint16_t *)&ha->nvram;
592 nv = &ha->nvram;
593 chksum = 0;
594 for (cnt = 0; cnt < 3; cnt++) {
595 *wptr = qla1280_get_nvram_word(ha, cnt);
596 chksum += *wptr & 0xff;
597 chksum += (*wptr >> 8) & 0xff;
598 wptr++;
599 }
600
601 if (nv->id0 != 'I' || nv->id1 != 'S' ||
602 nv->id2 != 'P' || nv->id3 != ' ' || nv->version < 1) {
603 dprintk(2, "Invalid nvram ID or version!\n");
604 chksum = 1;
605 } else {
606 for (; cnt < sizeof(struct nvram); cnt++) {
607 *wptr = qla1280_get_nvram_word(ha, cnt);
608 chksum += *wptr & 0xff;
609 chksum += (*wptr >> 8) & 0xff;
610 wptr++;
611 }
612 }
613
614 dprintk(3, "qla1280_read_nvram: NVRAM Magic ID= %c %c %c %02x"
615 " version %i\n", nv->id0, nv->id1, nv->id2, nv->id3,
616 nv->version);
617
618
619 if (chksum) {
620 if (!driver_setup.no_nvram)
621 printk(KERN_WARNING "scsi(%ld): Unable to identify or "
622 "validate NVRAM checksum, using default "
623 "settings\n", ha->host_no);
624 ha->nvram_valid = 0;
625 } else
626 ha->nvram_valid = 1;
627
644 nv->isp_parameter = cpu_to_le16(nv->isp_parameter);
645 nv->firmware_feature.w = cpu_to_le16(nv->firmware_feature.w);
646 for(i = 0; i < MAX_BUSES; i++) {
647 nv->bus[i].selection_timeout = cpu_to_le16(nv->bus[i].selection_timeout);
648 nv->bus[i].max_queue_depth = cpu_to_le16(nv->bus[i].max_queue_depth);
649 }
650 dprintk(1, "qla1280_read_nvram: Completed Reading NVRAM\n");
651 LEAVE("qla1280_read_nvram");
652
653 return chksum;
654}
655
656
657
658
659
660static const char *
661qla1280_info(struct Scsi_Host *host)
662{
663 static char qla1280_scsi_name_buffer[125];
664 char *bp;
665 struct scsi_qla_host *ha;
666 struct qla_boards *bdp;
667
668 bp = &qla1280_scsi_name_buffer[0];
669 ha = (struct scsi_qla_host *)host->hostdata;
670 bdp = &ql1280_board_tbl[ha->devnum];
671 memset(bp, 0, sizeof(qla1280_scsi_name_buffer));
672
673 sprintf (bp,
674 "QLogic %s PCI to SCSI Host Adapter\n"
675 " Firmware version: %2d.%02d.%02d, Driver version %s",
676 &bdp->name[0], ha->fwver1, ha->fwver2, ha->fwver3,
677 QLA1280_VERSION);
678 return bp;
679}
680
681
682
683
684
685
686
687
688
689
690
691
692static int qla1280_queuecommand_lck(struct scsi_cmnd *cmd)
693{
694 struct Scsi_Host *host = cmd->device->host;
695 struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
696 struct srb *sp = (struct srb *)CMD_SP(cmd);
697 int status;
698
699 sp->cmd = cmd;
700 sp->flags = 0;
701 sp->wait = NULL;
702 CMD_HANDLE(cmd) = (unsigned char *)NULL;
703
704 qla1280_print_scsi_cmd(5, cmd);
705
706#ifdef QLA_64BIT_PTR
707
708
709
710
711
712
713 status = qla1280_64bit_start_scsi(ha, sp);
714#else
715 status = qla1280_32bit_start_scsi(ha, sp);
716#endif
717 return status;
718}
719
720static DEF_SCSI_QCMD(qla1280_queuecommand)
721
722enum action {
723 ABORT_COMMAND,
724 DEVICE_RESET,
725 BUS_RESET,
726 ADAPTER_RESET,
727};
728
729
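/*
 * Fires when a mailbox command does not complete in time; records the
 * current mailbox0 value and wakes the waiter so qla1280_mailbox_command()
 * can report the failure.
 */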
static void qla1280_mailbox_timeout(struct timer_list *t)
{
	struct scsi_qla_host *ha = from_timer(ha, t, mailbox_timer);
	struct device_reg __iomem *reg;

	reg = ha->iobase;
	ha->mailbox_out[0] = RD_REG_WORD(&reg->mailbox0);
	printk(KERN_ERR "scsi(%ld): mailbox timed out, mailbox0 %04x, "
	       "ictrl %04x, istatus %04x\n", ha->host_no, ha->mailbox_out[0],
	       RD_REG_WORD(&reg->ictrl), RD_REG_WORD(&reg->istatus));
	complete(ha->mailbox_wait);
}
742
743static int
744_qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp,
745 struct completion *wait)
746{
747 int status = FAILED;
748 struct scsi_cmnd *cmd = sp->cmd;
749
750 spin_unlock_irq(ha->host->host_lock);
751 wait_for_completion_timeout(wait, 4*HZ);
752 spin_lock_irq(ha->host->host_lock);
753 sp->wait = NULL;
754 if(CMD_HANDLE(cmd) == COMPLETED_HANDLE) {
755 status = SUCCESS;
756 scsi_done(cmd);
757 }
758 return status;
759}
760
761static int
762qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp)
763{
764 DECLARE_COMPLETION_ONSTACK(wait);
765
766 sp->wait = &wait;
767 return _qla1280_wait_for_single_command(ha, sp, &wait);
768}
769
770static int
771qla1280_wait_for_pending_commands(struct scsi_qla_host *ha, int bus, int target)
772{
773 int cnt;
774 int status;
775 struct srb *sp;
776 struct scsi_cmnd *cmd;
777
778 status = SUCCESS;
779
780
781
782
783
784 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
785 sp = ha->outstanding_cmds[cnt];
786 if (sp) {
787 cmd = sp->cmd;
788
789 if (bus >= 0 && SCSI_BUS_32(cmd) != bus)
790 continue;
791 if (target >= 0 && SCSI_TCN_32(cmd) != target)
792 continue;
793
794 status = qla1280_wait_for_single_command(ha, sp);
795 if (status == FAILED)
796 break;
797 }
798 }
799 return status;
800}
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
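/*
 * qla1280_error_action
 *	Common backend for the eh_* handlers: issue the requested abort,
 *	device reset, bus reset or adapter reset, then wait for the affected
 *	command(s) to complete.
 *
 * Returns SUCCESS or FAILED.
 */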
816static int
817qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
818{
819 struct scsi_qla_host *ha;
820 int bus, target, lun;
821 struct srb *sp;
822 int i, found;
823 int result=FAILED;
824 int wait_for_bus=-1;
825 int wait_for_target = -1;
826 DECLARE_COMPLETION_ONSTACK(wait);
827
828 ENTER("qla1280_error_action");
829
830 ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata);
831 sp = (struct srb *)CMD_SP(cmd);
832 bus = SCSI_BUS_32(cmd);
833 target = SCSI_TCN_32(cmd);
834 lun = SCSI_LUN_32(cmd);
835
836 dprintk(4, "error_action %i, istatus 0x%04x\n", action,
837 RD_REG_WORD(&ha->iobase->istatus));
838
839 dprintk(4, "host_cmd 0x%04x, ictrl 0x%04x, jiffies %li\n",
840 RD_REG_WORD(&ha->iobase->host_cmd),
841 RD_REG_WORD(&ha->iobase->ictrl), jiffies);
842
843 if (qla1280_verbose)
844 printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, "
845 "Handle=0x%p, action=0x%x\n",
846 ha->host_no, cmd, CMD_HANDLE(cmd), action);
847
848
849
850
851
852
853
854 found = -1;
855 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
856 if (sp == ha->outstanding_cmds[i]) {
857 found = i;
858 sp->wait = &wait;
859 break;
860 }
861 }
862
863 if (found < 0) {
864 result = SUCCESS;
865 if (qla1280_verbose) {
866 printk(KERN_INFO
867 "scsi(%ld:%d:%d:%d): specified command has "
868 "already completed.\n", ha->host_no, bus,
869 target, lun);
870 }
871 }
872
873 switch (action) {
874
875 case ABORT_COMMAND:
876 dprintk(1, "qla1280: RISC aborting command\n");
877
878
879
880
881
882 if (found >= 0)
883 qla1280_abort_command(ha, sp, found);
884 break;
885
886 case DEVICE_RESET:
887 if (qla1280_verbose)
888 printk(KERN_INFO
889 "scsi(%ld:%d:%d:%d): Queueing device reset "
890 "command.\n", ha->host_no, bus, target, lun);
891 if (qla1280_device_reset(ha, bus, target) == 0) {
892
893 wait_for_bus = bus;
894 wait_for_target = target;
895 }
896 break;
897
898 case BUS_RESET:
899 if (qla1280_verbose)
900 printk(KERN_INFO "qla1280(%ld:%d): Issued bus "
901 "reset.\n", ha->host_no, bus);
902 if (qla1280_bus_reset(ha, bus) == 0) {
903
904 wait_for_bus = bus;
905 }
906 break;
907
908 case ADAPTER_RESET:
909 default:
910 if (qla1280_verbose) {
911 printk(KERN_INFO
912 "scsi(%ld): Issued ADAPTER RESET\n",
913 ha->host_no);
914 printk(KERN_INFO "scsi(%ld): I/O processing will "
915 "continue automatically\n", ha->host_no);
916 }
917 ha->flags.reset_active = 1;
918
919 if (qla1280_abort_isp(ha) != 0) {
920 result = FAILED;
921 }
922
923 ha->flags.reset_active = 0;
924 }
925
926
927
928
929
930
931
932
933
934
935 if (found >= 0)
936 result = _qla1280_wait_for_single_command(ha, sp, &wait);
937
938 if (action == ABORT_COMMAND && result != SUCCESS) {
939 printk(KERN_WARNING
940 "scsi(%li:%i:%i:%i): "
941 "Unable to abort command!\n",
942 ha->host_no, bus, target, lun);
943 }
944
945
946
947
948
949
950
951
952
953
954
955 if (result == SUCCESS && wait_for_bus >= 0) {
956 result = qla1280_wait_for_pending_commands(ha,
957 wait_for_bus, wait_for_target);
958 }
959
960 dprintk(1, "RESET returning %d\n", result);
961
962 LEAVE("qla1280_error_action");
963 return result;
964}
965
966
967
968
969
970static int
971qla1280_eh_abort(struct scsi_cmnd * cmd)
972{
973 int rc;
974
975 spin_lock_irq(cmd->device->host->host_lock);
976 rc = qla1280_error_action(cmd, ABORT_COMMAND);
977 spin_unlock_irq(cmd->device->host->host_lock);
978
979 return rc;
980}
981
982
983
984
985
986static int
987qla1280_eh_device_reset(struct scsi_cmnd *cmd)
988{
989 int rc;
990
991 spin_lock_irq(cmd->device->host->host_lock);
992 rc = qla1280_error_action(cmd, DEVICE_RESET);
993 spin_unlock_irq(cmd->device->host->host_lock);
994
995 return rc;
996}
997
998
999
1000
1001
1002static int
1003qla1280_eh_bus_reset(struct scsi_cmnd *cmd)
1004{
1005 int rc;
1006
1007 spin_lock_irq(cmd->device->host->host_lock);
1008 rc = qla1280_error_action(cmd, BUS_RESET);
1009 spin_unlock_irq(cmd->device->host->host_lock);
1010
1011 return rc;
1012}
1013
1014
1015
1016
1017
1018static int
1019qla1280_eh_adapter_reset(struct scsi_cmnd *cmd)
1020{
1021 int rc;
1022
1023 spin_lock_irq(cmd->device->host->host_lock);
1024 rc = qla1280_error_action(cmd, ADAPTER_RESET);
1025 spin_unlock_irq(cmd->device->host->host_lock);
1026
1027 return rc;
1028}
1029
1030static int
1031qla1280_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1032 sector_t capacity, int geom[])
1033{
1034 int heads, sectors, cylinders;
1035
1036 heads = 64;
1037 sectors = 32;
1038 cylinders = (unsigned long)capacity / (heads * sectors);
1039 if (cylinders > 1024) {
1040 heads = 255;
1041 sectors = 63;
1042 cylinders = (unsigned long)capacity / (heads * sectors);
1043
1044
1045 }
1046
1047 geom[0] = heads;
1048 geom[1] = sectors;
1049 geom[2] = cylinders;
1050
1051 return 0;
1052}
1053
1054
1055
1056static inline void
1057qla1280_disable_intrs(struct scsi_qla_host *ha)
1058{
1059 WRT_REG_WORD(&ha->iobase->ictrl, 0);
1060 RD_REG_WORD(&ha->iobase->ictrl);
1061}
1062
1063
1064static inline void
1065qla1280_enable_intrs(struct scsi_qla_host *ha)
1066{
1067 WRT_REG_WORD(&ha->iobase->ictrl, (ISP_EN_INT | ISP_EN_RISC));
1068 RD_REG_WORD(&ha->iobase->ictrl);
1069}
1070
1071
1072
1073
1074
1075static irqreturn_t
1076qla1280_intr_handler(int irq, void *dev_id)
1077{
1078 struct scsi_qla_host *ha;
1079 struct device_reg __iomem *reg;
1080 u16 data;
1081 int handled = 0;
1082
1083 ENTER_INTR ("qla1280_intr_handler");
1084 ha = (struct scsi_qla_host *)dev_id;
1085
1086 spin_lock(ha->host->host_lock);
1087
1088 ha->isr_count++;
1089 reg = ha->iobase;
1090
1091 qla1280_disable_intrs(ha);
1092
	data = qla1280_debounce_register(&reg->istatus);
1094
1095 if (data & RISC_INT) {
1096 qla1280_isr(ha, &ha->done_q);
1097 handled = 1;
1098 }
1099 if (!list_empty(&ha->done_q))
1100 qla1280_done(ha);
1101
1102 spin_unlock(ha->host->host_lock);
1103
1104 qla1280_enable_intrs(ha);
1105
1106 LEAVE_INTR("qla1280_intr_handler");
1107 return IRQ_RETVAL(handled);
1108}
1109
1110
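/*
 * Push the per-target negotiation settings (sync/wide/PPR, tag queueing,
 * queue depth, execution throttle) from the NVRAM image to the firmware
 * via mailbox commands.
 */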
1111static int
1112qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target)
1113{
1114 uint8_t mr;
1115 uint16_t mb[MAILBOX_REGISTER_COUNT];
1116 struct nvram *nv;
1117 int status, lun;
1118
1119 nv = &ha->nvram;
1120
1121 mr = BIT_3 | BIT_2 | BIT_1 | BIT_0;
1122
1123
1124 mb[0] = MBC_SET_TARGET_PARAMETERS;
1125 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
1126 mb[2] = nv->bus[bus].target[target].parameter.renegotiate_on_error << 8;
1127 mb[2] |= nv->bus[bus].target[target].parameter.stop_queue_on_check << 9;
1128 mb[2] |= nv->bus[bus].target[target].parameter.auto_request_sense << 10;
1129 mb[2] |= nv->bus[bus].target[target].parameter.tag_queuing << 11;
1130 mb[2] |= nv->bus[bus].target[target].parameter.enable_sync << 12;
1131 mb[2] |= nv->bus[bus].target[target].parameter.enable_wide << 13;
1132 mb[2] |= nv->bus[bus].target[target].parameter.parity_checking << 14;
1133 mb[2] |= nv->bus[bus].target[target].parameter.disconnect_allowed << 15;
1134
1135 if (IS_ISP1x160(ha)) {
1136 mb[2] |= nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr << 5;
1137 mb[3] = (nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8);
1138 mb[6] = (nv->bus[bus].target[target].ppr_1x160.flags.ppr_options << 8) |
1139 nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width;
1140 mr |= BIT_6;
1141 } else {
1142 mb[3] = (nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8);
1143 }
1144 mb[3] |= nv->bus[bus].target[target].sync_period;
1145
1146 status = qla1280_mailbox_command(ha, mr, mb);
1147
1148
1149 for (lun = 0; lun < MAX_LUNS; lun++) {
1150 mb[0] = MBC_SET_DEVICE_QUEUE;
1151 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
1152 mb[1] |= lun;
1153 mb[2] = nv->bus[bus].max_queue_depth;
1154 mb[3] = nv->bus[bus].target[target].execution_throttle;
1155 status |= qla1280_mailbox_command(ha, 0x0f, mb);
1156 }
1157
1158 if (status)
1159 printk(KERN_WARNING "scsi(%ld:%i:%i): "
1160 "qla1280_set_target_parameters() failed\n",
1161 ha->host_no, bus, target);
1162 return status;
1163}
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
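/*
 * qla1280_slave_configure
 *	Set the queue depth for the device and (re)negotiate transfer
 *	parameters, honouring the no_sync/no_wide/no_ppr overrides from the
 *	"qla1280=" option string.
 */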
1177static int
1178qla1280_slave_configure(struct scsi_device *device)
1179{
1180 struct scsi_qla_host *ha;
1181 int default_depth = 3;
1182 int bus = device->channel;
1183 int target = device->id;
1184 int status = 0;
1185 struct nvram *nv;
1186 unsigned long flags;
1187
1188 ha = (struct scsi_qla_host *)device->host->hostdata;
1189 nv = &ha->nvram;
1190
1191 if (qla1280_check_for_dead_scsi_bus(ha, bus))
1192 return 1;
1193
1194 if (device->tagged_supported &&
1195 (ha->bus_settings[bus].qtag_enables & (BIT_0 << target))) {
1196 scsi_change_queue_depth(device, ha->bus_settings[bus].hiwat);
1197 } else {
1198 scsi_change_queue_depth(device, default_depth);
1199 }
1200
1201 nv->bus[bus].target[target].parameter.enable_sync = device->sdtr;
1202 nv->bus[bus].target[target].parameter.enable_wide = device->wdtr;
1203 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = device->ppr;
1204
1205 if (driver_setup.no_sync ||
1206 (driver_setup.sync_mask &&
1207 (~driver_setup.sync_mask & (1 << target))))
1208 nv->bus[bus].target[target].parameter.enable_sync = 0;
1209 if (driver_setup.no_wide ||
1210 (driver_setup.wide_mask &&
1211 (~driver_setup.wide_mask & (1 << target))))
1212 nv->bus[bus].target[target].parameter.enable_wide = 0;
1213 if (IS_ISP1x160(ha)) {
1214 if (driver_setup.no_ppr ||
1215 (driver_setup.ppr_mask &&
1216 (~driver_setup.ppr_mask & (1 << target))))
1217 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 0;
1218 }
1219
1220 spin_lock_irqsave(ha->host->host_lock, flags);
1221 if (nv->bus[bus].target[target].parameter.enable_sync)
1222 status = qla1280_set_target_parameters(ha, bus, target);
1223 qla1280_get_target_parameters(ha, device);
1224 spin_unlock_irqrestore(ha->host->host_lock, flags);
1225 return status;
1226}
1227
1228
1229
1230
1231
1232
1233
1234
1235
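/*
 * qla1280_done
 *	Drain the done queue: unmap DMA, adjust the outstanding-command
 *	accounting and hand each command back to the midlayer (or wake an
 *	error-handling waiter).
 */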
1236static void
1237qla1280_done(struct scsi_qla_host *ha)
1238{
1239 struct srb *sp;
1240 struct list_head *done_q;
1241 int bus, target;
1242 struct scsi_cmnd *cmd;
1243
1244 ENTER("qla1280_done");
1245
1246 done_q = &ha->done_q;
1247
1248 while (!list_empty(done_q)) {
1249 sp = list_entry(done_q->next, struct srb, list);
1250
1251 list_del(&sp->list);
1252
1253 cmd = sp->cmd;
1254 bus = SCSI_BUS_32(cmd);
1255 target = SCSI_TCN_32(cmd);
1256
1257 switch ((CMD_RESULT(cmd) >> 16)) {
1258 case DID_RESET:
1259
1260 if (!ha->flags.abort_isp_active)
1261 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
1262 break;
1263 case DID_ABORT:
1264 sp->flags &= ~SRB_ABORT_PENDING;
1265 sp->flags |= SRB_ABORTED;
1266 break;
1267 default:
1268 break;
1269 }
1270
1271
1272 scsi_dma_unmap(cmd);
1273
1274
1275 ha->actthreads--;
1276
1277 if (sp->wait == NULL)
1278 scsi_done(cmd);
1279 else
1280 complete(sp->wait);
1281 }
1282 LEAVE("qla1280_done");
1283}
1284
1285
1286
1287
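/*
 * Translate the ISP completion status in a response entry into a SCSI
 * midlayer result value (host byte << 16 | SCSI status byte).
 */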
1288static int
1289qla1280_return_status(struct response * sts, struct scsi_cmnd *cp)
1290{
1291 int host_status = DID_ERROR;
1292 uint16_t comp_status = le16_to_cpu(sts->comp_status);
1293 uint16_t state_flags = le16_to_cpu(sts->state_flags);
1294 uint32_t residual_length = le32_to_cpu(sts->residual_length);
1295 uint16_t scsi_status = le16_to_cpu(sts->scsi_status);
1296#if DEBUG_QLA1280_INTR
1297 static char *reason[] = {
1298 "DID_OK",
1299 "DID_NO_CONNECT",
1300 "DID_BUS_BUSY",
1301 "DID_TIME_OUT",
1302 "DID_BAD_TARGET",
1303 "DID_ABORT",
1304 "DID_PARITY",
1305 "DID_ERROR",
1306 "DID_RESET",
1307 "DID_BAD_INTR"
1308 };
1309#endif
1310
1311 ENTER("qla1280_return_status");
1312
1313#if DEBUG_QLA1280_INTR
1314
1315
1316
1317
1318#endif
1319
1320 switch (comp_status) {
1321 case CS_COMPLETE:
1322 host_status = DID_OK;
1323 break;
1324
1325 case CS_INCOMPLETE:
1326 if (!(state_flags & SF_GOT_BUS))
1327 host_status = DID_NO_CONNECT;
1328 else if (!(state_flags & SF_GOT_TARGET))
1329 host_status = DID_BAD_TARGET;
1330 else if (!(state_flags & SF_SENT_CDB))
1331 host_status = DID_ERROR;
1332 else if (!(state_flags & SF_TRANSFERRED_DATA))
1333 host_status = DID_ERROR;
1334 else if (!(state_flags & SF_GOT_STATUS))
1335 host_status = DID_ERROR;
1336 else if (!(state_flags & SF_GOT_SENSE))
1337 host_status = DID_ERROR;
1338 break;
1339
1340 case CS_RESET:
1341 host_status = DID_RESET;
1342 break;
1343
1344 case CS_ABORTED:
1345 host_status = DID_ABORT;
1346 break;
1347
1348 case CS_TIMEOUT:
1349 host_status = DID_TIME_OUT;
1350 break;
1351
1352 case CS_DATA_OVERRUN:
1353 dprintk(2, "Data overrun 0x%x\n", residual_length);
1354 dprintk(2, "qla1280_return_status: response packet data\n");
1355 qla1280_dump_buffer(2, (char *)sts, RESPONSE_ENTRY_SIZE);
1356 host_status = DID_ERROR;
1357 break;
1358
1359 case CS_DATA_UNDERRUN:
1360 if ((scsi_bufflen(cp) - residual_length) <
1361 cp->underflow) {
1362 printk(KERN_WARNING
1363 "scsi: Underflow detected - retrying "
1364 "command.\n");
1365 host_status = DID_ERROR;
1366 } else {
1367 scsi_set_resid(cp, residual_length);
1368 host_status = DID_OK;
1369 }
1370 break;
1371
1372 default:
1373 host_status = DID_ERROR;
1374 break;
1375 }
1376
1377#if DEBUG_QLA1280_INTR
1378 dprintk(1, "qla1280 ISP status: host status (%s) scsi status %x\n",
1379 reason[host_status], scsi_status);
1380#endif
1381
1382 LEAVE("qla1280_return_status");
1383
1384 return (scsi_status & 0xff) | (host_status << 16);
1385}
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
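/*
 * qla1280_initialize_adapter
 *	Reset the chip, load and start the RISC firmware, apply the NVRAM
 *	configuration, initialize the request/response rings and reset the
 *	SCSI buses.
 *
 * Returns 0 on success.
 */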
1401static int
1402qla1280_initialize_adapter(struct scsi_qla_host *ha)
1403{
1404 struct device_reg __iomem *reg;
1405 int status;
1406 int bus;
1407 unsigned long flags;
1408
1409 ENTER("qla1280_initialize_adapter");
1410
1411
1412 ha->flags.online = 0;
1413 ha->flags.disable_host_adapter = 0;
1414 ha->flags.reset_active = 0;
1415 ha->flags.abort_isp_active = 0;
1416
1417
1418 if (IS_ISP1040(ha))
1419 driver_setup.no_nvram = 1;
1420
1421 dprintk(1, "Configure PCI space for adapter...\n");
1422
1423 reg = ha->iobase;
1424
1425
	WRT_REG_WORD(&reg->semaphore, 0);
	WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
	WRT_REG_WORD(&reg->host_cmd, HC_CLR_HOST_INT);
	RD_REG_WORD(&reg->host_cmd);
1430
1431 if (qla1280_read_nvram(ha)) {
1432 dprintk(2, "qla1280_initialize_adapter: failed to read "
1433 "NVRAM\n");
1434 }
1435
1436
1437
1438
1439
1440
1441 spin_lock_irqsave(ha->host->host_lock, flags);
1442
1443 status = qla1280_load_firmware(ha);
1444 if (status) {
1445 printk(KERN_ERR "scsi(%li): initialize: pci probe failed!\n",
1446 ha->host_no);
1447 goto out;
1448 }
1449
1450
1451 dprintk(1, "scsi(%ld): Configure NVRAM parameters\n", ha->host_no);
1452 qla1280_nvram_config(ha);
1453
1454 if (ha->flags.disable_host_adapter) {
1455 status = 1;
1456 goto out;
1457 }
1458
1459 status = qla1280_init_rings(ha);
1460 if (status)
1461 goto out;
1462
1463
1464 for (bus = 0; bus < ha->ports; bus++) {
1465 if (!ha->bus_settings[bus].disable_scsi_reset &&
1466 qla1280_bus_reset(ha, bus) &&
1467 qla1280_bus_reset(ha, bus))
1468 ha->bus_settings[bus].scsi_bus_dead = 1;
1469 }
1470
1471 ha->flags.online = 1;
1472 out:
1473 spin_unlock_irqrestore(ha->host->host_lock, flags);
1474
1475 if (status)
1476 dprintk(2, "qla1280_initialize_adapter: **** FAILED ****\n");
1477
1478 LEAVE("qla1280_initialize_adapter");
1479 return status;
1480}
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
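/*
 * Return the firmware image for this board, loading it through
 * request_firmware() on first use and caching it in qla1280_fw_tbl[].
 */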
1494static const struct firmware *
1495qla1280_request_firmware(struct scsi_qla_host *ha)
1496{
1497 const struct firmware *fw;
1498 int err;
1499 int index;
1500 char *fwname;
1501
1502 spin_unlock_irq(ha->host->host_lock);
1503 mutex_lock(&qla1280_firmware_mutex);
1504
1505 index = ql1280_board_tbl[ha->devnum].fw_index;
1506 fw = qla1280_fw_tbl[index].fw;
1507 if (fw)
1508 goto out;
1509
1510 fwname = qla1280_fw_tbl[index].fwname;
1511 err = request_firmware(&fw, fwname, &ha->pdev->dev);
1512
1513 if (err) {
1514 printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
1515 fwname, err);
1516 fw = ERR_PTR(err);
1517 goto unlock;
1518 }
1519 if ((fw->size % 2) || (fw->size < 6)) {
1520 printk(KERN_ERR "Invalid firmware length %zu in image \"%s\"\n",
1521 fw->size, fwname);
1522 release_firmware(fw);
1523 fw = ERR_PTR(-EINVAL);
1524 goto unlock;
1525 }
1526
1527 qla1280_fw_tbl[index].fw = fw;
1528
1529 out:
1530 ha->fwver1 = fw->data[0];
1531 ha->fwver2 = fw->data[1];
1532 ha->fwver3 = fw->data[2];
1533 unlock:
1534 mutex_unlock(&qla1280_firmware_mutex);
1535 spin_lock_irq(ha->host->host_lock);
1536 return fw;
1537}
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
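/*
 * qla1280_chip_diag
 *	Reset the ISP, verify the product ID and run the mailbox register
 *	test before any firmware is loaded.
 */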
1549static int
1550qla1280_chip_diag(struct scsi_qla_host *ha)
1551{
1552 uint16_t mb[MAILBOX_REGISTER_COUNT];
1553 struct device_reg __iomem *reg = ha->iobase;
1554 int status = 0;
1555 int cnt;
1556 uint16_t data;
	dprintk(3, "qla1280_chip_diag: testing device at 0x%p \n", &reg->id_l);
1558
1559 dprintk(1, "scsi(%ld): Verifying chip\n", ha->host_no);
1560
1561
	WRT_REG_WORD(&reg->ictrl, ISP_RESET);
1563
1564
1565
1566
1567
1568
1569
1570
1571 udelay(20);
	data = qla1280_debounce_register(&reg->ictrl);
1573
1574
1575
	for (cnt = 1000000; cnt && data & ISP_RESET; cnt--) {
		udelay(5);
		data = RD_REG_WORD(&reg->ictrl);
	}
1580
1581 if (!cnt)
1582 goto fail;
1583
1584
1585 dprintk(3, "qla1280_chip_diag: reset register cleared by chip reset\n");
1586
	WRT_REG_WORD(&reg->cfg_1, 0);
1588
1589
1590
	WRT_REG_WORD(&reg->host_cmd, HC_RESET_RISC |
		     HC_RELEASE_RISC | HC_DISABLE_BIOS);

	RD_REG_WORD(&reg->id_l);
	data = qla1280_debounce_register(&reg->mailbox0);
1596
1597
1598
1599
	for (cnt = 1000000; cnt && data == MBS_BUSY; cnt--) {
		udelay(5);
		data = RD_REG_WORD(&reg->mailbox0);
	}
1604
1605 if (!cnt)
1606 goto fail;
1607
1608
1609 dprintk(3, "qla1280_chip_diag: Checking product ID of chip\n");
1610
	if (RD_REG_WORD(&reg->mailbox1) != PROD_ID_1 ||
	    (RD_REG_WORD(&reg->mailbox2) != PROD_ID_2 &&
	     RD_REG_WORD(&reg->mailbox2) != PROD_ID_2a) ||
	    RD_REG_WORD(&reg->mailbox3) != PROD_ID_3 ||
	    RD_REG_WORD(&reg->mailbox4) != PROD_ID_4) {
		printk(KERN_INFO "qla1280: Wrong product ID = "
		       "0x%x,0x%x,0x%x,0x%x\n",
		       RD_REG_WORD(&reg->mailbox1),
		       RD_REG_WORD(&reg->mailbox2),
		       RD_REG_WORD(&reg->mailbox3),
		       RD_REG_WORD(&reg->mailbox4));
		goto fail;
	}
1624
1625
1626
1627
1628 qla1280_enable_intrs(ha);
1629
1630 dprintk(1, "qla1280_chip_diag: Checking mailboxes of chip\n");
1631
1632 mb[0] = MBC_MAILBOX_REGISTER_TEST;
1633 mb[1] = 0xAAAA;
1634 mb[2] = 0x5555;
1635 mb[3] = 0xAA55;
1636 mb[4] = 0x55AA;
1637 mb[5] = 0xA5A5;
1638 mb[6] = 0x5A5A;
1639 mb[7] = 0x2525;
1640
1641 status = qla1280_mailbox_command(ha, 0xff, mb);
1642 if (status)
1643 goto fail;
1644
1645 if (mb[1] != 0xAAAA || mb[2] != 0x5555 || mb[3] != 0xAA55 ||
1646 mb[4] != 0x55AA || mb[5] != 0xA5A5 || mb[6] != 0x5A5A ||
1647 mb[7] != 0x2525) {
1648 printk(KERN_INFO "qla1280: Failed mbox check\n");
1649 goto fail;
1650 }
1651
1652 dprintk(3, "qla1280_chip_diag: exiting normally\n");
1653 return 0;
1654 fail:
1655 dprintk(2, "qla1280_chip_diag: **** FAILED ****\n");
1656 return status;
1657}
1658
1659static int
1660qla1280_load_firmware_pio(struct scsi_qla_host *ha)
1661{
1662
1663
1664 const struct firmware *fw;
1665 const __le16 *fw_data;
1666 uint16_t risc_address, risc_code_size;
1667 uint16_t mb[MAILBOX_REGISTER_COUNT], i;
1668 int err = 0;
1669
1670 fw = qla1280_request_firmware(ha);
1671 if (IS_ERR(fw))
1672 return PTR_ERR(fw);
1673
1674 fw_data = (const __le16 *)&fw->data[0];
1675 ha->fwstart = __le16_to_cpu(fw_data[2]);
1676
1677
1678 risc_address = ha->fwstart;
1679 fw_data = (const __le16 *)&fw->data[6];
1680 risc_code_size = (fw->size - 6) / 2;
1681
1682 for (i = 0; i < risc_code_size; i++) {
1683 mb[0] = MBC_WRITE_RAM_WORD;
1684 mb[1] = risc_address + i;
1685 mb[2] = __le16_to_cpu(fw_data[i]);
1686
1687 err = qla1280_mailbox_command(ha, BIT_0 | BIT_1 | BIT_2, mb);
1688 if (err) {
1689 printk(KERN_ERR "scsi(%li): Failed to load firmware\n",
1690 ha->host_no);
1691 break;
1692 }
1693 }
1694
1695 return err;
1696}
1697
1698#ifdef QLA_64BIT_PTR
1699#define LOAD_CMD MBC_LOAD_RAM_A64_ROM
1700#define DUMP_CMD MBC_DUMP_RAM_A64_ROM
1701#define CMD_ARGS (BIT_7 | BIT_6 | BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0)
1702#else
1703#define LOAD_CMD MBC_LOAD_RAM
1704#define DUMP_CMD MBC_DUMP_RAM
1705#define CMD_ARGS (BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0)
1706#endif
1707
1708#define DUMP_IT_BACK 0
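/*
 * Download the RISC firmware in chunks, staging each chunk in the request
 * ring buffer and transferring it with a LOAD RAM mailbox command; with
 * DUMP_IT_BACK set, each chunk is read back and compared for verification.
 */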
1709static int
1710qla1280_load_firmware_dma(struct scsi_qla_host *ha)
1711{
1712
1713 const struct firmware *fw;
1714 const __le16 *fw_data;
1715 uint16_t risc_address, risc_code_size;
1716 uint16_t mb[MAILBOX_REGISTER_COUNT], cnt;
1717 int err = 0, num, i;
1718#if DUMP_IT_BACK
1719 uint8_t *sp, *tbuf;
1720 dma_addr_t p_tbuf;
1721
1722 tbuf = dma_alloc_coherent(&ha->pdev->dev, 8000, &p_tbuf, GFP_KERNEL);
1723 if (!tbuf)
1724 return -ENOMEM;
1725#endif
1726
	fw = qla1280_request_firmware(ha);
	if (IS_ERR(fw)) {
		err = PTR_ERR(fw);
		goto out;	/* frees the DUMP_IT_BACK bounce buffer, if allocated */
	}
1730
1731 fw_data = (const __le16 *)&fw->data[0];
1732 ha->fwstart = __le16_to_cpu(fw_data[2]);
1733
1734
1735 risc_address = ha->fwstart;
1736 fw_data = (const __le16 *)&fw->data[6];
1737 risc_code_size = (fw->size - 6) / 2;
1738
1739 dprintk(1, "%s: DMA RISC code (%i) words\n",
1740 __func__, risc_code_size);
1741
1742 num = 0;
1743 while (risc_code_size > 0) {
1744 int warn __attribute__((unused)) = 0;
1745
1746 cnt = 2000 >> 1;
1747
1748 if (cnt > risc_code_size)
1749 cnt = risc_code_size;
1750
1751 dprintk(2, "qla1280_setup_chip: loading risc @ =(0x%p),"
1752 "%d,%d(0x%x)\n",
1753 fw_data, cnt, num, risc_address);
1754 for(i = 0; i < cnt; i++)
1755 ((__le16 *)ha->request_ring)[i] = fw_data[i];
1756
1757 mb[0] = LOAD_CMD;
1758 mb[1] = risc_address;
1759 mb[4] = cnt;
1760 mb[3] = ha->request_dma & 0xffff;
1761 mb[2] = (ha->request_dma >> 16) & 0xffff;
1762 mb[7] = upper_32_bits(ha->request_dma) & 0xffff;
1763 mb[6] = upper_32_bits(ha->request_dma) >> 16;
1764 dprintk(2, "%s: op=%d 0x%p = 0x%4x,0x%4x,0x%4x,0x%4x\n",
1765 __func__, mb[0],
1766 (void *)(long)ha->request_dma,
1767 mb[6], mb[7], mb[2], mb[3]);
1768 err = qla1280_mailbox_command(ha, CMD_ARGS, mb);
1769 if (err) {
1770 printk(KERN_ERR "scsi(%li): Failed to load partial "
1771 "segment of f\n", ha->host_no);
1772 goto out;
1773 }
1774
1775#if DUMP_IT_BACK
1776 mb[0] = DUMP_CMD;
1777 mb[1] = risc_address;
1778 mb[4] = cnt;
1779 mb[3] = p_tbuf & 0xffff;
1780 mb[2] = (p_tbuf >> 16) & 0xffff;
1781 mb[7] = upper_32_bits(p_tbuf) & 0xffff;
1782 mb[6] = upper_32_bits(p_tbuf) >> 16;
1783
1784 err = qla1280_mailbox_command(ha, CMD_ARGS, mb);
1785 if (err) {
1786 printk(KERN_ERR
1787 "Failed to dump partial segment of f/w\n");
1788 goto out;
1789 }
1790 sp = (uint8_t *)ha->request_ring;
1791 for (i = 0; i < (cnt << 1); i++) {
1792 if (tbuf[i] != sp[i] && warn++ < 10) {
1793 printk(KERN_ERR "%s: FW compare error @ "
1794 "byte(0x%x) loop#=%x\n",
1795 __func__, i, num);
1796 printk(KERN_ERR "%s: FWbyte=%x "
1797 "FWfromChip=%x\n",
1798 __func__, sp[i], tbuf[i]);
1799
1800 }
1801 }
1802#endif
1803 risc_address += cnt;
1804 risc_code_size = risc_code_size - cnt;
1805 fw_data = fw_data + cnt;
1806 num++;
1807 }
1808
1809 out:
1810#if DUMP_IT_BACK
1811 dma_free_coherent(&ha->pdev->dev, 8000, tbuf, p_tbuf);
1812#endif
1813 return err;
1814}
1815
1816static int
1817qla1280_start_firmware(struct scsi_qla_host *ha)
1818{
1819 uint16_t mb[MAILBOX_REGISTER_COUNT];
1820 int err;
1821
1822 dprintk(1, "%s: Verifying checksum of loaded RISC code.\n",
1823 __func__);
1824
1825
1826 mb[0] = MBC_VERIFY_CHECKSUM;
1827
1828 mb[1] = ha->fwstart;
1829 err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
1830 if (err) {
1831 printk(KERN_ERR "scsi(%li): RISC checksum failed.\n", ha->host_no);
1832 return err;
1833 }
1834
1835
1836 dprintk(1, "%s: start firmware running.\n", __func__);
1837 mb[0] = MBC_EXECUTE_FIRMWARE;
1838 mb[1] = ha->fwstart;
1839 err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
1840 if (err) {
1841 printk(KERN_ERR "scsi(%li): Failed to start firmware\n",
1842 ha->host_no);
1843 }
1844
1845 return err;
1846}
1847
1848static int
1849qla1280_load_firmware(struct scsi_qla_host *ha)
1850{
1851
1852 int err;
1853
1854 err = qla1280_chip_diag(ha);
1855 if (err)
1856 goto out;
1857 if (IS_ISP1040(ha))
1858 err = qla1280_load_firmware_pio(ha);
1859 else
1860 err = qla1280_load_firmware_dma(ha);
1861 if (err)
1862 goto out;
1863 err = qla1280_start_firmware(ha);
1864 out:
1865 return err;
1866}
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
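/*
 * qla1280_init_rings
 *	Clear the outstanding-command array, reset the driver's ring
 *	indexes and tell the firmware where the request and response rings
 *	live.
 */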
1881static int
1882qla1280_init_rings(struct scsi_qla_host *ha)
1883{
1884 uint16_t mb[MAILBOX_REGISTER_COUNT];
1885 int status = 0;
1886
1887 ENTER("qla1280_init_rings");
1888
1889
1890 memset(ha->outstanding_cmds, 0,
1891 sizeof(struct srb *) * MAX_OUTSTANDING_COMMANDS);
1892
1893
1894 ha->request_ring_ptr = ha->request_ring;
1895 ha->req_ring_index = 0;
1896 ha->req_q_cnt = REQUEST_ENTRY_CNT;
1897
1898 mb[0] = MBC_INIT_REQUEST_QUEUE_A64;
1899 mb[1] = REQUEST_ENTRY_CNT;
1900 mb[3] = ha->request_dma & 0xffff;
1901 mb[2] = (ha->request_dma >> 16) & 0xffff;
1902 mb[4] = 0;
1903 mb[7] = upper_32_bits(ha->request_dma) & 0xffff;
1904 mb[6] = upper_32_bits(ha->request_dma) >> 16;
1905 if (!(status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_4 |
1906 BIT_3 | BIT_2 | BIT_1 | BIT_0,
1907 &mb[0]))) {
1908
1909 ha->response_ring_ptr = ha->response_ring;
1910 ha->rsp_ring_index = 0;
1911
1912 mb[0] = MBC_INIT_RESPONSE_QUEUE_A64;
1913 mb[1] = RESPONSE_ENTRY_CNT;
1914 mb[3] = ha->response_dma & 0xffff;
1915 mb[2] = (ha->response_dma >> 16) & 0xffff;
1916 mb[5] = 0;
1917 mb[7] = upper_32_bits(ha->response_dma) & 0xffff;
1918 mb[6] = upper_32_bits(ha->response_dma) >> 16;
1919 status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_5 |
1920 BIT_3 | BIT_2 | BIT_1 | BIT_0,
1921 &mb[0]);
1922 }
1923
1924 if (status)
1925 dprintk(2, "qla1280_init_rings: **** FAILED ****\n");
1926
1927 LEAVE("qla1280_init_rings");
1928 return status;
1929}
1930
1931static void
1932qla1280_print_settings(struct nvram *nv)
1933{
1934 dprintk(1, "qla1280 : initiator scsi id bus[0]=%d\n",
1935 nv->bus[0].config_1.initiator_id);
1936 dprintk(1, "qla1280 : initiator scsi id bus[1]=%d\n",
1937 nv->bus[1].config_1.initiator_id);
1938
1939 dprintk(1, "qla1280 : bus reset delay[0]=%d\n",
1940 nv->bus[0].bus_reset_delay);
1941 dprintk(1, "qla1280 : bus reset delay[1]=%d\n",
1942 nv->bus[1].bus_reset_delay);
1943
1944 dprintk(1, "qla1280 : retry count[0]=%d\n", nv->bus[0].retry_count);
1945 dprintk(1, "qla1280 : retry delay[0]=%d\n", nv->bus[0].retry_delay);
1946 dprintk(1, "qla1280 : retry count[1]=%d\n", nv->bus[1].retry_count);
1947 dprintk(1, "qla1280 : retry delay[1]=%d\n", nv->bus[1].retry_delay);
1948
1949 dprintk(1, "qla1280 : async data setup time[0]=%d\n",
1950 nv->bus[0].config_2.async_data_setup_time);
1951 dprintk(1, "qla1280 : async data setup time[1]=%d\n",
1952 nv->bus[1].config_2.async_data_setup_time);
1953
1954 dprintk(1, "qla1280 : req/ack active negation[0]=%d\n",
1955 nv->bus[0].config_2.req_ack_active_negation);
1956 dprintk(1, "qla1280 : req/ack active negation[1]=%d\n",
1957 nv->bus[1].config_2.req_ack_active_negation);
1958
1959 dprintk(1, "qla1280 : data line active negation[0]=%d\n",
1960 nv->bus[0].config_2.data_line_active_negation);
1961 dprintk(1, "qla1280 : data line active negation[1]=%d\n",
1962 nv->bus[1].config_2.data_line_active_negation);
1963
1964 dprintk(1, "qla1280 : disable loading risc code=%d\n",
1965 nv->cntr_flags_1.disable_loading_risc_code);
1966
1967 dprintk(1, "qla1280 : enable 64bit addressing=%d\n",
1968 nv->cntr_flags_1.enable_64bit_addressing);
1969
1970 dprintk(1, "qla1280 : selection timeout limit[0]=%d\n",
1971 nv->bus[0].selection_timeout);
1972 dprintk(1, "qla1280 : selection timeout limit[1]=%d\n",
1973 nv->bus[1].selection_timeout);
1974
1975 dprintk(1, "qla1280 : max queue depth[0]=%d\n",
1976 nv->bus[0].max_queue_depth);
1977 dprintk(1, "qla1280 : max queue depth[1]=%d\n",
1978 nv->bus[1].max_queue_depth);
1979}
1980
1981static void
1982qla1280_set_target_defaults(struct scsi_qla_host *ha, int bus, int target)
1983{
1984 struct nvram *nv = &ha->nvram;
1985
1986 nv->bus[bus].target[target].parameter.renegotiate_on_error = 1;
1987 nv->bus[bus].target[target].parameter.auto_request_sense = 1;
1988 nv->bus[bus].target[target].parameter.tag_queuing = 1;
1989 nv->bus[bus].target[target].parameter.enable_sync = 1;
1990#if 1
1991 nv->bus[bus].target[target].parameter.enable_wide = 1;
1992#endif
1993 nv->bus[bus].target[target].execution_throttle =
1994 nv->bus[bus].max_queue_depth - 1;
1995 nv->bus[bus].target[target].parameter.parity_checking = 1;
1996 nv->bus[bus].target[target].parameter.disconnect_allowed = 1;
1997
1998 if (IS_ISP1x160(ha)) {
1999 nv->bus[bus].target[target].flags.flags1x160.device_enable = 1;
2000 nv->bus[bus].target[target].flags.flags1x160.sync_offset = 0x0e;
2001 nv->bus[bus].target[target].sync_period = 9;
2002 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 1;
2003 nv->bus[bus].target[target].ppr_1x160.flags.ppr_options = 2;
2004 nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width = 1;
2005 } else {
2006 nv->bus[bus].target[target].flags.flags1x80.device_enable = 1;
2007 nv->bus[bus].target[target].flags.flags1x80.sync_offset = 12;
2008 nv->bus[bus].target[target].sync_period = 10;
2009 }
2010}
2011
2012static void
2013qla1280_set_defaults(struct scsi_qla_host *ha)
2014{
2015 struct nvram *nv = &ha->nvram;
2016 int bus, target;
2017
2018 dprintk(1, "Using defaults for NVRAM: \n");
2019 memset(nv, 0, sizeof(struct nvram));
2020
2021
2022 nv->firmware_feature.f.enable_fast_posting = 1;
2023 nv->firmware_feature.f.disable_synchronous_backoff = 1;
2024 nv->termination.scsi_bus_0_control = 3;
2025 nv->termination.scsi_bus_1_control = 3;
2026 nv->termination.auto_term_support = 1;
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036 nv->isp_config.burst_enable = 1;
2037 if (IS_ISP1040(ha))
2038 nv->isp_config.fifo_threshold |= 3;
2039 else
2040 nv->isp_config.fifo_threshold |= 4;
2041
2042 if (IS_ISP1x160(ha))
2043 nv->isp_parameter = 0x01;
2044
2045 for (bus = 0; bus < MAX_BUSES; bus++) {
2046 nv->bus[bus].config_1.initiator_id = 7;
2047 nv->bus[bus].config_2.req_ack_active_negation = 1;
2048 nv->bus[bus].config_2.data_line_active_negation = 1;
2049 nv->bus[bus].selection_timeout = 250;
2050 nv->bus[bus].max_queue_depth = 32;
2051
2052 if (IS_ISP1040(ha)) {
2053 nv->bus[bus].bus_reset_delay = 3;
2054 nv->bus[bus].config_2.async_data_setup_time = 6;
2055 nv->bus[bus].retry_delay = 1;
2056 } else {
2057 nv->bus[bus].bus_reset_delay = 5;
2058 nv->bus[bus].config_2.async_data_setup_time = 8;
2059 }
2060
2061 for (target = 0; target < MAX_TARGETS; target++)
2062 qla1280_set_target_defaults(ha, bus, target);
2063 }
2064}
2065
2066static int
2067qla1280_config_target(struct scsi_qla_host *ha, int bus, int target)
2068{
2069 struct nvram *nv = &ha->nvram;
2070 uint16_t mb[MAILBOX_REGISTER_COUNT];
2071 int status, lun;
2072 uint16_t flag;
2073
2074
2075 mb[0] = MBC_SET_TARGET_PARAMETERS;
2076 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
2077
2078
2079
2080
2081
2082
2083 mb[2] = (TP_RENEGOTIATE | TP_AUTO_REQUEST_SENSE | TP_TAGGED_QUEUE
2084 | TP_WIDE | TP_PARITY | TP_DISCONNECT);
2085
2086 if (IS_ISP1x160(ha))
2087 mb[3] = nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8;
2088 else
2089 mb[3] = nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8;
2090 mb[3] |= nv->bus[bus].target[target].sync_period;
2091 status = qla1280_mailbox_command(ha, 0x0f, mb);
2092
2093
2094 flag = (BIT_0 << target);
2095 if (nv->bus[bus].target[target].parameter.tag_queuing)
2096 ha->bus_settings[bus].qtag_enables |= flag;
2097
2098
2099 if (IS_ISP1x160(ha)) {
2100 if (nv->bus[bus].target[target].flags.flags1x160.device_enable)
2101 ha->bus_settings[bus].device_enables |= flag;
2102 ha->bus_settings[bus].lun_disables |= 0;
2103 } else {
2104 if (nv->bus[bus].target[target].flags.flags1x80.device_enable)
2105 ha->bus_settings[bus].device_enables |= flag;
2106
2107 if (nv->bus[bus].target[target].flags.flags1x80.lun_disable)
2108 ha->bus_settings[bus].lun_disables |= flag;
2109 }
2110
2111
2112 for (lun = 0; lun < MAX_LUNS; lun++) {
2113 mb[0] = MBC_SET_DEVICE_QUEUE;
2114 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
2115 mb[1] |= lun;
2116 mb[2] = nv->bus[bus].max_queue_depth;
2117 mb[3] = nv->bus[bus].target[target].execution_throttle;
2118 status |= qla1280_mailbox_command(ha, 0x0f, mb);
2119 }
2120
2121 return status;
2122}
2123
2124static int
2125qla1280_config_bus(struct scsi_qla_host *ha, int bus)
2126{
2127 struct nvram *nv = &ha->nvram;
2128 uint16_t mb[MAILBOX_REGISTER_COUNT];
2129 int target, status;
2130
2131
2132 ha->bus_settings[bus].disable_scsi_reset =
2133 nv->bus[bus].config_1.scsi_reset_disable;
2134
2135
2136 ha->bus_settings[bus].id = nv->bus[bus].config_1.initiator_id;
2137 mb[0] = MBC_SET_INITIATOR_ID;
2138 mb[1] = bus ? ha->bus_settings[bus].id | BIT_7 :
2139 ha->bus_settings[bus].id;
2140 status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
2141
2142
2143 ha->bus_settings[bus].bus_reset_delay =
2144 nv->bus[bus].bus_reset_delay;
2145
2146
2147 ha->bus_settings[bus].hiwat = nv->bus[bus].max_queue_depth - 1;
2148
2149
2150 for (target = 0; target < MAX_TARGETS; target++)
2151 status |= qla1280_config_target(ha, bus, target);
2152
2153 return status;
2154}
2155
2156static int
2157qla1280_nvram_config(struct scsi_qla_host *ha)
2158{
2159 struct device_reg __iomem *reg = ha->iobase;
2160 struct nvram *nv = &ha->nvram;
2161 int bus, target, status = 0;
2162 uint16_t mb[MAILBOX_REGISTER_COUNT];
2163
2164 ENTER("qla1280_nvram_config");
2165
2166 if (ha->nvram_valid) {
2167
2168 for (bus = 0; bus < MAX_BUSES; bus++)
2169 for (target = 0; target < MAX_TARGETS; target++) {
2170 nv->bus[bus].target[target].parameter.
2171 auto_request_sense = 1;
2172 }
2173 } else {
2174 qla1280_set_defaults(ha);
2175 }
2176
2177 qla1280_print_settings(nv);
2178
2179
2180 ha->flags.disable_risc_code_load =
2181 nv->cntr_flags_1.disable_loading_risc_code;
2182
	if (IS_ISP1040(ha)) {
		uint16_t hwrev, cfg1, cdma_conf;

		hwrev = RD_REG_WORD(&reg->cfg_0) & ISP_CFG0_HWMSK;

		cfg1 = RD_REG_WORD(&reg->cfg_1) & ~(BIT_4 | BIT_5 | BIT_6);
		cdma_conf = RD_REG_WORD(&reg->cdma_cfg);

		/* The 1040A revision keeps its default FIFO threshold. */
		if (hwrev != ISP_CFG0_1040A)
			cfg1 |= nv->isp_config.fifo_threshold << 4;

		cfg1 |= nv->isp_config.burst_enable << 2;
		WRT_REG_WORD(&reg->cfg_1, cfg1);

		WRT_REG_WORD(&reg->cdma_cfg, cdma_conf | CDMA_CONF_BENAB);
		WRT_REG_WORD(&reg->ddma_cfg, cdma_conf | DDMA_CONF_BENAB);
	} else {
		uint16_t cfg1, term;

		/* Set the FIFO threshold and burst enable bits. */
		cfg1 = nv->isp_config.fifo_threshold << 4;
		cfg1 |= nv->isp_config.burst_enable << 2;
		if (ha->ports > 1)
			cfg1 |= BIT_13;
		WRT_REG_WORD(&reg->cfg_1, cfg1);

		/* Set SCSI termination through the GPIO pins. */
		WRT_REG_WORD(&reg->gpio_enable,
			     BIT_7 | BIT_3 | BIT_2 | BIT_1 | BIT_0);
		term = nv->termination.scsi_bus_1_control;
		term |= nv->termination.scsi_bus_0_control << 2;
		term |= nv->termination.auto_term_support << 7;
		RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
		WRT_REG_WORD(&reg->gpio_data, term);
	}
	RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
2221
2222
2223 mb[0] = MBC_SET_SYSTEM_PARAMETER;
2224 mb[1] = nv->isp_parameter;
2225 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
2226
2227 if (IS_ISP1x40(ha)) {
2228
2229 mb[0] = MBC_SET_CLOCK_RATE;
2230 mb[1] = 40;
2231 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2232 }
2233
2234
2235 mb[0] = MBC_SET_FIRMWARE_FEATURES;
2236 mb[1] = nv->firmware_feature.f.enable_fast_posting;
2237 mb[1] |= nv->firmware_feature.f.report_lvd_bus_transition << 1;
2238 mb[1] |= nv->firmware_feature.f.disable_synchronous_backoff << 5;
2239 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2240
2241
2242 mb[0] = MBC_SET_RETRY_COUNT;
2243 mb[1] = nv->bus[0].retry_count;
2244 mb[2] = nv->bus[0].retry_delay;
2245 mb[6] = nv->bus[1].retry_count;
2246 mb[7] = nv->bus[1].retry_delay;
2247 status |= qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_2 |
2248 BIT_1 | BIT_0, &mb[0]);
2249
2250
2251 mb[0] = MBC_SET_ASYNC_DATA_SETUP;
2252 mb[1] = nv->bus[0].config_2.async_data_setup_time;
2253 mb[2] = nv->bus[1].config_2.async_data_setup_time;
2254 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2255
2256
2257 mb[0] = MBC_SET_ACTIVE_NEGATION;
2258 mb[1] = 0;
2259 if (nv->bus[0].config_2.req_ack_active_negation)
2260 mb[1] |= BIT_5;
2261 if (nv->bus[0].config_2.data_line_active_negation)
2262 mb[1] |= BIT_4;
2263 mb[2] = 0;
2264 if (nv->bus[1].config_2.req_ack_active_negation)
2265 mb[2] |= BIT_5;
2266 if (nv->bus[1].config_2.data_line_active_negation)
2267 mb[2] |= BIT_4;
2268 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2269
2270 mb[0] = MBC_SET_DATA_OVERRUN_RECOVERY;
2271 mb[1] = 2;
2272 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2273
2274
2275 mb[0] = MBC_SET_PCI_CONTROL;
2276 mb[1] = BIT_1;
2277 mb[2] = BIT_1;
2278 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2279
2280 mb[0] = MBC_SET_TAG_AGE_LIMIT;
2281 mb[1] = 8;
2282 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2283
2284
2285 mb[0] = MBC_SET_SELECTION_TIMEOUT;
2286 mb[1] = nv->bus[0].selection_timeout;
2287 mb[2] = nv->bus[1].selection_timeout;
2288 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2289
2290 for (bus = 0; bus < ha->ports; bus++)
2291 status |= qla1280_config_bus(ha, bus);
2292
2293 if (status)
2294 dprintk(2, "qla1280_nvram_config: **** FAILED ****\n");
2295
2296 LEAVE("qla1280_nvram_config");
2297 return status;
2298}
2299
2300
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310
2311
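/* Read one 16-bit word from the serial NVRAM at the given word address. */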
2312static uint16_t
2313qla1280_get_nvram_word(struct scsi_qla_host *ha, uint32_t address)
2314{
2315 uint32_t nv_cmd;
2316 uint16_t data;
2317
2318 nv_cmd = address << 16;
2319 nv_cmd |= NV_READ_OP;
2320
2321 data = le16_to_cpu(qla1280_nvram_request(ha, nv_cmd));
2322
2323 dprintk(8, "qla1280_get_nvram_word: exiting normally NVRAM data = "
2324 "0x%x", data);
2325
2326 return data;
2327}
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340
2341
2342
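/*
 * Clock the command out to the serial NVRAM bit by bit, then clock the
 * 16 data bits back in and deselect the part.
 */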
2343static uint16_t
2344qla1280_nvram_request(struct scsi_qla_host *ha, uint32_t nv_cmd)
2345{
2346 struct device_reg __iomem *reg = ha->iobase;
2347 int cnt;
2348 uint16_t data = 0;
2349 uint16_t reg_data;
2350
2351
2352
2353 nv_cmd <<= 5;
2354 for (cnt = 0; cnt < 11; cnt++) {
2355 if (nv_cmd & BIT_31)
2356 qla1280_nv_write(ha, NV_DATA_OUT);
2357 else
2358 qla1280_nv_write(ha, 0);
2359 nv_cmd <<= 1;
2360 }
2361
	/* Read the data bits back from the NVRAM. */
	for (cnt = 0; cnt < 16; cnt++) {
		WRT_REG_WORD(&reg->nvram, (NV_SELECT | NV_CLOCK));
		RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
		NVRAM_DELAY();
		data <<= 1;
		reg_data = RD_REG_WORD(&reg->nvram);
		if (reg_data & NV_DATA_IN)
			data |= BIT_0;
		WRT_REG_WORD(&reg->nvram, NV_SELECT);
		RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
		NVRAM_DELAY();
	}

	/* Deselect the chip now that the data has been clocked in. */
	WRT_REG_WORD(&reg->nvram, NV_DESELECT);
	RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
	NVRAM_DELAY();

	return data;
}
2385
static void
qla1280_nv_write(struct scsi_qla_host *ha, uint16_t data)
{
	struct device_reg __iomem *reg = ha->iobase;

	WRT_REG_WORD(&reg->nvram, data | NV_SELECT);
	RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
	NVRAM_DELAY();
	WRT_REG_WORD(&reg->nvram, data | NV_SELECT | NV_CLOCK);
	RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
	NVRAM_DELAY();
	WRT_REG_WORD(&reg->nvram, data | NV_SELECT);
	RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
	NVRAM_DELAY();
}
2401
2402
2403
2404
2405
2406
2407
2408
2409
2410
2411
2412
2413
2414
2415
2416
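/*
 * qla1280_mailbox_command
 *	Load the outgoing mailbox registers selected by the bitmask 'mr',
 *	raise the host interrupt and sleep until the interrupt handler (or
 *	the 20 second timeout) completes the request; the firmware's reply
 *	is copied back into mb[].
 *
 * Returns 0 on success, non-zero on failure.
 */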
2417static int
2418qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
2419{
2420 struct device_reg __iomem *reg = ha->iobase;
2421 int status = 0;
2422 int cnt;
2423 uint16_t *optr, *iptr;
2424 uint16_t __iomem *mptr;
2425 DECLARE_COMPLETION_ONSTACK(wait);
2426
2427 ENTER("qla1280_mailbox_command");
2428
2429 if (ha->mailbox_wait) {
2430 printk(KERN_ERR "Warning mailbox wait already in use!\n");
2431 }
2432 ha->mailbox_wait = &wait;
2433
2434
2435
2436
2437
2438
	mptr = (uint16_t __iomem *) &reg->mailbox0;
2440 iptr = mb;
2441 for (cnt = 0; cnt < MAILBOX_REGISTER_COUNT; cnt++) {
2442 if (mr & BIT_0) {
2443 WRT_REG_WORD(mptr, (*iptr));
2444 }
2445
2446 mr >>= 1;
2447 mptr++;
2448 iptr++;
2449 }
2450
2451
2452
2453
2454 timer_setup(&ha->mailbox_timer, qla1280_mailbox_timeout, 0);
2455 mod_timer(&ha->mailbox_timer, jiffies + 20 * HZ);
2456
	spin_unlock_irq(ha->host->host_lock);
	WRT_REG_WORD(&reg->host_cmd, HC_SET_HOST_INT);
	qla1280_debounce_register(&reg->istatus);
2460
2461 wait_for_completion(&wait);
2462 del_timer_sync(&ha->mailbox_timer);
2463
2464 spin_lock_irq(ha->host->host_lock);
2465
2466 ha->mailbox_wait = NULL;
2467
2468
	if (ha->mailbox_out[0] != MBS_CMD_CMP) {
		printk(KERN_WARNING "qla1280_mailbox_command: Command failed, "
		       "mailbox0 = 0x%04x, mailbox_out0 = 0x%04x, istatus = "
		       "0x%04x\n",
		       mb[0], ha->mailbox_out[0], RD_REG_WORD(&reg->istatus));
		printk(KERN_WARNING "m0 %04x, m1 %04x, m2 %04x, m3 %04x\n",
		       RD_REG_WORD(&reg->mailbox0), RD_REG_WORD(&reg->mailbox1),
		       RD_REG_WORD(&reg->mailbox2), RD_REG_WORD(&reg->mailbox3));
		printk(KERN_WARNING "m4 %04x, m5 %04x, m6 %04x, m7 %04x\n",
		       RD_REG_WORD(&reg->mailbox4), RD_REG_WORD(&reg->mailbox5),
		       RD_REG_WORD(&reg->mailbox6), RD_REG_WORD(&reg->mailbox7));
		status = 1;
	}
2482
2483
2484 optr = mb;
2485 iptr = (uint16_t *) &ha->mailbox_out[0];
2486 mr = MAILBOX_REGISTER_COUNT;
2487 memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t));
2488
2489 if (ha->flags.reset_marker)
2490 qla1280_rst_aen(ha);
2491
2492 if (status)
2493 dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = "
2494 "0x%x ****\n", mb[0]);
2495
2496 LEAVE("qla1280_mailbox_command");
2497 return status;
2498}
2499
2500
2501
2502
2503
2504
2505
2506
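/*
 * Check the ISP once for completed commands by polling the interrupt
 * status register, without waiting for an interrupt.
 */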
2507static void
2508qla1280_poll(struct scsi_qla_host *ha)
2509{
2510 struct device_reg __iomem *reg = ha->iobase;
2511 uint16_t data;
2512 LIST_HEAD(done_q);
2513
2514
2515
2516
	data = RD_REG_WORD(&reg->istatus);
2518 if (data & RISC_INT)
2519 qla1280_isr(ha, &done_q);
2520
2521 if (!ha->mailbox_wait) {
2522 if (ha->flags.reset_marker)
2523 qla1280_rst_aen(ha);
2524 }
2525
2526 if (!list_empty(&done_q))
2527 qla1280_done(ha);
2528
2529
2530}
2531
2532
2533
2534
2535
2536
2537
2538
2539
2540
2541
2542
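/*
 * qla1280_bus_reset
 *	Issue a SCSI bus reset through the firmware and, on success, wait
 *	out the configured reset delay before marking the bus usable again.
 */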
2543static int
2544qla1280_bus_reset(struct scsi_qla_host *ha, int bus)
2545{
2546 uint16_t mb[MAILBOX_REGISTER_COUNT];
2547 uint16_t reset_delay;
2548 int status;
2549
2550 dprintk(3, "qla1280_bus_reset: entered\n");
2551
2552 if (qla1280_verbose)
2553 printk(KERN_INFO "scsi(%li:%i): Resetting SCSI BUS\n",
2554 ha->host_no, bus);
2555
2556 reset_delay = ha->bus_settings[bus].bus_reset_delay;
2557 mb[0] = MBC_BUS_RESET;
2558 mb[1] = reset_delay;
2559 mb[2] = (uint16_t) bus;
2560 status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2561
2562 if (status) {
2563 if (ha->bus_settings[bus].failed_reset_count > 2)
2564 ha->bus_settings[bus].scsi_bus_dead = 1;
2565 ha->bus_settings[bus].failed_reset_count++;
2566 } else {
2567 spin_unlock_irq(ha->host->host_lock);
2568 ssleep(reset_delay);
2569 spin_lock_irq(ha->host->host_lock);
2570
2571 ha->bus_settings[bus].scsi_bus_dead = 0;
2572 ha->bus_settings[bus].failed_reset_count = 0;
2573 ha->bus_settings[bus].reset_marker = 0;
2574
2575 qla1280_marker(ha, bus, 0, 0, MK_SYNC_ALL);
2576 }
2577
2578
2579
2580
2581
2582
2583 if (status)
2584 dprintk(2, "qla1280_bus_reset: **** FAILED ****\n");
2585 else
2586 dprintk(3, "qla1280_bus_reset: exiting normally\n");
2587
2588 return status;
2589}
2590
2591
2592
2593
2594
2595
2596
2597
2598
2599
2600
2601
2602
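/*
 * qla1280_device_reset
 *      Aborts all commands for a single target using the MBC_ABORT_TARGET
 *      mailbox command, then queues a marker IOCB to resynchronize the
 *      firmware for that target.
 */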
2603static int
2604qla1280_device_reset(struct scsi_qla_host *ha, int bus, int target)
2605{
2606 uint16_t mb[MAILBOX_REGISTER_COUNT];
2607 int status;
2608
2609 ENTER("qla1280_device_reset");
2610
2611 mb[0] = MBC_ABORT_TARGET;
2612 mb[1] = (bus ? (target | BIT_7) : target) << 8;
2613 mb[2] = 1;
2614 status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2615
2616
2617 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
2618
2619 if (status)
2620 dprintk(2, "qla1280_device_reset: **** FAILED ****\n");
2621
2622 LEAVE("qla1280_device_reset");
2623 return status;
2624}
2625
2626
2627
2628
2629
2630
2631
2632
2633
2634
2635
2636
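/*
 * qla1280_abort_command
 *      Aborts one outstanding command, identified by its request handle,
 *      using the MBC_ABORT_COMMAND mailbox command.  The SRB_ABORT_PENDING
 *      flag is cleared again if the mailbox command fails.
 */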
2637static int
2638qla1280_abort_command(struct scsi_qla_host *ha, struct srb * sp, int handle)
2639{
2640 uint16_t mb[MAILBOX_REGISTER_COUNT];
2641 unsigned int bus, target, lun;
2642 int status;
2643
2644 ENTER("qla1280_abort_command");
2645
2646 bus = SCSI_BUS_32(sp->cmd);
2647 target = SCSI_TCN_32(sp->cmd);
2648 lun = SCSI_LUN_32(sp->cmd);
2649
2650 sp->flags |= SRB_ABORT_PENDING;
2651
2652 mb[0] = MBC_ABORT_COMMAND;
2653 mb[1] = (bus ? target | BIT_7 : target) << 8 | lun;
2654 mb[2] = handle >> 16;
2655 mb[3] = handle & 0xffff;
2656 status = qla1280_mailbox_command(ha, 0x0f, &mb[0]);
2657
2658 if (status) {
2659 dprintk(2, "qla1280_abort_command: **** FAILED ****\n");
2660 sp->flags &= ~SRB_ABORT_PENDING;
2661 }
2662
2663
2664 LEAVE("qla1280_abort_command");
2665 return status;
2666}
2667
2668
2669
2670
2671
2672
2673
2674
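/*
 * qla1280_reset_adapter
 *      Takes the adapter offline and resets the ISP chip and RISC
 *      processor; the trailing read of id_l forces the posted writes out.
 */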
2675static void
2676qla1280_reset_adapter(struct scsi_qla_host *ha)
2677{
2678 struct device_reg __iomem *reg = ha->iobase;
2679
2680 ENTER("qla1280_reset_adapter");
2681
2682
2683 ha->flags.online = 0;
2684	WRT_REG_WORD(&reg->ictrl, ISP_RESET);
2685	WRT_REG_WORD(&reg->host_cmd,
2686		     HC_RESET_RISC | HC_RELEASE_RISC | HC_DISABLE_BIOS);
2687	RD_REG_WORD(&reg->id_l);
2688
2689 LEAVE("qla1280_reset_adapter");
2690}
2691
2692
2693
2694
2695
2696
2697
2698
2699
2700
2701
2702
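/*
 * qla1280_marker
 *      Builds and issues a marker IOCB so the firmware resynchronizes its
 *      view of the given bus/target/lun after a reset.
 */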
2703static void
2704qla1280_marker(struct scsi_qla_host *ha, int bus, int id, int lun, u8 type)
2705{
2706 struct mrk_entry *pkt;
2707
2708 ENTER("qla1280_marker");
2709
2710
2711 if ((pkt = (struct mrk_entry *) qla1280_req_pkt(ha))) {
2712 pkt->entry_type = MARKER_TYPE;
2713 pkt->lun = (uint8_t) lun;
2714 pkt->target = (uint8_t) (bus ? (id | BIT_7) : id);
2715 pkt->modifier = type;
2716 pkt->entry_status = 0;
2717
2718
2719 qla1280_isp_cmd(ha);
2720 }
2721
2722 LEAVE("qla1280_marker");
2723}
2724
2725
2726
2727
2728
2729
2730
2731
2732
2733
2734
2735
2736
2737
2738#ifdef QLA_64BIT_PTR
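/*
 * qla1280_64bit_start_scsi
 *      Builds a 64-bit-address command IOCB (plus continuation IOCBs for
 *      large scatter/gather lists: two data segments fit in the command
 *      entry, five in each continuation entry) and rings the request-queue
 *      doorbell.
 *
 * Returns 0 on success; SCSI_MLQUEUE_HOST_BUSY when the request ring or
 * the outstanding-command array is full.
 */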
2739static int
2740qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2741{
2742 struct device_reg __iomem *reg = ha->iobase;
2743 struct scsi_cmnd *cmd = sp->cmd;
2744 cmd_a64_entry_t *pkt;
2745 __le32 *dword_ptr;
2746 dma_addr_t dma_handle;
2747 int status = 0;
2748 int cnt;
2749 int req_cnt;
2750 int seg_cnt;
2751 u8 dir;
2752
2753 ENTER("qla1280_64bit_start_scsi:");
2754
2755
2756 req_cnt = 1;
2757 seg_cnt = scsi_dma_map(cmd);
2758 if (seg_cnt > 0) {
2759 if (seg_cnt > 2) {
2760 req_cnt += (seg_cnt - 2) / 5;
2761 if ((seg_cnt - 2) % 5)
2762 req_cnt++;
2763 }
2764 } else if (seg_cnt < 0) {
2765 status = 1;
2766 goto out;
2767 }
2768
2769 if ((req_cnt + 2) >= ha->req_q_cnt) {
2770
2771		cnt = RD_REG_WORD(&reg->mailbox4);
2772 if (ha->req_ring_index < cnt)
2773 ha->req_q_cnt = cnt - ha->req_ring_index;
2774 else
2775 ha->req_q_cnt =
2776 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
2777 }
2778
2779 dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
2780 ha->req_q_cnt, seg_cnt);
2781
2782
2783 if ((req_cnt + 2) >= ha->req_q_cnt) {
2784 status = SCSI_MLQUEUE_HOST_BUSY;
2785		dprintk(2, "qla1280_64bit_start_scsi: in-ptr=0x%x, req_q_cnt="
2786			"0x%x, req_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt,
2787			req_cnt);
2788 goto out;
2789 }
2790
2791
2792 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
2793 ha->outstanding_cmds[cnt] != NULL; cnt++);
2794
2795 if (cnt >= MAX_OUTSTANDING_COMMANDS) {
2796 status = SCSI_MLQUEUE_HOST_BUSY;
2797		dprintk(2, "qla1280_64bit_start_scsi: NO ROOM IN "
2798			"OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt);
2799 goto out;
2800 }
2801
2802 ha->outstanding_cmds[cnt] = sp;
2803 ha->req_q_cnt -= req_cnt;
2804 CMD_HANDLE(sp->cmd) = (unsigned char *)(unsigned long)(cnt + 1);
2805
2806	dprintk(2, "start: cmd=%p sp=%p CDB=%x, handle %lx\n", cmd, sp,
2807 cmd->cmnd[0], (long)CMD_HANDLE(sp->cmd));
2808 dprintk(2, " bus %i, target %i, lun %i\n",
2809 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
2810 qla1280_dump_buffer(2, cmd->cmnd, MAX_COMMAND_SIZE);
2811
2812
2813
2814
2815 pkt = (cmd_a64_entry_t *) ha->request_ring_ptr;
2816
2817 pkt->entry_type = COMMAND_A64_TYPE;
2818 pkt->entry_count = (uint8_t) req_cnt;
2819 pkt->sys_define = (uint8_t) ha->req_ring_index;
2820 pkt->entry_status = 0;
2821 pkt->handle = cpu_to_le32(cnt);
2822
2823
2824 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
2825
2826
2827 pkt->timeout = cpu_to_le16(scsi_cmd_to_rq(cmd)->timeout / HZ);
2828
2829
2830 pkt->lun = SCSI_LUN_32(cmd);
2831 pkt->target = SCSI_BUS_32(cmd) ?
2832 (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
2833
2834
2835 if (cmd->device->simple_tags)
2836 pkt->control_flags |= cpu_to_le16(BIT_3);
2837
2838
2839 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
2840 memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
2841
2842
2843
2844 dir = qla1280_data_direction(cmd);
2845 pkt->control_flags |= cpu_to_le16(dir);
2846
2847
2848 pkt->dseg_count = cpu_to_le16(seg_cnt);
2849
2850
2851
2852
2853 if (seg_cnt) {
2854 struct scatterlist *sg, *s;
2855 int remseg = seg_cnt;
2856
2857 sg = scsi_sglist(cmd);
2858
2859
2860 dword_ptr = (u32 *)&pkt->dseg_0_address;
2861
2862
2863 for_each_sg(sg, s, seg_cnt, cnt) {
2864 if (cnt == 2)
2865 break;
2866
2867 dma_handle = sg_dma_address(s);
2868 *dword_ptr++ =
2869 cpu_to_le32(lower_32_bits(dma_handle));
2870 *dword_ptr++ =
2871 cpu_to_le32(upper_32_bits(dma_handle));
2872 *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
2873 dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n",
2874 cpu_to_le32(upper_32_bits(dma_handle)),
2875 cpu_to_le32(lower_32_bits(dma_handle)),
2876				cpu_to_le32(sg_dma_len(s)));
2877 remseg--;
2878 }
2879 dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather "
2880 "command packet data - b %i, t %i, l %i \n",
2881 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
2882 SCSI_LUN_32(cmd));
2883 qla1280_dump_buffer(5, (char *)pkt,
2884 REQUEST_ENTRY_SIZE);
2885
2886
2887
2888
2889 dprintk(3, "S/G Building Continuation...seg_cnt=0x%x "
2890 "remains\n", seg_cnt);
2891
2892 while (remseg > 0) {
2893
2894 sg = s;
2895
2896 ha->req_ring_index++;
2897 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
2898 ha->req_ring_index = 0;
2899 ha->request_ring_ptr =
2900 ha->request_ring;
2901 } else
2902 ha->request_ring_ptr++;
2903
2904 pkt = (cmd_a64_entry_t *)ha->request_ring_ptr;
2905
2906
2907 memset(pkt, 0, REQUEST_ENTRY_SIZE);
2908
2909
2910 ((struct cont_a64_entry *) pkt)->entry_type =
2911 CONTINUE_A64_TYPE;
2912 ((struct cont_a64_entry *) pkt)->entry_count = 1;
2913 ((struct cont_a64_entry *) pkt)->sys_define =
2914 (uint8_t)ha->req_ring_index;
2915
2916 dword_ptr =
2917 (u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address;
2918
2919
2920 for_each_sg(sg, s, remseg, cnt) {
2921 if (cnt == 5)
2922 break;
2923 dma_handle = sg_dma_address(s);
2924 *dword_ptr++ =
2925 cpu_to_le32(lower_32_bits(dma_handle));
2926 *dword_ptr++ =
2927 cpu_to_le32(upper_32_bits(dma_handle));
2928 *dword_ptr++ =
2929 cpu_to_le32(sg_dma_len(s));
2930 dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n",
2931 cpu_to_le32(upper_32_bits(dma_handle)),
2932 cpu_to_le32(lower_32_bits(dma_handle)),
2933 cpu_to_le32(sg_dma_len(s)));
2934 }
2935 remseg -= cnt;
2936 dprintk(5, "qla1280_64bit_start_scsi: "
2937 "continuation packet data - b %i, t "
2938 "%i, l %i \n", SCSI_BUS_32(cmd),
2939 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
2940 qla1280_dump_buffer(5, (char *)pkt,
2941 REQUEST_ENTRY_SIZE);
2942 }
2943 } else {
2944 dprintk(5, "qla1280_64bit_start_scsi: No data, command "
2945 "packet data - b %i, t %i, l %i \n",
2946 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
2947 qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
2948 }
2949
2950 ha->req_ring_index++;
2951 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
2952 ha->req_ring_index = 0;
2953 ha->request_ring_ptr = ha->request_ring;
2954 } else
2955 ha->request_ring_ptr++;
2956
2957
2958 dprintk(2,
2959 "qla1280_64bit_start_scsi: Wakeup RISC for pending command\n");
2960 sp->flags |= SRB_SENT;
2961 ha->actthreads++;
2962	WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
2963
2964 out:
2965 if (status)
2966 dprintk(2, "qla1280_64bit_start_scsi: **** FAILED ****\n");
2967 else
2968 dprintk(3, "qla1280_64bit_start_scsi: exiting normally\n");
2969
2970 return status;
2971}
2972#else
2973
2974
2975
2976
2977
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988
2989
2990
2991
2992
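/*
 * qla1280_32bit_start_scsi
 *      32-bit-address variant of the command builder: four data segments
 *      fit in the command entry and seven in each continuation entry.
 *
 * Returns 0 on success; SCSI_MLQUEUE_HOST_BUSY when the request ring or
 * the outstanding-command array is full.
 */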
2993static int
2994qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2995{
2996 struct device_reg __iomem *reg = ha->iobase;
2997 struct scsi_cmnd *cmd = sp->cmd;
2998 struct cmd_entry *pkt;
2999 __le32 *dword_ptr;
3000 int status = 0;
3001 int cnt;
3002 int req_cnt;
3003 int seg_cnt;
3004 u8 dir;
3005
3006 ENTER("qla1280_32bit_start_scsi");
3007
3008 dprintk(1, "32bit_start: cmd=%p sp=%p CDB=%x\n", cmd, sp,
3009 cmd->cmnd[0]);
3010
3011
3012 req_cnt = 1;
3013 seg_cnt = scsi_dma_map(cmd);
3014	if (seg_cnt > 0) {
3015
3016
3017
3018
3019 if (seg_cnt > 4) {
3020 req_cnt += (seg_cnt - 4) / 7;
3021 if ((seg_cnt - 4) % 7)
3022 req_cnt++;
3023 }
3024 dprintk(3, "S/G Transfer cmd=%p seg_cnt=0x%x, req_cnt=%x\n",
3025 cmd, seg_cnt, req_cnt);
3026 } else if (seg_cnt < 0) {
3027 status = 1;
3028 goto out;
3029 }
3030
3031 if ((req_cnt + 2) >= ha->req_q_cnt) {
3032
3033		cnt = RD_REG_WORD(&reg->mailbox4);
3034 if (ha->req_ring_index < cnt)
3035 ha->req_q_cnt = cnt - ha->req_ring_index;
3036 else
3037 ha->req_q_cnt =
3038 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
3039 }
3040
3041 dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
3042 ha->req_q_cnt, seg_cnt);
3043
3044 if ((req_cnt + 2) >= ha->req_q_cnt) {
3045 status = SCSI_MLQUEUE_HOST_BUSY;
3046 dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, "
3047 "req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index,
3048 ha->req_q_cnt, req_cnt);
3049 goto out;
3050 }
3051
3052
3053 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
3054 ha->outstanding_cmds[cnt]; cnt++);
3055
3056 if (cnt >= MAX_OUTSTANDING_COMMANDS) {
3057 status = SCSI_MLQUEUE_HOST_BUSY;
3058 dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING "
3059 "ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt);
3060 goto out;
3061 }
3062
3063 CMD_HANDLE(sp->cmd) = (unsigned char *) (unsigned long)(cnt + 1);
3064 ha->outstanding_cmds[cnt] = sp;
3065 ha->req_q_cnt -= req_cnt;
3066
3067
3068
3069
3070 pkt = (struct cmd_entry *) ha->request_ring_ptr;
3071
3072 pkt->entry_type = COMMAND_TYPE;
3073 pkt->entry_count = (uint8_t) req_cnt;
3074 pkt->sys_define = (uint8_t) ha->req_ring_index;
3075 pkt->entry_status = 0;
3076 pkt->handle = cpu_to_le32(cnt);
3077
3078
3079 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
3080
3081
3082 pkt->timeout = cpu_to_le16(scsi_cmd_to_rq(cmd)->timeout / HZ);
3083
3084
3085 pkt->lun = SCSI_LUN_32(cmd);
3086 pkt->target = SCSI_BUS_32(cmd) ?
3087 (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
3088
3089
3090 if (cmd->device->simple_tags)
3091 pkt->control_flags |= cpu_to_le16(BIT_3);
3092
3093
3094 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
3095 memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
3096
3097
3098
3099 dir = qla1280_data_direction(cmd);
3100 pkt->control_flags |= cpu_to_le16(dir);
3101
3102
3103 pkt->dseg_count = cpu_to_le16(seg_cnt);
3104
3105
3106
3107
3108 if (seg_cnt) {
3109 struct scatterlist *sg, *s;
3110 int remseg = seg_cnt;
3111
3112 sg = scsi_sglist(cmd);
3113
3114
3115 dword_ptr = &pkt->dseg_0_address;
3116
3117 dprintk(3, "Building S/G data segments..\n");
3118 qla1280_dump_buffer(1, (char *)sg, 4 * 16);
3119
3120
3121 for_each_sg(sg, s, seg_cnt, cnt) {
3122 if (cnt == 4)
3123 break;
3124 *dword_ptr++ =
3125 cpu_to_le32(lower_32_bits(sg_dma_address(s)));
3126 *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
3127			dprintk(3, "S/G Segment phys_addr=0x%x, len=0x%x\n",
3128				(lower_32_bits(sg_dma_address(s))),
3129				(sg_dma_len(s)));
3130 remseg--;
3131 }
3132
3133
3134
3135 dprintk(3, "S/G Building Continuation"
3136 "...seg_cnt=0x%x remains\n", seg_cnt);
3137 while (remseg > 0) {
3138
3139 sg = s;
3140
3141 ha->req_ring_index++;
3142 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3143 ha->req_ring_index = 0;
3144 ha->request_ring_ptr =
3145 ha->request_ring;
3146 } else
3147 ha->request_ring_ptr++;
3148
3149 pkt = (struct cmd_entry *)ha->request_ring_ptr;
3150
3151
3152 memset(pkt, 0, REQUEST_ENTRY_SIZE);
3153
3154
3155 ((struct cont_entry *) pkt)->
3156 entry_type = CONTINUE_TYPE;
3157 ((struct cont_entry *) pkt)->entry_count = 1;
3158
3159 ((struct cont_entry *) pkt)->sys_define =
3160 (uint8_t) ha->req_ring_index;
3161
3162
3163 dword_ptr =
3164 &((struct cont_entry *) pkt)->dseg_0_address;
3165
3166
3167 for_each_sg(sg, s, remseg, cnt) {
3168 if (cnt == 7)
3169 break;
3170 *dword_ptr++ =
3171 cpu_to_le32(lower_32_bits(sg_dma_address(s)));
3172 *dword_ptr++ =
3173 cpu_to_le32(sg_dma_len(s));
3174 dprintk(1,
3175 "S/G Segment Cont. phys_addr=0x%x, "
3176 "len=0x%x\n",
3177 cpu_to_le32(lower_32_bits(sg_dma_address(s))),
3178 cpu_to_le32(sg_dma_len(s)));
3179 }
3180 remseg -= cnt;
3181 dprintk(5, "qla1280_32bit_start_scsi: "
3182 "continuation packet data - "
3183 "scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd),
3184 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
3185 qla1280_dump_buffer(5, (char *)pkt,
3186 REQUEST_ENTRY_SIZE);
3187 }
3188 } else {
3189 dprintk(5, "qla1280_32bit_start_scsi: No data, command "
3190 "packet data - \n");
3191 qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
3192 }
3193 dprintk(5, "qla1280_32bit_start_scsi: First IOCB block:\n");
3194 qla1280_dump_buffer(5, (char *)ha->request_ring_ptr,
3195 REQUEST_ENTRY_SIZE);
3196
3197
3198 ha->req_ring_index++;
3199 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3200 ha->req_ring_index = 0;
3201 ha->request_ring_ptr = ha->request_ring;
3202 } else
3203 ha->request_ring_ptr++;
3204
3205
3206 dprintk(2, "qla1280_32bit_start_scsi: Wakeup RISC "
3207 "for pending command\n");
3208 sp->flags |= SRB_SENT;
3209 ha->actthreads++;
3210	WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
3211
3212out:
3213 if (status)
3214 dprintk(2, "qla1280_32bit_start_scsi: **** FAILED ****\n");
3215
3216 LEAVE("qla1280_32bit_start_scsi");
3217
3218 return status;
3219}
3220#endif
3221
3222
3223
3224
3225
3226
3227
3228
3229
3230
3231
3232
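/*
 * qla1280_req_pkt
 *      Reserves the next free request-ring entry, zeroing it and polling
 *      the ISP (for up to roughly 30 seconds) while the ring is full.
 *
 * Returns a pointer to the entry, or NULL if none became available.
 */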
3233static request_t *
3234qla1280_req_pkt(struct scsi_qla_host *ha)
3235{
3236 struct device_reg __iomem *reg = ha->iobase;
3237 request_t *pkt = NULL;
3238 int cnt;
3239 uint32_t timer;
3240
3241 ENTER("qla1280_req_pkt");
3242
3243
3244
3245
3246
3247 for (timer = 15000000; timer; timer--) {
3248 if (ha->req_q_cnt > 0) {
3249
3250			cnt = RD_REG_WORD(&reg->mailbox4);
3251 if (ha->req_ring_index < cnt)
3252 ha->req_q_cnt = cnt - ha->req_ring_index;
3253 else
3254 ha->req_q_cnt =
3255 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
3256 }
3257
3258
3259 if (ha->req_q_cnt > 0) {
3260 ha->req_q_cnt--;
3261 pkt = ha->request_ring_ptr;
3262
3263
3264 memset(pkt, 0, REQUEST_ENTRY_SIZE);
3265
3266
3267
3268
3269
3270
3271 pkt->sys_define = (uint8_t) ha->req_ring_index;
3272
3273
3274 pkt->entry_count = 1;
3275
3276 break;
3277 }
3278
3279 udelay(2);
3280
3281
3282 qla1280_poll(ha);
3283 }
3284
3285 if (!pkt)
3286 dprintk(2, "qla1280_req_pkt: **** FAILED ****\n");
3287 else
3288 dprintk(3, "qla1280_req_pkt: exiting normally\n");
3289
3290 return pkt;
3291}
3292
3293
3294
3295
3296
3297
3298
3299
3300
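/*
 * qla1280_isp_cmd
 *      Advances the request-ring pointer past the just-built IOCB and
 *      notifies the ISP by writing the new index to mailbox register 4.
 */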
3301static void
3302qla1280_isp_cmd(struct scsi_qla_host *ha)
3303{
3304 struct device_reg __iomem *reg = ha->iobase;
3305
3306 ENTER("qla1280_isp_cmd");
3307
3308 dprintk(5, "qla1280_isp_cmd: IOCB data:\n");
3309 qla1280_dump_buffer(5, (char *)ha->request_ring_ptr,
3310 REQUEST_ENTRY_SIZE);
3311
3312
3313 ha->req_ring_index++;
3314 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3315 ha->req_ring_index = 0;
3316 ha->request_ring_ptr = ha->request_ring;
3317 } else
3318 ha->request_ring_ptr++;
3319
3320
3321
3322
3323	WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
3324
3325 LEAVE("qla1280_isp_cmd");
3326}
3327
3328
3329
3330
3331
3332
3333
3334
3335
3336
3337
3338
3339
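/*
 * qla1280_isr
 *      Interrupt service body: handles mailbox completions and
 *      asynchronous events, then drains the response ring, adding
 *      completed commands to done_q for later completion.
 */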
3340static void
3341qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q)
3342{
3343 struct device_reg __iomem *reg = ha->iobase;
3344 struct response *pkt;
3345 struct srb *sp = NULL;
3346 uint16_t mailbox[MAILBOX_REGISTER_COUNT];
3347 uint16_t *wptr;
3348 uint32_t index;
3349 u16 istatus;
3350
3351 ENTER("qla1280_isr");
3352
3353	istatus = RD_REG_WORD(&reg->istatus);
3354 if (!(istatus & (RISC_INT | PCI_INT)))
3355 return;
3356
3357
3358	mailbox[5] = RD_REG_WORD(&reg->mailbox5);
3359
3360
3361
3362	mailbox[0] = RD_REG_WORD_dmasync(&reg->semaphore);
3363
3364 if (mailbox[0] & BIT_0) {
3365
3366
3367
3368 wptr = &mailbox[0];
3369		*wptr++ = RD_REG_WORD(&reg->mailbox0);
3370		*wptr++ = RD_REG_WORD(&reg->mailbox1);
3371		*wptr = RD_REG_WORD(&reg->mailbox2);
3372 if (mailbox[0] != MBA_SCSI_COMPLETION) {
3373 wptr++;
3374			*wptr++ = RD_REG_WORD(&reg->mailbox3);
3375			*wptr++ = RD_REG_WORD(&reg->mailbox4);
3376			wptr++;
3377			*wptr++ = RD_REG_WORD(&reg->mailbox6);
3378			*wptr = RD_REG_WORD(&reg->mailbox7);
3379 }
3380
3381
3382
3383		WRT_REG_WORD(&reg->semaphore, 0);
3384		WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
3385
3386 dprintk(5, "qla1280_isr: mailbox interrupt mailbox[0] = 0x%x",
3387 mailbox[0]);
3388
3389
3390 switch (mailbox[0]) {
3391 case MBA_SCSI_COMPLETION:
3392 dprintk(5, "qla1280_isr: mailbox SCSI response "
3393 "completion\n");
3394
3395 if (ha->flags.online) {
3396
3397 index = mailbox[2] << 16 | mailbox[1];
3398
3399
3400 if (index < MAX_OUTSTANDING_COMMANDS)
3401 sp = ha->outstanding_cmds[index];
3402 else
3403 sp = NULL;
3404
3405 if (sp) {
3406
3407 ha->outstanding_cmds[index] = NULL;
3408
3409
3410 CMD_RESULT(sp->cmd) = 0;
3411 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3412
3413
3414 list_add_tail(&sp->list, done_q);
3415 } else {
3416
3417
3418
3419 printk(KERN_WARNING
3420 "qla1280: ISP invalid handle\n");
3421 }
3422 }
3423 break;
3424
3425 case MBA_BUS_RESET:
3426 ha->flags.reset_marker = 1;
3427 index = mailbox[6] & BIT_0;
3428 ha->bus_settings[index].reset_marker = 1;
3429
3430 printk(KERN_DEBUG "qla1280_isr(): index %i "
3431 "asynchronous BUS_RESET\n", index);
3432 break;
3433
3434 case MBA_SYSTEM_ERR:
3435 printk(KERN_WARNING
3436 "qla1280: ISP System Error - mbx1=%xh, mbx2="
3437 "%xh, mbx3=%xh\n", mailbox[1], mailbox[2],
3438 mailbox[3]);
3439 break;
3440
3441 case MBA_REQ_TRANSFER_ERR:
3442 printk(KERN_WARNING
3443 "qla1280: ISP Request Transfer Error\n");
3444 break;
3445
3446 case MBA_RSP_TRANSFER_ERR:
3447 printk(KERN_WARNING
3448 "qla1280: ISP Response Transfer Error\n");
3449 break;
3450
3451 case MBA_WAKEUP_THRES:
3452 dprintk(2, "qla1280_isr: asynchronous WAKEUP_THRES\n");
3453 break;
3454
3455 case MBA_TIMEOUT_RESET:
3456 dprintk(2,
3457 "qla1280_isr: asynchronous TIMEOUT_RESET\n");
3458 break;
3459
3460 case MBA_DEVICE_RESET:
3461 printk(KERN_INFO "qla1280_isr(): asynchronous "
3462 "BUS_DEVICE_RESET\n");
3463
3464 ha->flags.reset_marker = 1;
3465 index = mailbox[6] & BIT_0;
3466 ha->bus_settings[index].reset_marker = 1;
3467 break;
3468
3469 case MBA_BUS_MODE_CHANGE:
3470 dprintk(2,
3471 "qla1280_isr: asynchronous BUS_MODE_CHANGE\n");
3472 break;
3473
3474 default:
3475
3476 if (mailbox[0] < MBA_ASYNC_EVENT) {
3477 wptr = &mailbox[0];
3478 memcpy((uint16_t *) ha->mailbox_out, wptr,
3479 MAILBOX_REGISTER_COUNT *
3480 sizeof(uint16_t));
3481
3482 if(ha->mailbox_wait != NULL)
3483 complete(ha->mailbox_wait);
3484 }
3485 break;
3486 }
3487 } else {
3488		WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
3489 }
3490
3491
3492
3493
3494
3495 if (!(ha->flags.online && !ha->mailbox_wait)) {
3496 dprintk(2, "qla1280_isr: Response pointer Error\n");
3497 goto out;
3498 }
3499
3500 if (mailbox[5] >= RESPONSE_ENTRY_CNT)
3501 goto out;
3502
3503 while (ha->rsp_ring_index != mailbox[5]) {
3504 pkt = ha->response_ring_ptr;
3505
3506 dprintk(5, "qla1280_isr: ha->rsp_ring_index = 0x%x, mailbox[5]"
3507 " = 0x%x\n", ha->rsp_ring_index, mailbox[5]);
3508 dprintk(5,"qla1280_isr: response packet data\n");
3509 qla1280_dump_buffer(5, (char *)pkt, RESPONSE_ENTRY_SIZE);
3510
3511 if (pkt->entry_type == STATUS_TYPE) {
3512 if ((le16_to_cpu(pkt->scsi_status) & 0xff)
3513 || pkt->comp_status || pkt->entry_status) {
3514 dprintk(2, "qla1280_isr: ha->rsp_ring_index = "
3515 "0x%x mailbox[5] = 0x%x, comp_status "
3516 "= 0x%x, scsi_status = 0x%x\n",
3517 ha->rsp_ring_index, mailbox[5],
3518 le16_to_cpu(pkt->comp_status),
3519 le16_to_cpu(pkt->scsi_status));
3520 }
3521 } else {
3522 dprintk(2, "qla1280_isr: ha->rsp_ring_index = "
3523 "0x%x, mailbox[5] = 0x%x\n",
3524 ha->rsp_ring_index, mailbox[5]);
3525 dprintk(2, "qla1280_isr: response packet data\n");
3526 qla1280_dump_buffer(2, (char *)pkt,
3527 RESPONSE_ENTRY_SIZE);
3528 }
3529
3530 if (pkt->entry_type == STATUS_TYPE || pkt->entry_status) {
3531 dprintk(2, "status: Cmd %p, handle %i\n",
3532 ha->outstanding_cmds[pkt->handle]->cmd,
3533 pkt->handle);
3534 if (pkt->entry_type == STATUS_TYPE)
3535 qla1280_status_entry(ha, pkt, done_q);
3536 else
3537 qla1280_error_entry(ha, pkt, done_q);
3538
3539 ha->rsp_ring_index++;
3540 if (ha->rsp_ring_index == RESPONSE_ENTRY_CNT) {
3541 ha->rsp_ring_index = 0;
3542 ha->response_ring_ptr = ha->response_ring;
3543 } else
3544 ha->response_ring_ptr++;
3545			WRT_REG_WORD(&reg->mailbox5, ha->rsp_ring_index);
3546 }
3547 }
3548
3549 out:
3550 LEAVE("qla1280_isr");
3551}
3552
3553
3554
3555
3556
3557
3558
3559
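/*
 * qla1280_rst_aen
 *      Processes a bus-reset asynchronous event by issuing a MK_SYNC_ALL
 *      marker for every bus that still has its reset marker set.
 */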
3560static void
3561qla1280_rst_aen(struct scsi_qla_host *ha)
3562{
3563 uint8_t bus;
3564
3565 ENTER("qla1280_rst_aen");
3566
3567 if (ha->flags.online && !ha->flags.reset_active &&
3568 !ha->flags.abort_isp_active) {
3569 ha->flags.reset_active = 1;
3570 while (ha->flags.reset_marker) {
3571
3572 ha->flags.reset_marker = 0;
3573 for (bus = 0; bus < ha->ports &&
3574 !ha->flags.reset_marker; bus++) {
3575 if (ha->bus_settings[bus].reset_marker) {
3576 ha->bus_settings[bus].reset_marker = 0;
3577 qla1280_marker(ha, bus, 0, 0,
3578 MK_SYNC_ALL);
3579 }
3580 }
3581 }
3582 }
3583
3584 LEAVE("qla1280_rst_aen");
3585}
3586
3587
3588
3589
3590
3591
3592
3593
3594
3595
3596
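/*
 * qla1280_status_entry
 *      Translates a STATUS_TYPE response entry into a SCSI midlayer
 *      result, copying sense data on CHECK CONDITION, and queues the
 *      command on done_q.
 */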
3597static void
3598qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
3599 struct list_head *done_q)
3600{
3601 int sense_sz;
3602 struct srb *sp;
3603 struct scsi_cmnd *cmd;
3604 uint32_t handle = le32_to_cpu(pkt->handle);
3605 uint16_t scsi_status = le16_to_cpu(pkt->scsi_status);
3606 uint16_t comp_status = le16_to_cpu(pkt->comp_status);
3607
3608 ENTER("qla1280_status_entry");
3609
3610
3611 if (handle < MAX_OUTSTANDING_COMMANDS)
3612 sp = ha->outstanding_cmds[handle];
3613 else
3614 sp = NULL;
3615
3616 if (!sp) {
3617 printk(KERN_WARNING "qla1280: Status Entry invalid handle\n");
3618 goto out;
3619 }
3620
3621
3622 ha->outstanding_cmds[handle] = NULL;
3623
3624 cmd = sp->cmd;
3625
3626 if (comp_status || scsi_status) {
3627 dprintk(3, "scsi: comp_status = 0x%x, scsi_status = "
3628 "0x%x, handle = 0x%x\n", comp_status,
3629 scsi_status, handle);
3630 }
3631
3632
3633 if ((scsi_status & 0xFF) == SAM_STAT_TASK_SET_FULL ||
3634 (scsi_status & 0xFF) == SAM_STAT_BUSY) {
3635 CMD_RESULT(cmd) = scsi_status & 0xff;
3636 } else {
3637
3638
3639 CMD_RESULT(cmd) = qla1280_return_status(pkt, cmd);
3640
3641 if (scsi_status & SAM_STAT_CHECK_CONDITION) {
3642 if (comp_status != CS_ARS_FAILED) {
3643 uint16_t req_sense_length =
3644 le16_to_cpu(pkt->req_sense_length);
3645 if (req_sense_length < CMD_SNSLEN(cmd))
3646 sense_sz = req_sense_length;
3647 else
3648
3649
3650
3651
3652
3653 sense_sz = CMD_SNSLEN(cmd) - 1;
3654
3655 memcpy(cmd->sense_buffer,
3656 &pkt->req_sense_data, sense_sz);
3657 } else
3658 sense_sz = 0;
3659 memset(cmd->sense_buffer + sense_sz, 0,
3660 SCSI_SENSE_BUFFERSIZE - sense_sz);
3661
3662 dprintk(2, "qla1280_status_entry: Check "
3663 "condition Sense data, b %i, t %i, "
3664 "l %i\n", SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
3665 SCSI_LUN_32(cmd));
3666 if (sense_sz)
3667 qla1280_dump_buffer(2,
3668 (char *)cmd->sense_buffer,
3669 sense_sz);
3670 }
3671 }
3672
3673 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3674
3675
3676 list_add_tail(&sp->list, done_q);
3677 out:
3678 LEAVE("qla1280_status_entry");
3679}
3680
3681
3682
3683
3684
3685
3686
3687
3688
3689
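/*
 * qla1280_error_entry
 *      Handles a response entry flagged with an error status, mapping it
 *      to DID_ERROR or DID_BUS_BUSY and completing the command.
 */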
3690static void
3691qla1280_error_entry(struct scsi_qla_host *ha, struct response *pkt,
3692 struct list_head *done_q)
3693{
3694 struct srb *sp;
3695 uint32_t handle = le32_to_cpu(pkt->handle);
3696
3697 ENTER("qla1280_error_entry");
3698
3699 if (pkt->entry_status & BIT_3)
3700 dprintk(2, "qla1280_error_entry: BAD PAYLOAD flag error\n");
3701 else if (pkt->entry_status & BIT_2)
3702 dprintk(2, "qla1280_error_entry: BAD HEADER flag error\n");
3703 else if (pkt->entry_status & BIT_1)
3704 dprintk(2, "qla1280_error_entry: FULL flag error\n");
3705 else
3706 dprintk(2, "qla1280_error_entry: UNKNOWN flag error\n");
3707
3708
3709 if (handle < MAX_OUTSTANDING_COMMANDS)
3710 sp = ha->outstanding_cmds[handle];
3711 else
3712 sp = NULL;
3713
3714 if (sp) {
3715
3716 ha->outstanding_cmds[handle] = NULL;
3717
3718
3719 if (pkt->entry_status & (BIT_3 + BIT_2)) {
3720
3721
3722 CMD_RESULT(sp->cmd) = DID_ERROR << 16;
3723 } else if (pkt->entry_status & BIT_1) {
3724 CMD_RESULT(sp->cmd) = DID_BUS_BUSY << 16;
3725 } else {
3726
3727 CMD_RESULT(sp->cmd) = DID_ERROR << 16;
3728 }
3729
3730 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3731
3732
3733 list_add_tail(&sp->list, done_q);
3734 }
3735#ifdef QLA_64BIT_PTR
3736 else if (pkt->entry_type == COMMAND_A64_TYPE) {
3737		printk(KERN_WARNING "qla1280: Error Entry invalid handle\n");
3738 }
3739#endif
3740
3741 LEAVE("qla1280_error_entry");
3742}
3743
3744
3745
3746
3747
3748
3749
3750
3751
3752
3753
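/*
 * qla1280_abort_isp
 *      Error-recovery path: fails all outstanding commands with DID_RESET,
 *      reloads firmware, reapplies the NVRAM configuration, reinitializes
 *      the rings, and resets every bus.  On failure the adapter is
 *      disabled.
 */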
3754static int
3755qla1280_abort_isp(struct scsi_qla_host *ha)
3756{
3757 struct device_reg __iomem *reg = ha->iobase;
3758 struct srb *sp;
3759 int status = 0;
3760 int cnt;
3761 int bus;
3762
3763 ENTER("qla1280_abort_isp");
3764
3765 if (ha->flags.abort_isp_active || !ha->flags.online)
3766 goto out;
3767
3768 ha->flags.abort_isp_active = 1;
3769
3770
3771 qla1280_disable_intrs(ha);
3772	WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
3773	RD_REG_WORD(&reg->id_l);
3774
3775 printk(KERN_INFO "scsi(%li): dequeuing outstanding commands\n",
3776 ha->host_no);
3777
3778 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
3779 struct scsi_cmnd *cmd;
3780 sp = ha->outstanding_cmds[cnt];
3781 if (sp) {
3782 cmd = sp->cmd;
3783 CMD_RESULT(cmd) = DID_RESET << 16;
3784 CMD_HANDLE(cmd) = COMPLETED_HANDLE;
3785 ha->outstanding_cmds[cnt] = NULL;
3786 list_add_tail(&sp->list, &ha->done_q);
3787 }
3788 }
3789
3790 qla1280_done(ha);
3791
3792 status = qla1280_load_firmware(ha);
3793 if (status)
3794 goto out;
3795
3796
3797 qla1280_nvram_config (ha);
3798
3799 status = qla1280_init_rings(ha);
3800 if (status)
3801 goto out;
3802
3803
3804 for (bus = 0; bus < ha->ports; bus++)
3805 qla1280_bus_reset(ha, bus);
3806
3807 ha->flags.abort_isp_active = 0;
3808 out:
3809 if (status) {
3810 printk(KERN_WARNING
3811		       "qla1280: ISP error recovery failed, board disabled\n");
3812 qla1280_reset_adapter(ha);
3813 dprintk(2, "qla1280_abort_isp: **** FAILED ****\n");
3814 }
3815
3816 LEAVE("qla1280_abort_isp");
3817 return status;
3818}
3819
3820
3821
3822
3823
3824
3825
3826
3827
3828
3829
3830
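/*
 * qla1280_debounce_register
 *      Reads a 16-bit register repeatedly until two consecutive reads
 *      agree, and returns that stable value.
 */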
3831static u16
3832qla1280_debounce_register(volatile u16 __iomem * addr)
3833{
3834 volatile u16 ret;
3835 volatile u16 ret2;
3836
3837 ret = RD_REG_WORD(addr);
3838 ret2 = RD_REG_WORD(addr);
3839
3840 if (ret == ret2)
3841 return ret;
3842
3843 do {
3844 cpu_relax();
3845 ret = RD_REG_WORD(addr);
3846 ret2 = RD_REG_WORD(addr);
3847 } while (ret != ret2);
3848
3849 return ret;
3850}
3851
3852
3853
3854
3855
3856
3857
3858#define SET_SXP_BANK 0x0100
3859#define SCSI_PHASE_INVALID 0x87FF
3860static int
3861qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *ha, unsigned int bus)
3862{
3863 uint16_t config_reg, scsi_control;
3864 struct device_reg __iomem *reg = ha->iobase;
3865
3866 if (ha->bus_settings[bus].scsi_bus_dead) {
3867		WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
3868		config_reg = RD_REG_WORD(&reg->cfg_1);
3869		WRT_REG_WORD(&reg->cfg_1, SET_SXP_BANK);
3870		scsi_control = RD_REG_WORD(&reg->scsiControlPins);
3871		WRT_REG_WORD(&reg->cfg_1, config_reg);
3872		WRT_REG_WORD(&reg->host_cmd, HC_RELEASE_RISC);
3873
3874 if (scsi_control == SCSI_PHASE_INVALID) {
3875 ha->bus_settings[bus].scsi_bus_dead = 1;
3876 return 1;
3877 } else {
3878 ha->bus_settings[bus].scsi_bus_dead = 0;
3879 ha->bus_settings[bus].failed_reset_count = 0;
3880 }
3881 }
3882 return 0;
3883}
3884
3885static void
3886qla1280_get_target_parameters(struct scsi_qla_host *ha,
3887 struct scsi_device *device)
3888{
3889 uint16_t mb[MAILBOX_REGISTER_COUNT];
3890 int bus, target, lun;
3891
3892 bus = device->channel;
3893 target = device->id;
3894 lun = device->lun;
3895
3896
3897 mb[0] = MBC_GET_TARGET_PARAMETERS;
3898 mb[1] = (uint16_t) (bus ? target | BIT_7 : target);
3899 mb[1] <<= 8;
3900 qla1280_mailbox_command(ha, BIT_6 | BIT_3 | BIT_2 | BIT_1 | BIT_0,
3901 &mb[0]);
3902
3903 printk(KERN_INFO "scsi(%li:%d:%d:%d):", ha->host_no, bus, target, lun);
3904
3905 if (mb[3] != 0) {
3906 printk(KERN_CONT " Sync: period %d, offset %d",
3907 (mb[3] & 0xff), (mb[3] >> 8));
3908 if (mb[2] & BIT_13)
3909 printk(KERN_CONT ", Wide");
3910 if ((mb[2] & BIT_5) && ((mb[6] >> 8) & 0xff) >= 2)
3911 printk(KERN_CONT ", DT");
3912 } else
3913 printk(KERN_CONT " Async");
3914
3915 if (device->simple_tags)
3916 printk(KERN_CONT ", Tagged queuing: depth %d", device->queue_depth);
3917 printk(KERN_CONT "\n");
3918}
3919
3920
3921#if DEBUG_QLA1280
3922static void
3923__qla1280_dump_buffer(char *b, int size)
3924{
3925 int cnt;
3926 u8 c;
3927
3928 printk(KERN_DEBUG " 0 1 2 3 4 5 6 7 8 9 Ah "
3929 "Bh Ch Dh Eh Fh\n");
3930 printk(KERN_DEBUG "---------------------------------------------"
3931 "------------------\n");
3932
3933 for (cnt = 0; cnt < size;) {
3934 c = *b++;
3935
3936 printk("0x%02x", c);
3937 cnt++;
3938 if (!(cnt % 16))
3939 printk("\n");
3940 else
3941 printk(" ");
3942 }
3943 if (cnt % 16)
3944 printk("\n");
3945}
3946
3947
3948
3949
3950
3951static void
3952__qla1280_print_scsi_cmd(struct scsi_cmnd *cmd)
3953{
3954 struct scsi_qla_host *ha;
3955 struct Scsi_Host *host = CMD_HOST(cmd);
3956 struct srb *sp;
3957
3958
3959 int i;
3960 ha = (struct scsi_qla_host *)host->hostdata;
3961
3962 sp = (struct srb *)CMD_SP(cmd);
3963 printk("SCSI Command @= 0x%p, Handle=0x%p\n", cmd, CMD_HANDLE(cmd));
3964 printk(" chan=%d, target = 0x%02x, lun = 0x%02x, cmd_len = 0x%02x\n",
3965 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd),
3966 CMD_CDBLEN(cmd));
3967 printk(" CDB = ");
3968 for (i = 0; i < cmd->cmd_len; i++) {
3969 printk("0x%02x ", cmd->cmnd[i]);
3970 }
3971 printk(" seg_cnt =%d\n", scsi_sg_count(cmd));
3972 printk(" request buffer=0x%p, request buffer len=0x%x\n",
3973 scsi_sglist(cmd), scsi_bufflen(cmd));
3974
3975
3976
3977
3978
3979
3980 printk(" tag=%d, transfersize=0x%x \n",
3981 scsi_cmd_to_rq(cmd)->tag, cmd->transfersize);
3982 printk(" SP=0x%p\n", CMD_SP(cmd));
3983 printk(" underflow size = 0x%x, direction=0x%x\n",
3984 cmd->underflow, cmd->sc_data_direction);
3985}
3986
3987
3988
3989
3990
3991static void
3992ql1280_dump_device(struct scsi_qla_host *ha)
3993{
3994
3995 struct scsi_cmnd *cp;
3996 struct srb *sp;
3997 int i;
3998
3999 printk(KERN_DEBUG "Outstanding Commands on controller:\n");
4000
4001 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
4002 if ((sp = ha->outstanding_cmds[i]) == NULL)
4003 continue;
4004 if ((cp = sp->cmd) == NULL)
4005 continue;
4006 qla1280_print_scsi_cmd(1, cp);
4007 }
4008}
4009#endif
4010
4011
4012enum tokens {
4013 TOKEN_NVRAM,
4014 TOKEN_SYNC,
4015 TOKEN_WIDE,
4016 TOKEN_PPR,
4017 TOKEN_VERBOSE,
4018 TOKEN_DEBUG,
4019};
4020
4021struct setup_tokens {
4022 char *token;
4023 int val;
4024};
4025
4026static struct setup_tokens setup_token[] __initdata =
4027{
4028 { "nvram", TOKEN_NVRAM },
4029 { "sync", TOKEN_SYNC },
4030 { "wide", TOKEN_WIDE },
4031 { "ppr", TOKEN_PPR },
4032 { "verbose", TOKEN_VERBOSE },
4033 { "debug", TOKEN_DEBUG },
4034};
4035
4036
4037
4038
4039
4040
4041
4042
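/*
 * qla1280_setup
 *      Parses the "qla1280=" boot/module option string.  Options are
 *      separated by ';' and take the form "token:value", where value is
 *      "yes", "no" or a numeric mask, e.g. (illustrative only)
 *      qla1280=verbose:1;nvram:no.  Recognized tokens are listed in
 *      setup_token[] above.
 */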
4043static int __init
4044qla1280_setup(char *s)
4045{
4046 char *cp, *ptr;
4047 unsigned long val;
4048 int toke;
4049
4050 cp = s;
4051
4052 while (cp && (ptr = strchr(cp, ':'))) {
4053 ptr++;
4054		if (!strncmp(ptr, "yes", 3)) {
4055			val = 0x10000;
4056			ptr += 3;
4057		} else if (!strncmp(ptr, "no", 2)) {
4058			val = 0;
4059			ptr += 2;
4060 } else
4061 val = simple_strtoul(ptr, &ptr, 0);
4062
4063 switch ((toke = qla1280_get_token(cp))) {
4064 case TOKEN_NVRAM:
4065 if (!val)
4066 driver_setup.no_nvram = 1;
4067 break;
4068 case TOKEN_SYNC:
4069 if (!val)
4070 driver_setup.no_sync = 1;
4071 else if (val != 0x10000)
4072 driver_setup.sync_mask = val;
4073 break;
4074 case TOKEN_WIDE:
4075 if (!val)
4076 driver_setup.no_wide = 1;
4077 else if (val != 0x10000)
4078 driver_setup.wide_mask = val;
4079 break;
4080 case TOKEN_PPR:
4081 if (!val)
4082 driver_setup.no_ppr = 1;
4083 else if (val != 0x10000)
4084 driver_setup.ppr_mask = val;
4085 break;
4086 case TOKEN_VERBOSE:
4087 qla1280_verbose = val;
4088 break;
4089 default:
4090 printk(KERN_INFO "qla1280: unknown boot option %s\n",
4091 cp);
4092 }
4093
4094 cp = strchr(ptr, ';');
4095 if (cp)
4096 cp++;
4097 else {
4098 break;
4099 }
4100 }
4101 return 1;
4102}
4103
4104
4105static int __init
4106qla1280_get_token(char *str)
4107{
4108 char *sep;
4109 long ret = -1;
4110 int i;
4111
4112 sep = strchr(str, ':');
4113
4114 if (sep) {
4115 for (i = 0; i < ARRAY_SIZE(setup_token); i++) {
4116 if (!strncmp(setup_token[i].token, str, (sep - str))) {
4117 ret = setup_token[i].val;
4118 break;
4119 }
4120 }
4121 }
4122
4123 return ret;
4124}
4125
4126
4127static struct scsi_host_template qla1280_driver_template = {
4128 .module = THIS_MODULE,
4129 .proc_name = "qla1280",
4130 .name = "Qlogic ISP 1280/12160",
4131 .info = qla1280_info,
4132 .slave_configure = qla1280_slave_configure,
4133 .queuecommand = qla1280_queuecommand,
4134 .eh_abort_handler = qla1280_eh_abort,
4135 .eh_device_reset_handler= qla1280_eh_device_reset,
4136 .eh_bus_reset_handler = qla1280_eh_bus_reset,
4137 .eh_host_reset_handler = qla1280_eh_adapter_reset,
4138 .bios_param = qla1280_biosparam,
4139 .can_queue = MAX_OUTSTANDING_COMMANDS,
4140 .this_id = -1,
4141 .sg_tablesize = SG_ALL,
4142};
4143
4144
4145static int
4146qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4147{
4148 int devnum = id->driver_data;
4149 struct qla_boards *bdp = &ql1280_board_tbl[devnum];
4150 struct Scsi_Host *host;
4151 struct scsi_qla_host *ha;
4152 int error = -ENODEV;
4153
4154
4155 if (pdev->subsystem_vendor == PCI_VENDOR_ID_AMI) {
4156 printk(KERN_INFO
4157 "qla1280: Skipping AMI SubSys Vendor ID Chip\n");
4158 goto error;
4159 }
4160
4161 printk(KERN_INFO "qla1280: %s found on PCI bus %i, dev %i\n",
4162 bdp->name, pdev->bus->number, PCI_SLOT(pdev->devfn));
4163
4164 if (pci_enable_device(pdev)) {
4165 printk(KERN_WARNING
4166		       "qla1280: Failed to enable PCI device, aborting.\n");
4167 goto error;
4168 }
4169
4170 pci_set_master(pdev);
4171
4172 error = -ENOMEM;
4173 host = scsi_host_alloc(&qla1280_driver_template, sizeof(*ha));
4174 if (!host) {
4175 printk(KERN_WARNING
4176 "qla1280: Failed to register host, aborting.\n");
4177 goto error_disable_device;
4178 }
4179
4180 ha = (struct scsi_qla_host *)host->hostdata;
4181 memset(ha, 0, sizeof(struct scsi_qla_host));
4182
4183 ha->pdev = pdev;
4184 ha->devnum = devnum;
4185
4186#ifdef QLA_64BIT_PTR
4187 if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64))) {
4188 if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) {
4189 printk(KERN_WARNING "scsi(%li): Unable to set a "
4190 "suitable DMA mask - aborting\n", ha->host_no);
4191 error = -ENODEV;
4192 goto error_put_host;
4193 }
4194 } else
4195 dprintk(2, "scsi(%li): 64 Bit PCI Addressing Enabled\n",
4196 ha->host_no);
4197#else
4198 if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) {
4199 printk(KERN_WARNING "scsi(%li): Unable to set a "
4200 "suitable DMA mask - aborting\n", ha->host_no);
4201 error = -ENODEV;
4202 goto error_put_host;
4203 }
4204#endif
4205
4206 ha->request_ring = dma_alloc_coherent(&ha->pdev->dev,
4207 ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
4208 &ha->request_dma, GFP_KERNEL);
4209 if (!ha->request_ring) {
4210 printk(KERN_INFO "qla1280: Failed to get request memory\n");
4211 goto error_put_host;
4212 }
4213
4214 ha->response_ring = dma_alloc_coherent(&ha->pdev->dev,
4215 ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
4216 &ha->response_dma, GFP_KERNEL);
4217 if (!ha->response_ring) {
4218 printk(KERN_INFO "qla1280: Failed to get response memory\n");
4219 goto error_free_request_ring;
4220 }
4221
4222 ha->ports = bdp->numPorts;
4223
4224 ha->host = host;
4225 ha->host_no = host->host_no;
4226
4227 host->irq = pdev->irq;
4228 host->max_channel = bdp->numPorts - 1;
4229 host->max_lun = MAX_LUNS - 1;
4230 host->max_id = MAX_TARGETS;
4231 host->max_sectors = 1024;
4232 host->unique_id = host->host_no;
4233
4234 error = -ENODEV;
4235
4236#if MEMORY_MAPPED_IO
4237 ha->mmpbase = pci_ioremap_bar(ha->pdev, 1);
4238 if (!ha->mmpbase) {
4239 printk(KERN_INFO "qla1280: Unable to map I/O memory\n");
4240 goto error_free_response_ring;
4241 }
4242
4243 host->base = (unsigned long)ha->mmpbase;
4244 ha->iobase = (struct device_reg __iomem *)ha->mmpbase;
4245#else
4246 host->io_port = pci_resource_start(ha->pdev, 0);
4247 if (!request_region(host->io_port, 0xff, "qla1280")) {
4248 printk(KERN_INFO "qla1280: Failed to reserve i/o region "
4249 "0x%04lx-0x%04lx - already in use\n",
4250 host->io_port, host->io_port + 0xff);
4251 goto error_free_response_ring;
4252 }
4253
4254 ha->iobase = (struct device_reg *)host->io_port;
4255#endif
4256
4257 INIT_LIST_HEAD(&ha->done_q);
4258
4259
4260 qla1280_disable_intrs(ha);
4261
4262 if (request_irq(pdev->irq, qla1280_intr_handler, IRQF_SHARED,
4263 "qla1280", ha)) {
4264		printk(KERN_WARNING "qla1280: Failed to reserve interrupt %d, "
4265		       "already in use\n", pdev->irq);
4266 goto error_release_region;
4267 }
4268
4269
4270 if (qla1280_initialize_adapter(ha)) {
4271 printk(KERN_INFO "qla1x160: Failed to initialize adapter\n");
4272 goto error_free_irq;
4273 }
4274
4275
4276 host->this_id = ha->bus_settings[0].id;
4277
4278 pci_set_drvdata(pdev, host);
4279
4280 error = scsi_add_host(host, &pdev->dev);
4281 if (error)
4282 goto error_disable_adapter;
4283 scsi_scan_host(host);
4284
4285 return 0;
4286
4287 error_disable_adapter:
4288 qla1280_disable_intrs(ha);
4289 error_free_irq:
4290 free_irq(pdev->irq, ha);
4291 error_release_region:
4292#if MEMORY_MAPPED_IO
4293 iounmap(ha->mmpbase);
4294#else
4295 release_region(host->io_port, 0xff);
4296#endif
4297 error_free_response_ring:
4298 dma_free_coherent(&ha->pdev->dev,
4299 ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
4300 ha->response_ring, ha->response_dma);
4301 error_free_request_ring:
4302 dma_free_coherent(&ha->pdev->dev,
4303 ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
4304 ha->request_ring, ha->request_dma);
4305 error_put_host:
4306 scsi_host_put(host);
4307 error_disable_device:
4308 pci_disable_device(pdev);
4309 error:
4310 return error;
4311}
4312
4313
4314static void
4315qla1280_remove_one(struct pci_dev *pdev)
4316{
4317 struct Scsi_Host *host = pci_get_drvdata(pdev);
4318 struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
4319
4320 scsi_remove_host(host);
4321
4322 qla1280_disable_intrs(ha);
4323
4324 free_irq(pdev->irq, ha);
4325
4326#if MEMORY_MAPPED_IO
4327 iounmap(ha->mmpbase);
4328#else
4329 release_region(host->io_port, 0xff);
4330#endif
4331
4332 dma_free_coherent(&ha->pdev->dev,
4333 ((REQUEST_ENTRY_CNT + 1) * (sizeof(request_t))),
4334 ha->request_ring, ha->request_dma);
4335 dma_free_coherent(&ha->pdev->dev,
4336 ((RESPONSE_ENTRY_CNT + 1) * (sizeof(struct response))),
4337 ha->response_ring, ha->response_dma);
4338
4339 pci_disable_device(pdev);
4340
4341 scsi_host_put(host);
4342}
4343
4344static struct pci_driver qla1280_pci_driver = {
4345 .name = "qla1280",
4346 .id_table = qla1280_pci_tbl,
4347 .probe = qla1280_probe_one,
4348 .remove = qla1280_remove_one,
4349};
4350
4351static int __init
4352qla1280_init(void)
4353{
4354 if (sizeof(struct srb) > sizeof(struct scsi_pointer)) {
4355 printk(KERN_WARNING
4356 "qla1280: struct srb too big, aborting\n");
4357 return -EINVAL;
4358 }
4359
4360#ifdef MODULE
4361
4362
4363
4364
4365
4366
4367
4368
4369
4370
4371
4372
4373 if (qla1280)
4374 qla1280_setup(qla1280);
4375#endif
4376
4377 return pci_register_driver(&qla1280_pci_driver);
4378}
4379
4380static void __exit
4381qla1280_exit(void)
4382{
4383 int i;
4384
4385 pci_unregister_driver(&qla1280_pci_driver);
4386
4387 for (i = 0; i < QL_NUM_FW_IMAGES; i++) {
4388 release_firmware(qla1280_fw_tbl[i].fw);
4389 qla1280_fw_tbl[i].fw = NULL;
4390 }
4391}
4392
4393module_init(qla1280_init);
4394module_exit(qla1280_exit);
4395
4396MODULE_AUTHOR("Qlogic & Jes Sorensen");
4397MODULE_DESCRIPTION("Qlogic ISP SCSI (qla1x80/qla1x160) driver");
4398MODULE_LICENSE("GPL");
4399MODULE_FIRMWARE("qlogic/1040.bin");
4400MODULE_FIRMWARE("qlogic/1280.bin");
4401MODULE_FIRMWARE("qlogic/12160.bin");
4402MODULE_VERSION(QLA1280_VERSION);
4403