/*
 * QLogic QLA1280 and QLA12160 SCSI host adapter driver.
 */
#define QLA1280_VERSION		"3.27.1"
#include <linux/module.h>

#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/pci_ids.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
#include <asm/processor.h>
#include <asm/types.h>
#include <asm/system.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
#include <asm/sn/io.h>
#endif

#define DEBUG_QLA1280_INTR	0
#define DEBUG_PRINT_NVRAM	0
#define DEBUG_QLA1280		0

#ifdef CONFIG_X86_VISWS
#define MEMORY_MAPPED_IO	0
#else
#define MEMORY_MAPPED_IO	1
#endif

#include "qla1280.h"

#ifndef BITS_PER_LONG
#error "BITS_PER_LONG not defined!"
#endif
#if (BITS_PER_LONG == 64) || defined CONFIG_HIGHMEM
#define QLA_64BIT_PTR	1
#endif

#ifdef QLA_64BIT_PTR
#define pci_dma_hi32(a)		((a >> 16) >> 16)
#else
#define pci_dma_hi32(a)		0
#endif
#define pci_dma_lo32(a)		(a & 0xffffffff)

#define NVRAM_DELAY()		udelay(500)

#if defined(__ia64__) && !defined(ia64_platform_is)
#define ia64_platform_is(foo)	(!strcmp(foo, platform_name))
#endif

#define IS_ISP1040(ha)	(ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020)
#define IS_ISP1x40(ha)	(ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020 || \
			 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1240)
#define IS_ISP1x160(ha)	(ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP10160 || \
			 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP12160)

static int qla1280_probe_one(struct pci_dev *, const struct pci_device_id *);
static void qla1280_remove_one(struct pci_dev *);

static void qla1280_done(struct scsi_qla_host *);
static int qla1280_get_token(char *);
static int qla1280_setup(char *s) __init;

static int qla1280_load_firmware(struct scsi_qla_host *);
static int qla1280_init_rings(struct scsi_qla_host *);
static int qla1280_nvram_config(struct scsi_qla_host *);
static int qla1280_mailbox_command(struct scsi_qla_host *,
				   uint8_t, uint16_t *);
static int qla1280_bus_reset(struct scsi_qla_host *, int);
static int qla1280_device_reset(struct scsi_qla_host *, int, int);
static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int);
static int qla1280_abort_isp(struct scsi_qla_host *);
#ifdef QLA_64BIT_PTR
static int qla1280_64bit_start_scsi(struct scsi_qla_host *, struct srb *);
#else
static int qla1280_32bit_start_scsi(struct scsi_qla_host *, struct srb *);
#endif
static void qla1280_nv_write(struct scsi_qla_host *, uint16_t);
static void qla1280_poll(struct scsi_qla_host *);
static void qla1280_reset_adapter(struct scsi_qla_host *);
static void qla1280_marker(struct scsi_qla_host *, int, int, int, u8);
static void qla1280_isp_cmd(struct scsi_qla_host *);
static void qla1280_isr(struct scsi_qla_host *, struct list_head *);
static void qla1280_rst_aen(struct scsi_qla_host *);
static void qla1280_status_entry(struct scsi_qla_host *, struct response *,
				 struct list_head *);
static void qla1280_error_entry(struct scsi_qla_host *, struct response *,
				struct list_head *);
static uint16_t qla1280_get_nvram_word(struct scsi_qla_host *, uint32_t);
static uint16_t qla1280_nvram_request(struct scsi_qla_host *, uint32_t);
static uint16_t qla1280_debounce_register(volatile uint16_t __iomem *);
static request_t *qla1280_req_pkt(struct scsi_qla_host *);
static int qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *,
					   unsigned int);
static void qla1280_get_target_parameters(struct scsi_qla_host *,
					  struct scsi_device *);
static int qla1280_set_target_parameters(struct scsi_qla_host *, int, int);

static struct qla_driver_setup driver_setup;
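
/*
 * qla1280_data_direction
 *	Map a scsi_cmnd's sc_data_direction onto the BIT_5/BIT_6 data
 *	direction flags used when building the ISP command packet.
 */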
static inline uint16_t
qla1280_data_direction(struct scsi_cmnd *cmnd)
{
	switch(cmnd->sc_data_direction) {
	case DMA_FROM_DEVICE:
		return BIT_5;
	case DMA_TO_DEVICE:
		return BIT_6;
	case DMA_BIDIRECTIONAL:
		return BIT_5 | BIT_6;
	case DMA_NONE:
	default:
		return 0;
	}
}

#if DEBUG_QLA1280
static void __qla1280_print_scsi_cmd(struct scsi_cmnd * cmd);
static void __qla1280_dump_buffer(char *, int);
#endif

#ifdef MODULE
static char *qla1280;

module_param(qla1280, charp, 0);
#else
__setup("qla1280=", qla1280_setup);
#endif

#define	CMD_SP(Cmnd)		&Cmnd->SCp
#define	CMD_CDBLEN(Cmnd)	Cmnd->cmd_len
#define	CMD_CDBP(Cmnd)		Cmnd->cmnd
#define	CMD_SNSP(Cmnd)		Cmnd->sense_buffer
#define	CMD_SNSLEN(Cmnd)	SCSI_SENSE_BUFFERSIZE
#define	CMD_RESULT(Cmnd)	Cmnd->result
#define	CMD_HANDLE(Cmnd)	Cmnd->host_scribble
#define CMD_REQUEST(Cmnd)	Cmnd->request->cmd

#define CMD_HOST(Cmnd)		Cmnd->device->host
#define SCSI_BUS_32(Cmnd)	Cmnd->device->channel
#define SCSI_TCN_32(Cmnd)	Cmnd->device->id
#define SCSI_LUN_32(Cmnd)	Cmnd->device->lun

struct qla_boards {
	char *name;
	int numPorts;
	int fw_index;
};

static struct pci_device_id qla1280_pci_tbl[] = {
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP12160,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1020,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1080,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1240,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3},
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1280,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP10160,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
	{0,}
};
MODULE_DEVICE_TABLE(pci, qla1280_pci_tbl);

DEFINE_MUTEX(qla1280_firmware_mutex);

struct qla_fw {
	char *fwname;
	const struct firmware *fw;
};

#define QL_NUM_FW_IMAGES 3

struct qla_fw qla1280_fw_tbl[QL_NUM_FW_IMAGES] = {
	{"qlogic/1040.bin", NULL},
	{"qlogic/1280.bin", NULL},
	{"qlogic/12160.bin", NULL},
};

static struct qla_boards ql1280_board_tbl[] = {
	{.name = "QLA12160", .numPorts = 2, .fw_index = 2},
	{.name = "QLA1040" , .numPorts = 1, .fw_index = 0},
	{.name = "QLA1080" , .numPorts = 1, .fw_index = 1},
	{.name = "QLA1240" , .numPorts = 2, .fw_index = 1},
	{.name = "QLA1280" , .numPorts = 2, .fw_index = 1},
	{.name = "QLA10160", .numPorts = 1, .fw_index = 2},
	{.name = " ", .numPorts = 0, .fw_index = -1},
};

static int qla1280_verbose = 1;

#if DEBUG_QLA1280
static int ql_debug_level = 1;
#define dprintk(level, format, a...)	\
	do { if (ql_debug_level >= level) printk(KERN_ERR format, ##a); } while(0)
#define qla1280_dump_buffer(level, buf, size) \
	if (ql_debug_level >= level) __qla1280_dump_buffer(buf, size)
#define qla1280_print_scsi_cmd(level, cmd) \
	if (ql_debug_level >= level) __qla1280_print_scsi_cmd(cmd)
#else
#define ql_debug_level			0
#define dprintk(level, format, a...)	do{}while(0)
#define qla1280_dump_buffer(a, b, c)	do{}while(0)
#define qla1280_print_scsi_cmd(a, b)	do{}while(0)
#endif

#define ENTER(x)	dprintk(3, "qla1280 : Entering %s()\n", x);
#define LEAVE(x)	dprintk(3, "qla1280 : Leaving %s()\n", x);
#define ENTER_INTR(x)	dprintk(4, "qla1280 : Entering %s()\n", x);
#define LEAVE_INTR(x)	dprintk(4, "qla1280 : Leaving %s()\n", x);
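
/*
 * qla1280_read_nvram
 *	Read and validate the adapter's NVRAM: check the "ISP " signature,
 *	version and checksum, and mark ha->nvram_valid accordingly.
 *
 * Returns 0 if the NVRAM contents are usable, non-zero otherwise.
 */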
static int qla1280_read_nvram(struct scsi_qla_host *ha)
{
	uint16_t *wptr;
	uint8_t chksum;
	int cnt, i;
	struct nvram *nv;

	ENTER("qla1280_read_nvram");

	if (driver_setup.no_nvram)
		return 1;

	printk(KERN_INFO "scsi(%ld): Reading NVRAM\n", ha->host_no);

	wptr = (uint16_t *)&ha->nvram;
	nv = &ha->nvram;
	chksum = 0;
	for (cnt = 0; cnt < 3; cnt++) {
		*wptr = qla1280_get_nvram_word(ha, cnt);
		chksum += *wptr & 0xff;
		chksum += (*wptr >> 8) & 0xff;
		wptr++;
	}

	if (nv->id0 != 'I' || nv->id1 != 'S' ||
	    nv->id2 != 'P' || nv->id3 != ' ' || nv->version < 1) {
		dprintk(2, "Invalid nvram ID or version!\n");
		chksum = 1;
	} else {
		for (; cnt < sizeof(struct nvram); cnt++) {
			*wptr = qla1280_get_nvram_word(ha, cnt);
			chksum += *wptr & 0xff;
			chksum += (*wptr >> 8) & 0xff;
			wptr++;
		}
	}

	dprintk(3, "qla1280_read_nvram: NVRAM Magic ID= %c %c %c %02x"
		" version %i\n", nv->id0, nv->id1, nv->id2, nv->id3,
		nv->version);

	if (chksum) {
		if (!driver_setup.no_nvram)
			printk(KERN_WARNING "scsi(%ld): Unable to identify or "
			       "validate NVRAM checksum, using default "
			       "settings\n", ha->host_no);
		ha->nvram_valid = 0;
	} else
		ha->nvram_valid = 1;

	nv->isp_parameter = cpu_to_le16(nv->isp_parameter);
	nv->firmware_feature.w = cpu_to_le16(nv->firmware_feature.w);
	for (i = 0; i < MAX_BUSES; i++) {
		nv->bus[i].selection_timeout = cpu_to_le16(nv->bus[i].selection_timeout);
		nv->bus[i].max_queue_depth = cpu_to_le16(nv->bus[i].max_queue_depth);
	}

	dprintk(1, "qla1280_read_nvram: Completed Reading NVRAM\n");
	LEAVE("qla1280_read_nvram");

	return chksum;
}
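
/*
 * qla1280_info
 *	Return a formatted description of the host adapter, including the
 *	board name and the firmware/driver versions.
 */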
static const char *
qla1280_info(struct Scsi_Host *host)
{
	static char qla1280_scsi_name_buffer[125];
	char *bp;
	struct scsi_qla_host *ha;
	struct qla_boards *bdp;

	bp = &qla1280_scsi_name_buffer[0];
	ha = (struct scsi_qla_host *)host->hostdata;
	bdp = &ql1280_board_tbl[ha->devnum];
	memset(bp, 0, sizeof(qla1280_scsi_name_buffer));

	sprintf (bp,
		 "QLogic %s PCI to SCSI Host Adapter\n"
		 "       Firmware version: %2d.%02d.%02d, Driver version %s",
		 &bdp->name[0], ha->fwver1, ha->fwver2, ha->fwver3,
		 QLA1280_VERSION);
	return bp;
}
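
/*
 * qla1280_queuecommand
 *	Queue a SCSI command to the adapter, dispatching to the 64-bit or
 *	32-bit start_scsi routine depending on QLA_64BIT_PTR.
 */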
static int
qla1280_queuecommand_lck(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
	struct srb *sp = (struct srb *)CMD_SP(cmd);
	int status;

	cmd->scsi_done = fn;
	sp->cmd = cmd;
	sp->flags = 0;
	sp->wait = NULL;
	CMD_HANDLE(cmd) = (unsigned char *)NULL;

	qla1280_print_scsi_cmd(5, cmd);

#ifdef QLA_64BIT_PTR
	status = qla1280_64bit_start_scsi(ha, sp);
#else
	status = qla1280_32bit_start_scsi(ha, sp);
#endif
	return status;
}

static DEF_SCSI_QCMD(qla1280_queuecommand)

enum action {
	ABORT_COMMAND,
	DEVICE_RESET,
	BUS_RESET,
	ADAPTER_RESET,
};
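
/*
 * qla1280_mailbox_timeout
 *	Timer callback fired when a mailbox command does not complete in
 *	time; logs the chip state and completes the waiter.
 */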
static void qla1280_mailbox_timeout(unsigned long __data)
{
	struct scsi_qla_host *ha = (struct scsi_qla_host *)__data;
	struct device_reg __iomem *reg;
	reg = ha->iobase;

	ha->mailbox_out[0] = RD_REG_WORD(&reg->mailbox0);
	printk(KERN_ERR "scsi(%ld): mailbox timed out, mailbox0 %04x, "
	       "ictrl %04x, istatus %04x\n", ha->host_no, ha->mailbox_out[0],
	       RD_REG_WORD(&reg->ictrl), RD_REG_WORD(&reg->istatus));
	complete(ha->mailbox_wait);
}
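
/*
 * Error-handling helpers: wait (up to 4 seconds) for a single outstanding
 * command, or for all outstanding commands on a given bus/target, to be
 * returned by the ISP.
 */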
782static int
783_qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp,
784 struct completion *wait)
785{
786 int status = FAILED;
787 struct scsi_cmnd *cmd = sp->cmd;
788
789 spin_unlock_irq(ha->host->host_lock);
790 wait_for_completion_timeout(wait, 4*HZ);
791 spin_lock_irq(ha->host->host_lock);
792 sp->wait = NULL;
793 if(CMD_HANDLE(cmd) == COMPLETED_HANDLE) {
794 status = SUCCESS;
795 (*cmd->scsi_done)(cmd);
796 }
797 return status;
798}
799
800static int
801qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp)
802{
803 DECLARE_COMPLETION_ONSTACK(wait);
804
805 sp->wait = &wait;
806 return _qla1280_wait_for_single_command(ha, sp, &wait);
807}
808
809static int
810qla1280_wait_for_pending_commands(struct scsi_qla_host *ha, int bus, int target)
811{
812 int cnt;
813 int status;
814 struct srb *sp;
815 struct scsi_cmnd *cmd;
816
817 status = SUCCESS;
818
819
820
821
822
823 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
824 sp = ha->outstanding_cmds[cnt];
825 if (sp) {
826 cmd = sp->cmd;
827
828 if (bus >= 0 && SCSI_BUS_32(cmd) != bus)
829 continue;
830 if (target >= 0 && SCSI_TCN_32(cmd) != target)
831 continue;
832
833 status = qla1280_wait_for_single_command(ha, sp);
834 if (status == FAILED)
835 break;
836 }
837 }
838 return status;
839}
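
/*
 * qla1280_error_action
 *	Common body of the error handlers: locate the command, issue the
 *	requested abort/device reset/bus reset/adapter reset, then wait
 *	for the affected commands to complete.
 *
 * Returns SUCCESS or FAILED.
 */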
855static int
856qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
857{
858 struct scsi_qla_host *ha;
859 int bus, target, lun;
860 struct srb *sp;
861 int i, found;
862 int result=FAILED;
863 int wait_for_bus=-1;
864 int wait_for_target = -1;
865 DECLARE_COMPLETION_ONSTACK(wait);
866
867 ENTER("qla1280_error_action");
868
869 ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata);
870 sp = (struct srb *)CMD_SP(cmd);
871 bus = SCSI_BUS_32(cmd);
872 target = SCSI_TCN_32(cmd);
873 lun = SCSI_LUN_32(cmd);
874
875 dprintk(4, "error_action %i, istatus 0x%04x\n", action,
876 RD_REG_WORD(&ha->iobase->istatus));
877
878 dprintk(4, "host_cmd 0x%04x, ictrl 0x%04x, jiffies %li\n",
879 RD_REG_WORD(&ha->iobase->host_cmd),
880 RD_REG_WORD(&ha->iobase->ictrl), jiffies);
881
882 if (qla1280_verbose)
883 printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, "
884 "Handle=0x%p, action=0x%x\n",
885 ha->host_no, cmd, CMD_HANDLE(cmd), action);
886
887
888
889
890
891
892
893 found = -1;
894 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
895 if (sp == ha->outstanding_cmds[i]) {
896 found = i;
897 sp->wait = &wait;
898 break;
899 }
900 }
901
902 if (found < 0) {
903 result = SUCCESS;
904 if (qla1280_verbose) {
905 printk(KERN_INFO
906 "scsi(%ld:%d:%d:%d): specified command has "
907 "already completed.\n", ha->host_no, bus,
908 target, lun);
909 }
910 }
911
912 switch (action) {
913
914 case ABORT_COMMAND:
915 dprintk(1, "qla1280: RISC aborting command\n");
916
917
918
919
920
921 if (found >= 0)
922 qla1280_abort_command(ha, sp, found);
923 break;
924
925 case DEVICE_RESET:
926 if (qla1280_verbose)
927 printk(KERN_INFO
928 "scsi(%ld:%d:%d:%d): Queueing device reset "
929 "command.\n", ha->host_no, bus, target, lun);
930 if (qla1280_device_reset(ha, bus, target) == 0) {
931
932 wait_for_bus = bus;
933 wait_for_target = target;
934 }
935 break;
936
937 case BUS_RESET:
938 if (qla1280_verbose)
939 printk(KERN_INFO "qla1280(%ld:%d): Issued bus "
940 "reset.\n", ha->host_no, bus);
941 if (qla1280_bus_reset(ha, bus) == 0) {
942
943 wait_for_bus = bus;
944 }
945 break;
946
947 case ADAPTER_RESET:
948 default:
949 if (qla1280_verbose) {
950 printk(KERN_INFO
951 "scsi(%ld): Issued ADAPTER RESET\n",
952 ha->host_no);
953 printk(KERN_INFO "scsi(%ld): I/O processing will "
954 "continue automatically\n", ha->host_no);
955 }
956 ha->flags.reset_active = 1;
957
958 if (qla1280_abort_isp(ha) != 0) {
959 result = FAILED;
960 }
961
962 ha->flags.reset_active = 0;
963 }
964
965
966
967
968
969
970
971
972
973
974 if (found >= 0)
975 result = _qla1280_wait_for_single_command(ha, sp, &wait);
976
977 if (action == ABORT_COMMAND && result != SUCCESS) {
978 printk(KERN_WARNING
979 "scsi(%li:%i:%i:%i): "
980 "Unable to abort command!\n",
981 ha->host_no, bus, target, lun);
982 }
983
984
985
986
987
988
989
990
991
992
993
994 if (result == SUCCESS && wait_for_bus >= 0) {
995 result = qla1280_wait_for_pending_commands(ha,
996 wait_for_bus, wait_for_target);
997 }
998
999 dprintk(1, "RESET returning %d\n", result);
1000
1001 LEAVE("qla1280_error_action");
1002 return result;
1003}
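
/*
 * Error-handler entry points; each simply wraps qla1280_error_action()
 * with the corresponding action while holding the host lock.
 */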
1009static int
1010qla1280_eh_abort(struct scsi_cmnd * cmd)
1011{
1012 int rc;
1013
1014 spin_lock_irq(cmd->device->host->host_lock);
1015 rc = qla1280_error_action(cmd, ABORT_COMMAND);
1016 spin_unlock_irq(cmd->device->host->host_lock);
1017
1018 return rc;
1019}
1020
1021
1022
1023
1024
1025static int
1026qla1280_eh_device_reset(struct scsi_cmnd *cmd)
1027{
1028 int rc;
1029
1030 spin_lock_irq(cmd->device->host->host_lock);
1031 rc = qla1280_error_action(cmd, DEVICE_RESET);
1032 spin_unlock_irq(cmd->device->host->host_lock);
1033
1034 return rc;
1035}
1036
1037
1038
1039
1040
1041static int
1042qla1280_eh_bus_reset(struct scsi_cmnd *cmd)
1043{
1044 int rc;
1045
1046 spin_lock_irq(cmd->device->host->host_lock);
1047 rc = qla1280_error_action(cmd, BUS_RESET);
1048 spin_unlock_irq(cmd->device->host->host_lock);
1049
1050 return rc;
1051}
1052
1053
1054
1055
1056
1057static int
1058qla1280_eh_adapter_reset(struct scsi_cmnd *cmd)
1059{
1060 int rc;
1061
1062 spin_lock_irq(cmd->device->host->host_lock);
1063 rc = qla1280_error_action(cmd, ADAPTER_RESET);
1064 spin_unlock_irq(cmd->device->host->host_lock);
1065
1066 return rc;
1067}
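
/*
 * qla1280_biosparam
 *	Return a BIOS-compatible disk geometry (heads/sectors/cylinders)
 *	for the given capacity.
 */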
1069static int
1070qla1280_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1071 sector_t capacity, int geom[])
1072{
1073 int heads, sectors, cylinders;
1074
1075 heads = 64;
1076 sectors = 32;
1077 cylinders = (unsigned long)capacity / (heads * sectors);
1078 if (cylinders > 1024) {
1079 heads = 255;
1080 sectors = 63;
1081 cylinders = (unsigned long)capacity / (heads * sectors);
1082
1083
1084 }
1085
1086 geom[0] = heads;
1087 geom[1] = sectors;
1088 geom[2] = cylinders;
1089
1090 return 0;
1091}
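
/* Disable and enable ISP interrupts; the read back flushes the write. */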
static inline void
qla1280_disable_intrs(struct scsi_qla_host *ha)
{
	WRT_REG_WORD(&ha->iobase->ictrl, 0);
	RD_REG_WORD(&ha->iobase->ictrl);
}

static inline void
qla1280_enable_intrs(struct scsi_qla_host *ha)
{
	WRT_REG_WORD(&ha->iobase->ictrl, (ISP_EN_INT | ISP_EN_RISC));
	RD_REG_WORD(&ha->iobase->ictrl);
}
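
/*
 * qla1280_intr_handler
 *	Hardware interrupt handler: let qla1280_isr() collect completed
 *	commands onto the done queue and then finish them.
 */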
1114static irqreturn_t
1115qla1280_intr_handler(int irq, void *dev_id)
1116{
1117 struct scsi_qla_host *ha;
1118 struct device_reg __iomem *reg;
1119 u16 data;
1120 int handled = 0;
1121
1122 ENTER_INTR ("qla1280_intr_handler");
1123 ha = (struct scsi_qla_host *)dev_id;
1124
1125 spin_lock(ha->host->host_lock);
1126
1127 ha->isr_count++;
1128 reg = ha->iobase;
1129
1130 qla1280_disable_intrs(ha);
1131
	data = qla1280_debounce_register(&reg->istatus);
1133
1134 if (data & RISC_INT) {
1135 qla1280_isr(ha, &ha->done_q);
1136 handled = 1;
1137 }
1138 if (!list_empty(&ha->done_q))
1139 qla1280_done(ha);
1140
1141 spin_unlock(ha->host->host_lock);
1142
1143 qla1280_enable_intrs(ha);
1144
1145 LEAVE_INTR("qla1280_intr_handler");
1146 return IRQ_RETVAL(handled);
1147}
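
/*
 * qla1280_set_target_parameters
 *	Push the per-target NVRAM settings (sync/wide/PPR, queueing, etc.)
 *	and per-LUN queue parameters to the firmware via mailbox commands.
 */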
1150static int
1151qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target)
1152{
1153 uint8_t mr;
1154 uint16_t mb[MAILBOX_REGISTER_COUNT];
1155 struct nvram *nv;
1156 int status, lun;
1157
1158 nv = &ha->nvram;
1159
1160 mr = BIT_3 | BIT_2 | BIT_1 | BIT_0;
1161
1162
1163 mb[0] = MBC_SET_TARGET_PARAMETERS;
1164 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
1165 mb[2] = nv->bus[bus].target[target].parameter.renegotiate_on_error << 8;
1166 mb[2] |= nv->bus[bus].target[target].parameter.stop_queue_on_check << 9;
1167 mb[2] |= nv->bus[bus].target[target].parameter.auto_request_sense << 10;
1168 mb[2] |= nv->bus[bus].target[target].parameter.tag_queuing << 11;
1169 mb[2] |= nv->bus[bus].target[target].parameter.enable_sync << 12;
1170 mb[2] |= nv->bus[bus].target[target].parameter.enable_wide << 13;
1171 mb[2] |= nv->bus[bus].target[target].parameter.parity_checking << 14;
1172 mb[2] |= nv->bus[bus].target[target].parameter.disconnect_allowed << 15;
1173
1174 if (IS_ISP1x160(ha)) {
1175 mb[2] |= nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr << 5;
1176 mb[3] = (nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8);
1177 mb[6] = (nv->bus[bus].target[target].ppr_1x160.flags.ppr_options << 8) |
1178 nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width;
1179 mr |= BIT_6;
1180 } else {
1181 mb[3] = (nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8);
1182 }
1183 mb[3] |= nv->bus[bus].target[target].sync_period;
1184
1185 status = qla1280_mailbox_command(ha, mr, mb);
1186
1187
1188 for (lun = 0; lun < MAX_LUNS; lun++) {
1189 mb[0] = MBC_SET_DEVICE_QUEUE;
1190 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
1191 mb[1] |= lun;
1192 mb[2] = nv->bus[bus].max_queue_depth;
1193 mb[3] = nv->bus[bus].target[target].execution_throttle;
1194 status |= qla1280_mailbox_command(ha, 0x0f, mb);
1195 }
1196
1197 if (status)
1198 printk(KERN_WARNING "scsi(%ld:%i:%i): "
1199 "qla1280_set_target_parameters() failed\n",
1200 ha->host_no, bus, target);
1201 return status;
1202}
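
/*
 * qla1280_slave_configure
 *	Configure queue depth and negotiation (sync/wide/PPR) for a newly
 *	found SCSI device, honouring the driver_setup overrides.
 */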
1216static int
1217qla1280_slave_configure(struct scsi_device *device)
1218{
1219 struct scsi_qla_host *ha;
1220 int default_depth = 3;
1221 int bus = device->channel;
1222 int target = device->id;
1223 int status = 0;
1224 struct nvram *nv;
1225 unsigned long flags;
1226
1227 ha = (struct scsi_qla_host *)device->host->hostdata;
1228 nv = &ha->nvram;
1229
1230 if (qla1280_check_for_dead_scsi_bus(ha, bus))
1231 return 1;
1232
1233 if (device->tagged_supported &&
1234 (ha->bus_settings[bus].qtag_enables & (BIT_0 << target))) {
1235 scsi_adjust_queue_depth(device, MSG_ORDERED_TAG,
1236 ha->bus_settings[bus].hiwat);
1237 } else {
1238 scsi_adjust_queue_depth(device, 0, default_depth);
1239 }
1240
1241 nv->bus[bus].target[target].parameter.enable_sync = device->sdtr;
1242 nv->bus[bus].target[target].parameter.enable_wide = device->wdtr;
1243 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = device->ppr;
1244
1245 if (driver_setup.no_sync ||
1246 (driver_setup.sync_mask &&
1247 (~driver_setup.sync_mask & (1 << target))))
1248 nv->bus[bus].target[target].parameter.enable_sync = 0;
1249 if (driver_setup.no_wide ||
1250 (driver_setup.wide_mask &&
1251 (~driver_setup.wide_mask & (1 << target))))
1252 nv->bus[bus].target[target].parameter.enable_wide = 0;
1253 if (IS_ISP1x160(ha)) {
1254 if (driver_setup.no_ppr ||
1255 (driver_setup.ppr_mask &&
1256 (~driver_setup.ppr_mask & (1 << target))))
1257 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 0;
1258 }
1259
1260 spin_lock_irqsave(ha->host->host_lock, flags);
1261 if (nv->bus[bus].target[target].parameter.enable_sync)
1262 status = qla1280_set_target_parameters(ha, bus, target);
1263 qla1280_get_target_parameters(ha, device);
1264 spin_unlock_irqrestore(ha->host->host_lock, flags);
1265 return status;
1266}
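
/*
 * qla1280_done
 *	Process the list of completed commands: unmap DMA, post markers
 *	after resets, and call scsi_done() or wake an error-handler waiter.
 */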
1276static void
1277qla1280_done(struct scsi_qla_host *ha)
1278{
1279 struct srb *sp;
1280 struct list_head *done_q;
1281 int bus, target, lun;
1282 struct scsi_cmnd *cmd;
1283
1284 ENTER("qla1280_done");
1285
1286 done_q = &ha->done_q;
1287
1288 while (!list_empty(done_q)) {
1289 sp = list_entry(done_q->next, struct srb, list);
1290
1291 list_del(&sp->list);
1292
1293 cmd = sp->cmd;
1294 bus = SCSI_BUS_32(cmd);
1295 target = SCSI_TCN_32(cmd);
1296 lun = SCSI_LUN_32(cmd);
1297
1298 switch ((CMD_RESULT(cmd) >> 16)) {
1299 case DID_RESET:
1300
1301 if (!ha->flags.abort_isp_active)
1302 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
1303 break;
1304 case DID_ABORT:
1305 sp->flags &= ~SRB_ABORT_PENDING;
1306 sp->flags |= SRB_ABORTED;
1307 break;
1308 default:
1309 break;
1310 }
1311
1312
1313 scsi_dma_unmap(cmd);
1314
1315
1316 ha->actthreads--;
1317
1318 if (sp->wait == NULL)
1319 (*(cmd)->scsi_done)(cmd);
1320 else
1321 complete(sp->wait);
1322 }
1323 LEAVE("qla1280_done");
1324}
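
/*
 * qla1280_return_status
 *	Translate the ISP status IOCB completion/state flags into a SCSI
 *	midlayer result (host byte and SCSI status byte).
 */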
1329static int
1330qla1280_return_status(struct response * sts, struct scsi_cmnd *cp)
1331{
1332 int host_status = DID_ERROR;
1333 uint16_t comp_status = le16_to_cpu(sts->comp_status);
1334 uint16_t state_flags = le16_to_cpu(sts->state_flags);
1335 uint32_t residual_length = le32_to_cpu(sts->residual_length);
1336 uint16_t scsi_status = le16_to_cpu(sts->scsi_status);
1337#if DEBUG_QLA1280_INTR
1338 static char *reason[] = {
1339 "DID_OK",
1340 "DID_NO_CONNECT",
1341 "DID_BUS_BUSY",
1342 "DID_TIME_OUT",
1343 "DID_BAD_TARGET",
1344 "DID_ABORT",
1345 "DID_PARITY",
1346 "DID_ERROR",
1347 "DID_RESET",
1348 "DID_BAD_INTR"
1349 };
1350#endif
1351
1352 ENTER("qla1280_return_status");
1353
1354#if DEBUG_QLA1280_INTR
1355
1356
1357
1358
1359#endif
1360
1361 switch (comp_status) {
1362 case CS_COMPLETE:
1363 host_status = DID_OK;
1364 break;
1365
1366 case CS_INCOMPLETE:
1367 if (!(state_flags & SF_GOT_BUS))
1368 host_status = DID_NO_CONNECT;
1369 else if (!(state_flags & SF_GOT_TARGET))
1370 host_status = DID_BAD_TARGET;
1371 else if (!(state_flags & SF_SENT_CDB))
1372 host_status = DID_ERROR;
1373 else if (!(state_flags & SF_TRANSFERRED_DATA))
1374 host_status = DID_ERROR;
1375 else if (!(state_flags & SF_GOT_STATUS))
1376 host_status = DID_ERROR;
1377 else if (!(state_flags & SF_GOT_SENSE))
1378 host_status = DID_ERROR;
1379 break;
1380
1381 case CS_RESET:
1382 host_status = DID_RESET;
1383 break;
1384
1385 case CS_ABORTED:
1386 host_status = DID_ABORT;
1387 break;
1388
1389 case CS_TIMEOUT:
1390 host_status = DID_TIME_OUT;
1391 break;
1392
1393 case CS_DATA_OVERRUN:
1394 dprintk(2, "Data overrun 0x%x\n", residual_length);
1395 dprintk(2, "qla1280_return_status: response packet data\n");
1396 qla1280_dump_buffer(2, (char *)sts, RESPONSE_ENTRY_SIZE);
1397 host_status = DID_ERROR;
1398 break;
1399
1400 case CS_DATA_UNDERRUN:
1401 if ((scsi_bufflen(cp) - residual_length) <
1402 cp->underflow) {
1403 printk(KERN_WARNING
1404 "scsi: Underflow detected - retrying "
1405 "command.\n");
1406 host_status = DID_ERROR;
1407 } else {
1408 scsi_set_resid(cp, residual_length);
1409 host_status = DID_OK;
1410 }
1411 break;
1412
1413 default:
1414 host_status = DID_ERROR;
1415 break;
1416 }
1417
1418#if DEBUG_QLA1280_INTR
1419 dprintk(1, "qla1280 ISP status: host status (%s) scsi status %x\n",
1420 reason[host_status], scsi_status);
1421#endif
1422
1423 LEAVE("qla1280_return_status");
1424
1425 return (scsi_status & 0xff) | (host_status << 16);
1426}
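
/*
 * qla1280_initialize_adapter
 *	Bring the ISP up: read NVRAM, load and start firmware, configure
 *	NVRAM-derived parameters, initialize rings and reset the SCSI buses.
 */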
1442static int __devinit
1443qla1280_initialize_adapter(struct scsi_qla_host *ha)
1444{
1445 struct device_reg __iomem *reg;
1446 int status;
1447 int bus;
1448 unsigned long flags;
1449
1450 ENTER("qla1280_initialize_adapter");
1451
1452
1453 ha->flags.online = 0;
1454 ha->flags.disable_host_adapter = 0;
1455 ha->flags.reset_active = 0;
1456 ha->flags.abort_isp_active = 0;
1457
1458#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
1459 if (ia64_platform_is("sn2")) {
1460 printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
1461 "dual channel lockup workaround\n", ha->host_no);
1462 ha->flags.use_pci_vchannel = 1;
1463 driver_setup.no_nvram = 1;
1464 }
1465#endif
1466
1467
1468 if (IS_ISP1040(ha))
1469 driver_setup.no_nvram = 1;
1470
1471 dprintk(1, "Configure PCI space for adapter...\n");
1472
	reg = ha->iobase;

	WRT_REG_WORD(&reg->semaphore, 0);
	WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
	WRT_REG_WORD(&reg->host_cmd, HC_CLR_HOST_INT);
	RD_REG_WORD(&reg->host_cmd);
1480
1481 if (qla1280_read_nvram(ha)) {
1482 dprintk(2, "qla1280_initialize_adapter: failed to read "
1483 "NVRAM\n");
1484 }
1485
1486
1487
1488
1489
1490
1491 spin_lock_irqsave(ha->host->host_lock, flags);
1492
1493 status = qla1280_load_firmware(ha);
1494 if (status) {
1495 printk(KERN_ERR "scsi(%li): initialize: pci probe failed!\n",
1496 ha->host_no);
1497 goto out;
1498 }
1499
1500
1501 dprintk(1, "scsi(%ld): Configure NVRAM parameters\n", ha->host_no);
1502 qla1280_nvram_config(ha);
1503
1504 if (ha->flags.disable_host_adapter) {
1505 status = 1;
1506 goto out;
1507 }
1508
1509 status = qla1280_init_rings(ha);
1510 if (status)
1511 goto out;
1512
1513
1514 for (bus = 0; bus < ha->ports; bus++) {
1515 if (!ha->bus_settings[bus].disable_scsi_reset &&
1516 qla1280_bus_reset(ha, bus) &&
1517 qla1280_bus_reset(ha, bus))
1518 ha->bus_settings[bus].scsi_bus_dead = 1;
1519 }
1520
1521 ha->flags.online = 1;
1522 out:
1523 spin_unlock_irqrestore(ha->host->host_lock, flags);
1524
1525 if (status)
1526 dprintk(2, "qla1280_initialize_adapter: **** FAILED ****\n");
1527
1528 LEAVE("qla1280_initialize_adapter");
1529 return status;
1530}
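
/*
 * qla1280_request_firmware
 *	Look up (and cache) the firmware image for this board type,
 *	dropping the host lock around the request_firmware() call.
 */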
1544static const struct firmware *
1545qla1280_request_firmware(struct scsi_qla_host *ha)
1546{
1547 const struct firmware *fw;
1548 int err;
1549 int index;
1550 char *fwname;
1551
1552 spin_unlock_irq(ha->host->host_lock);
1553 mutex_lock(&qla1280_firmware_mutex);
1554
1555 index = ql1280_board_tbl[ha->devnum].fw_index;
1556 fw = qla1280_fw_tbl[index].fw;
1557 if (fw)
1558 goto out;
1559
1560 fwname = qla1280_fw_tbl[index].fwname;
1561 err = request_firmware(&fw, fwname, &ha->pdev->dev);
1562
1563 if (err) {
1564 printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
1565 fwname, err);
1566 fw = ERR_PTR(err);
1567 goto unlock;
1568 }
1569 if ((fw->size % 2) || (fw->size < 6)) {
1570 printk(KERN_ERR "Invalid firmware length %zu in image \"%s\"\n",
1571 fw->size, fwname);
1572 release_firmware(fw);
1573 fw = ERR_PTR(-EINVAL);
1574 goto unlock;
1575 }
1576
1577 qla1280_fw_tbl[index].fw = fw;
1578
1579 out:
1580 ha->fwver1 = fw->data[0];
1581 ha->fwver2 = fw->data[1];
1582 ha->fwver3 = fw->data[2];
1583 unlock:
1584 mutex_unlock(&qla1280_firmware_mutex);
1585 spin_lock_irq(ha->host->host_lock);
1586 return fw;
1587}
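
/*
 * qla1280_chip_diag
 *	Reset the ISP, verify its product ID and run the mailbox register
 *	wrap test before any firmware is loaded.
 */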
static int
qla1280_chip_diag(struct scsi_qla_host *ha)
{
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct device_reg __iomem *reg = ha->iobase;
	int status = 0;
	int cnt;
	uint16_t data;
	dprintk(3, "qla1280_chip_diag: testing device at 0x%p \n", &reg->id_l);

	dprintk(1, "scsi(%ld): Verifying chip\n", ha->host_no);

	WRT_REG_WORD(&reg->ictrl, ISP_RESET);

	udelay(20);
	data = qla1280_debounce_register(&reg->ictrl);

	for (cnt = 1000000; cnt && data & ISP_RESET; cnt--) {
		udelay(5);
		data = RD_REG_WORD(&reg->ictrl);
	}

	if (!cnt)
		goto fail;

	dprintk(3, "qla1280_chip_diag: reset register cleared by chip reset\n");

	WRT_REG_WORD(&reg->cfg_1, 0);

	WRT_REG_WORD(&reg->host_cmd, HC_RESET_RISC |
		     HC_RELEASE_RISC | HC_DISABLE_BIOS);

	RD_REG_WORD(&reg->id_l);
	data = qla1280_debounce_register(&reg->mailbox0);

	for (cnt = 1000000; cnt && data == MBS_BUSY; cnt--) {
		udelay(5);
		data = RD_REG_WORD(&reg->mailbox0);
	}

	if (!cnt)
		goto fail;

	dprintk(3, "qla1280_chip_diag: Checking product ID of chip\n");

	if (RD_REG_WORD(&reg->mailbox1) != PROD_ID_1 ||
	    (RD_REG_WORD(&reg->mailbox2) != PROD_ID_2 &&
	     RD_REG_WORD(&reg->mailbox2) != PROD_ID_2a) ||
	    RD_REG_WORD(&reg->mailbox3) != PROD_ID_3 ||
	    RD_REG_WORD(&reg->mailbox4) != PROD_ID_4) {
		printk(KERN_INFO "qla1280: Wrong product ID = "
		       "0x%x,0x%x,0x%x,0x%x\n",
		       RD_REG_WORD(&reg->mailbox1),
		       RD_REG_WORD(&reg->mailbox2),
		       RD_REG_WORD(&reg->mailbox3),
		       RD_REG_WORD(&reg->mailbox4));
		goto fail;
	}

	qla1280_enable_intrs(ha);

	dprintk(1, "qla1280_chip_diag: Checking mailboxes of chip\n");

	mb[0] = MBC_MAILBOX_REGISTER_TEST;
	mb[1] = 0xAAAA;
	mb[2] = 0x5555;
	mb[3] = 0xAA55;
	mb[4] = 0x55AA;
	mb[5] = 0xA5A5;
	mb[6] = 0x5A5A;
	mb[7] = 0x2525;

	status = qla1280_mailbox_command(ha, 0xff, mb);
	if (status)
		goto fail;

	if (mb[1] != 0xAAAA || mb[2] != 0x5555 || mb[3] != 0xAA55 ||
	    mb[4] != 0x55AA || mb[5] != 0xA5A5 || mb[6] != 0x5A5A ||
	    mb[7] != 0x2525) {
		printk(KERN_INFO "qla1280: Failed mbox check\n");
		goto fail;
	}

	dprintk(3, "qla1280_chip_diag: exiting normally\n");
	return 0;
 fail:
	dprintk(2, "qla1280_chip_diag: **** FAILED ****\n");
	return status;
}
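
/*
 * qla1280_load_firmware_pio
 *	Load the RISC firmware one word at a time with MBC_WRITE_RAM_WORD
 *	mailbox commands (used on the ISP1040).
 */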
static int
qla1280_load_firmware_pio(struct scsi_qla_host *ha)
{
	const struct firmware *fw;
	const __le16 *fw_data;
	uint16_t risc_address, risc_code_size;
	uint16_t mb[MAILBOX_REGISTER_COUNT], i;
	int err = 0;

	fw = qla1280_request_firmware(ha);
	if (IS_ERR(fw))
		return PTR_ERR(fw);

	fw_data = (const __le16 *)&fw->data[0];
	ha->fwstart = __le16_to_cpu(fw_data[2]);

	risc_address = ha->fwstart;
	fw_data = (const __le16 *)&fw->data[6];
	risc_code_size = (fw->size - 6) / 2;

	for (i = 0; i < risc_code_size; i++) {
		mb[0] = MBC_WRITE_RAM_WORD;
		mb[1] = risc_address + i;
		mb[2] = __le16_to_cpu(fw_data[i]);

		err = qla1280_mailbox_command(ha, BIT_0 | BIT_1 | BIT_2, mb);
		if (err) {
			printk(KERN_ERR "scsi(%li): Failed to load firmware\n",
			       ha->host_no);
			break;
		}
	}

	return err;
}

#define DUMP_IT_BACK 0
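
/*
 * qla1280_load_firmware_dma
 *	Load the RISC firmware in chunks by staging them in the request ring
 *	and issuing MBC_LOAD_RAM; optionally read each chunk back for
 *	verification when DUMP_IT_BACK is set.
 */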
1749static int
1750qla1280_load_firmware_dma(struct scsi_qla_host *ha)
1751{
1752
1753 const struct firmware *fw;
1754 const __le16 *fw_data;
1755 uint16_t risc_address, risc_code_size;
1756 uint16_t mb[MAILBOX_REGISTER_COUNT], cnt;
1757 int err = 0, num, i;
1758#if DUMP_IT_BACK
1759 uint8_t *sp, *tbuf;
1760 dma_addr_t p_tbuf;
1761
1762 tbuf = pci_alloc_consistent(ha->pdev, 8000, &p_tbuf);
1763 if (!tbuf)
1764 return -ENOMEM;
1765#endif
1766
1767 fw = qla1280_request_firmware(ha);
1768 if (IS_ERR(fw))
1769 return PTR_ERR(fw);
1770
1771 fw_data = (const __le16 *)&fw->data[0];
1772 ha->fwstart = __le16_to_cpu(fw_data[2]);
1773
1774
1775 risc_address = ha->fwstart;
1776 fw_data = (const __le16 *)&fw->data[6];
1777 risc_code_size = (fw->size - 6) / 2;
1778
1779 dprintk(1, "%s: DMA RISC code (%i) words\n",
1780 __func__, risc_code_size);
1781
1782 num = 0;
1783 while (risc_code_size > 0) {
1784 int warn __attribute__((unused)) = 0;
1785
1786 cnt = 2000 >> 1;
1787
1788 if (cnt > risc_code_size)
1789 cnt = risc_code_size;
1790
1791 dprintk(2, "qla1280_setup_chip: loading risc @ =(0x%p),"
1792 "%d,%d(0x%x)\n",
1793 fw_data, cnt, num, risc_address);
1794 for(i = 0; i < cnt; i++)
1795 ((__le16 *)ha->request_ring)[i] = fw_data[i];
1796
1797 mb[0] = MBC_LOAD_RAM;
1798 mb[1] = risc_address;
1799 mb[4] = cnt;
1800 mb[3] = ha->request_dma & 0xffff;
1801 mb[2] = (ha->request_dma >> 16) & 0xffff;
1802 mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
1803 mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
1804 dprintk(2, "%s: op=%d 0x%p = 0x%4x,0x%4x,0x%4x,0x%4x\n",
1805 __func__, mb[0],
1806 (void *)(long)ha->request_dma,
1807 mb[6], mb[7], mb[2], mb[3]);
1808 err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
1809 BIT_1 | BIT_0, mb);
1810 if (err) {
1811 printk(KERN_ERR "scsi(%li): Failed to load partial "
1812 "segment of f\n", ha->host_no);
1813 goto out;
1814 }
1815
1816#if DUMP_IT_BACK
1817 mb[0] = MBC_DUMP_RAM;
1818 mb[1] = risc_address;
1819 mb[4] = cnt;
1820 mb[3] = p_tbuf & 0xffff;
1821 mb[2] = (p_tbuf >> 16) & 0xffff;
1822 mb[7] = pci_dma_hi32(p_tbuf) & 0xffff;
1823 mb[6] = pci_dma_hi32(p_tbuf) >> 16;
1824
1825 err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
1826 BIT_1 | BIT_0, mb);
1827 if (err) {
1828 printk(KERN_ERR
1829 "Failed to dump partial segment of f/w\n");
1830 goto out;
1831 }
1832 sp = (uint8_t *)ha->request_ring;
1833 for (i = 0; i < (cnt << 1); i++) {
1834 if (tbuf[i] != sp[i] && warn++ < 10) {
1835 printk(KERN_ERR "%s: FW compare error @ "
1836 "byte(0x%x) loop#=%x\n",
1837 __func__, i, num);
1838 printk(KERN_ERR "%s: FWbyte=%x "
1839 "FWfromChip=%x\n",
1840 __func__, sp[i], tbuf[i]);
1841
1842 }
1843 }
1844#endif
1845 risc_address += cnt;
1846 risc_code_size = risc_code_size - cnt;
1847 fw_data = fw_data + cnt;
1848 num++;
1849 }
1850
1851 out:
1852#if DUMP_IT_BACK
1853 pci_free_consistent(ha->pdev, 8000, tbuf, p_tbuf);
1854#endif
1855 return err;
1856}
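
/*
 * qla1280_start_firmware
 *	Verify the checksum of the loaded RISC code and start it executing.
 */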
static int
qla1280_start_firmware(struct scsi_qla_host *ha)
{
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	int err;

	dprintk(1, "%s: Verifying checksum of loaded RISC code.\n",
		__func__);

	mb[0] = MBC_VERIFY_CHECKSUM;
	mb[1] = ha->fwstart;
	err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
	if (err) {
		printk(KERN_ERR "scsi(%li): RISC checksum failed.\n", ha->host_no);
		return err;
	}

	dprintk(1, "%s: start firmware running.\n", __func__);
	mb[0] = MBC_EXECUTE_FIRMWARE;
	mb[1] = ha->fwstart;
	err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
	if (err) {
		printk(KERN_ERR "scsi(%li): Failed to start firmware\n",
		       ha->host_no);
	}

	return err;
}
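
/*
 * qla1280_load_firmware
 *	Run chip diagnostics, then load firmware by PIO or DMA as
 *	appropriate for the chip and start it.
 */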
static int
qla1280_load_firmware(struct scsi_qla_host *ha)
{
	int err;

	err = qla1280_chip_diag(ha);
	if (err)
		goto out;
	if (IS_ISP1040(ha))
		err = qla1280_load_firmware_pio(ha);
	else
		err = qla1280_load_firmware_dma(ha);
	if (err)
		goto out;
	err = qla1280_start_firmware(ha);
 out:
	return err;
}
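
/*
 * qla1280_init_rings
 *	Tell the firmware where the request and response rings live
 *	(64-bit base addresses) and reset the ring indices.
 */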
1923static int
1924qla1280_init_rings(struct scsi_qla_host *ha)
1925{
1926 uint16_t mb[MAILBOX_REGISTER_COUNT];
1927 int status = 0;
1928
1929 ENTER("qla1280_init_rings");
1930
1931
1932 memset(ha->outstanding_cmds, 0,
1933 sizeof(struct srb *) * MAX_OUTSTANDING_COMMANDS);
1934
1935
1936 ha->request_ring_ptr = ha->request_ring;
1937 ha->req_ring_index = 0;
1938 ha->req_q_cnt = REQUEST_ENTRY_CNT;
1939
1940 mb[0] = MBC_INIT_REQUEST_QUEUE_A64;
1941 mb[1] = REQUEST_ENTRY_CNT;
1942 mb[3] = ha->request_dma & 0xffff;
1943 mb[2] = (ha->request_dma >> 16) & 0xffff;
1944 mb[4] = 0;
1945 mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
1946 mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
1947 if (!(status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_4 |
1948 BIT_3 | BIT_2 | BIT_1 | BIT_0,
1949 &mb[0]))) {
1950
1951 ha->response_ring_ptr = ha->response_ring;
1952 ha->rsp_ring_index = 0;
1953
1954 mb[0] = MBC_INIT_RESPONSE_QUEUE_A64;
1955 mb[1] = RESPONSE_ENTRY_CNT;
1956 mb[3] = ha->response_dma & 0xffff;
1957 mb[2] = (ha->response_dma >> 16) & 0xffff;
1958 mb[5] = 0;
1959 mb[7] = pci_dma_hi32(ha->response_dma) & 0xffff;
1960 mb[6] = pci_dma_hi32(ha->response_dma) >> 16;
1961 status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_5 |
1962 BIT_3 | BIT_2 | BIT_1 | BIT_0,
1963 &mb[0]);
1964 }
1965
1966 if (status)
1967 dprintk(2, "qla1280_init_rings: **** FAILED ****\n");
1968
1969 LEAVE("qla1280_init_rings");
1970 return status;
1971}
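
/* Dump the interesting NVRAM/driver settings at debug level 1. */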
1973static void
1974qla1280_print_settings(struct nvram *nv)
1975{
1976 dprintk(1, "qla1280 : initiator scsi id bus[0]=%d\n",
1977 nv->bus[0].config_1.initiator_id);
1978 dprintk(1, "qla1280 : initiator scsi id bus[1]=%d\n",
1979 nv->bus[1].config_1.initiator_id);
1980
1981 dprintk(1, "qla1280 : bus reset delay[0]=%d\n",
1982 nv->bus[0].bus_reset_delay);
1983 dprintk(1, "qla1280 : bus reset delay[1]=%d\n",
1984 nv->bus[1].bus_reset_delay);
1985
1986 dprintk(1, "qla1280 : retry count[0]=%d\n", nv->bus[0].retry_count);
1987 dprintk(1, "qla1280 : retry delay[0]=%d\n", nv->bus[0].retry_delay);
1988 dprintk(1, "qla1280 : retry count[1]=%d\n", nv->bus[1].retry_count);
1989 dprintk(1, "qla1280 : retry delay[1]=%d\n", nv->bus[1].retry_delay);
1990
1991 dprintk(1, "qla1280 : async data setup time[0]=%d\n",
1992 nv->bus[0].config_2.async_data_setup_time);
1993 dprintk(1, "qla1280 : async data setup time[1]=%d\n",
1994 nv->bus[1].config_2.async_data_setup_time);
1995
1996 dprintk(1, "qla1280 : req/ack active negation[0]=%d\n",
1997 nv->bus[0].config_2.req_ack_active_negation);
1998 dprintk(1, "qla1280 : req/ack active negation[1]=%d\n",
1999 nv->bus[1].config_2.req_ack_active_negation);
2000
2001 dprintk(1, "qla1280 : data line active negation[0]=%d\n",
2002 nv->bus[0].config_2.data_line_active_negation);
2003 dprintk(1, "qla1280 : data line active negation[1]=%d\n",
2004 nv->bus[1].config_2.data_line_active_negation);
2005
2006 dprintk(1, "qla1280 : disable loading risc code=%d\n",
2007 nv->cntr_flags_1.disable_loading_risc_code);
2008
2009 dprintk(1, "qla1280 : enable 64bit addressing=%d\n",
2010 nv->cntr_flags_1.enable_64bit_addressing);
2011
2012 dprintk(1, "qla1280 : selection timeout limit[0]=%d\n",
2013 nv->bus[0].selection_timeout);
2014 dprintk(1, "qla1280 : selection timeout limit[1]=%d\n",
2015 nv->bus[1].selection_timeout);
2016
2017 dprintk(1, "qla1280 : max queue depth[0]=%d\n",
2018 nv->bus[0].max_queue_depth);
2019 dprintk(1, "qla1280 : max queue depth[1]=%d\n",
2020 nv->bus[1].max_queue_depth);
2021}
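
/* Default per-target negotiation parameters used when NVRAM is unusable. */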
2023static void
2024qla1280_set_target_defaults(struct scsi_qla_host *ha, int bus, int target)
2025{
2026 struct nvram *nv = &ha->nvram;
2027
2028 nv->bus[bus].target[target].parameter.renegotiate_on_error = 1;
2029 nv->bus[bus].target[target].parameter.auto_request_sense = 1;
2030 nv->bus[bus].target[target].parameter.tag_queuing = 1;
2031 nv->bus[bus].target[target].parameter.enable_sync = 1;
2032#if 1
2033 nv->bus[bus].target[target].parameter.enable_wide = 1;
2034#endif
2035 nv->bus[bus].target[target].execution_throttle =
2036 nv->bus[bus].max_queue_depth - 1;
2037 nv->bus[bus].target[target].parameter.parity_checking = 1;
2038 nv->bus[bus].target[target].parameter.disconnect_allowed = 1;
2039
2040 if (IS_ISP1x160(ha)) {
2041 nv->bus[bus].target[target].flags.flags1x160.device_enable = 1;
2042 nv->bus[bus].target[target].flags.flags1x160.sync_offset = 0x0e;
2043 nv->bus[bus].target[target].sync_period = 9;
2044 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 1;
2045 nv->bus[bus].target[target].ppr_1x160.flags.ppr_options = 2;
2046 nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width = 1;
2047 } else {
2048 nv->bus[bus].target[target].flags.flags1x80.device_enable = 1;
2049 nv->bus[bus].target[target].flags.flags1x80.sync_offset = 12;
2050 nv->bus[bus].target[target].sync_period = 10;
2051 }
2052}
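
/* Build a complete set of default NVRAM settings for this adapter. */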
2054static void
2055qla1280_set_defaults(struct scsi_qla_host *ha)
2056{
2057 struct nvram *nv = &ha->nvram;
2058 int bus, target;
2059
2060 dprintk(1, "Using defaults for NVRAM: \n");
2061 memset(nv, 0, sizeof(struct nvram));
2062
2063
2064 nv->firmware_feature.f.enable_fast_posting = 1;
2065 nv->firmware_feature.f.disable_synchronous_backoff = 1;
2066 nv->termination.scsi_bus_0_control = 3;
2067 nv->termination.scsi_bus_1_control = 3;
2068 nv->termination.auto_term_support = 1;
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078 nv->isp_config.burst_enable = 1;
2079 if (IS_ISP1040(ha))
2080 nv->isp_config.fifo_threshold |= 3;
2081 else
2082 nv->isp_config.fifo_threshold |= 4;
2083
2084 if (IS_ISP1x160(ha))
2085 nv->isp_parameter = 0x01;
2086
2087 for (bus = 0; bus < MAX_BUSES; bus++) {
2088 nv->bus[bus].config_1.initiator_id = 7;
2089 nv->bus[bus].config_2.req_ack_active_negation = 1;
2090 nv->bus[bus].config_2.data_line_active_negation = 1;
2091 nv->bus[bus].selection_timeout = 250;
2092 nv->bus[bus].max_queue_depth = 32;
2093
2094 if (IS_ISP1040(ha)) {
2095 nv->bus[bus].bus_reset_delay = 3;
2096 nv->bus[bus].config_2.async_data_setup_time = 6;
2097 nv->bus[bus].retry_delay = 1;
2098 } else {
2099 nv->bus[bus].bus_reset_delay = 5;
2100 nv->bus[bus].config_2.async_data_setup_time = 8;
2101 }
2102
2103 for (target = 0; target < MAX_TARGETS; target++)
2104 qla1280_set_target_defaults(ha, bus, target);
2105 }
2106}
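
/*
 * qla1280_config_target
 *	Set initial (safe) target parameters and per-LUN queue settings;
 *	also record which targets have tagged queueing and which are enabled.
 */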
2108static int
2109qla1280_config_target(struct scsi_qla_host *ha, int bus, int target)
2110{
2111 struct nvram *nv = &ha->nvram;
2112 uint16_t mb[MAILBOX_REGISTER_COUNT];
2113 int status, lun;
2114 uint16_t flag;
2115
2116
2117 mb[0] = MBC_SET_TARGET_PARAMETERS;
2118 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
2119
2120
2121
2122
2123
2124
2125 mb[2] = (TP_RENEGOTIATE | TP_AUTO_REQUEST_SENSE | TP_TAGGED_QUEUE
2126 | TP_WIDE | TP_PARITY | TP_DISCONNECT);
2127
2128 if (IS_ISP1x160(ha))
2129 mb[3] = nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8;
2130 else
2131 mb[3] = nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8;
2132 mb[3] |= nv->bus[bus].target[target].sync_period;
2133 status = qla1280_mailbox_command(ha, 0x0f, mb);
2134
2135
2136 flag = (BIT_0 << target);
2137 if (nv->bus[bus].target[target].parameter.tag_queuing)
2138 ha->bus_settings[bus].qtag_enables |= flag;
2139
2140
2141 if (IS_ISP1x160(ha)) {
2142 if (nv->bus[bus].target[target].flags.flags1x160.device_enable)
2143 ha->bus_settings[bus].device_enables |= flag;
2144 ha->bus_settings[bus].lun_disables |= 0;
2145 } else {
2146 if (nv->bus[bus].target[target].flags.flags1x80.device_enable)
2147 ha->bus_settings[bus].device_enables |= flag;
2148
2149 if (nv->bus[bus].target[target].flags.flags1x80.lun_disable)
2150 ha->bus_settings[bus].lun_disables |= flag;
2151 }
2152
2153
2154 for (lun = 0; lun < MAX_LUNS; lun++) {
2155 mb[0] = MBC_SET_DEVICE_QUEUE;
2156 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
2157 mb[1] |= lun;
2158 mb[2] = nv->bus[bus].max_queue_depth;
2159 mb[3] = nv->bus[bus].target[target].execution_throttle;
2160 status |= qla1280_mailbox_command(ha, 0x0f, mb);
2161 }
2162
2163 return status;
2164}
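
/*
 * qla1280_config_bus
 *	Configure one SCSI bus: initiator ID, reset delay, queue high-water
 *	mark, then each target on the bus.
 */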
2166static int
2167qla1280_config_bus(struct scsi_qla_host *ha, int bus)
2168{
2169 struct nvram *nv = &ha->nvram;
2170 uint16_t mb[MAILBOX_REGISTER_COUNT];
2171 int target, status;
2172
2173
2174 ha->bus_settings[bus].disable_scsi_reset =
2175 nv->bus[bus].config_1.scsi_reset_disable;
2176
2177
2178 ha->bus_settings[bus].id = nv->bus[bus].config_1.initiator_id;
2179 mb[0] = MBC_SET_INITIATOR_ID;
2180 mb[1] = bus ? ha->bus_settings[bus].id | BIT_7 :
2181 ha->bus_settings[bus].id;
2182 status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
2183
2184
2185 ha->bus_settings[bus].bus_reset_delay =
2186 nv->bus[bus].bus_reset_delay;
2187
2188
2189 ha->bus_settings[bus].hiwat = nv->bus[bus].max_queue_depth - 1;
2190
2191
2192 for (target = 0; target < MAX_TARGETS; target++)
2193 status |= qla1280_config_target(ha, bus, target);
2194
2195 return status;
2196}
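
/*
 * qla1280_nvram_config
 *	Apply NVRAM (or default) settings to the chip and firmware: FIFO and
 *	burst configuration, termination, clock rate, retry counts, active
 *	negation, selection timeouts and per-bus/target parameters.
 */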
2198static int
2199qla1280_nvram_config(struct scsi_qla_host *ha)
2200{
2201 struct device_reg __iomem *reg = ha->iobase;
2202 struct nvram *nv = &ha->nvram;
2203 int bus, target, status = 0;
2204 uint16_t mb[MAILBOX_REGISTER_COUNT];
2205
2206 ENTER("qla1280_nvram_config");
2207
2208 if (ha->nvram_valid) {
2209
2210 for (bus = 0; bus < MAX_BUSES; bus++)
2211 for (target = 0; target < MAX_TARGETS; target++) {
2212 nv->bus[bus].target[target].parameter.
2213 auto_request_sense = 1;
2214 }
2215 } else {
2216 qla1280_set_defaults(ha);
2217 }
2218
2219 qla1280_print_settings(nv);
2220
2221
2222 ha->flags.disable_risc_code_load =
2223 nv->cntr_flags_1.disable_loading_risc_code;
2224
	if (IS_ISP1040(ha)) {
		uint16_t hwrev, cfg1, cdma_conf, ddma_conf;

		hwrev = RD_REG_WORD(&reg->cfg_0) & ISP_CFG0_HWMSK;

		cfg1 = RD_REG_WORD(&reg->cfg_1) & ~(BIT_4 | BIT_5 | BIT_6);
		cdma_conf = RD_REG_WORD(&reg->cdma_cfg);
		ddma_conf = RD_REG_WORD(&reg->ddma_cfg);

		if (hwrev != ISP_CFG0_1040A)
			cfg1 |= nv->isp_config.fifo_threshold << 4;

		cfg1 |= nv->isp_config.burst_enable << 2;
		WRT_REG_WORD(&reg->cfg_1, cfg1);

		WRT_REG_WORD(&reg->cdma_cfg, cdma_conf | CDMA_CONF_BENAB);
		WRT_REG_WORD(&reg->ddma_cfg, ddma_conf | DDMA_CONF_BENAB);
	} else {
		uint16_t cfg1, term;

		cfg1 = nv->isp_config.fifo_threshold << 4;
		cfg1 |= nv->isp_config.burst_enable << 2;

		if (ha->ports > 1)
			cfg1 |= BIT_13;
		WRT_REG_WORD(&reg->cfg_1, cfg1);

		WRT_REG_WORD(&reg->gpio_enable,
			     BIT_7 | BIT_3 | BIT_2 | BIT_1 | BIT_0);
		term = nv->termination.scsi_bus_1_control;
		term |= nv->termination.scsi_bus_0_control << 2;
		term |= nv->termination.auto_term_support << 7;
		RD_REG_WORD(&reg->id_l);
		WRT_REG_WORD(&reg->gpio_data, term);
	}
	RD_REG_WORD(&reg->id_l);
2264
2265
2266 mb[0] = MBC_SET_SYSTEM_PARAMETER;
2267 mb[1] = nv->isp_parameter;
2268 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
2269
2270 if (IS_ISP1x40(ha)) {
2271
2272 mb[0] = MBC_SET_CLOCK_RATE;
2273 mb[1] = 40;
2274 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2275 }
2276
2277
2278 mb[0] = MBC_SET_FIRMWARE_FEATURES;
2279 mb[1] = nv->firmware_feature.f.enable_fast_posting;
2280 mb[1] |= nv->firmware_feature.f.report_lvd_bus_transition << 1;
2281 mb[1] |= nv->firmware_feature.f.disable_synchronous_backoff << 5;
2282#if defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_SGI_SN2)
2283 if (ia64_platform_is("sn2")) {
2284 printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
2285 "workaround\n", ha->host_no);
2286 mb[1] |= nv->firmware_feature.f.unused_9 << 9;
2287 }
2288#endif
2289 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2290
2291
2292 mb[0] = MBC_SET_RETRY_COUNT;
2293 mb[1] = nv->bus[0].retry_count;
2294 mb[2] = nv->bus[0].retry_delay;
2295 mb[6] = nv->bus[1].retry_count;
2296 mb[7] = nv->bus[1].retry_delay;
2297 status |= qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_2 |
2298 BIT_1 | BIT_0, &mb[0]);
2299
2300
2301 mb[0] = MBC_SET_ASYNC_DATA_SETUP;
2302 mb[1] = nv->bus[0].config_2.async_data_setup_time;
2303 mb[2] = nv->bus[1].config_2.async_data_setup_time;
2304 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2305
2306
2307 mb[0] = MBC_SET_ACTIVE_NEGATION;
2308 mb[1] = 0;
2309 if (nv->bus[0].config_2.req_ack_active_negation)
2310 mb[1] |= BIT_5;
2311 if (nv->bus[0].config_2.data_line_active_negation)
2312 mb[1] |= BIT_4;
2313 mb[2] = 0;
2314 if (nv->bus[1].config_2.req_ack_active_negation)
2315 mb[2] |= BIT_5;
2316 if (nv->bus[1].config_2.data_line_active_negation)
2317 mb[2] |= BIT_4;
2318 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2319
2320 mb[0] = MBC_SET_DATA_OVERRUN_RECOVERY;
2321 mb[1] = 2;
2322 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2323
2324
2325 mb[0] = MBC_SET_PCI_CONTROL;
2326 mb[1] = BIT_1;
2327 mb[2] = BIT_1;
2328 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2329
2330 mb[0] = MBC_SET_TAG_AGE_LIMIT;
2331 mb[1] = 8;
2332 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2333
2334
2335 mb[0] = MBC_SET_SELECTION_TIMEOUT;
2336 mb[1] = nv->bus[0].selection_timeout;
2337 mb[2] = nv->bus[1].selection_timeout;
2338 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2339
2340 for (bus = 0; bus < ha->ports; bus++)
2341 status |= qla1280_config_bus(ha, bus);
2342
2343 if (status)
2344 dprintk(2, "qla1280_nvram_config: **** FAILED ****\n");
2345
2346 LEAVE("qla1280_nvram_config");
2347 return status;
2348}
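
/* Read one 16-bit word from NVRAM at the given address. */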
static uint16_t
qla1280_get_nvram_word(struct scsi_qla_host *ha, uint32_t address)
{
	uint32_t nv_cmd;
	uint16_t data;

	nv_cmd = address << 16;
	nv_cmd |= NV_READ_OP;

	data = le16_to_cpu(qla1280_nvram_request(ha, nv_cmd));

	dprintk(8, "qla1280_get_nvram_word: exiting normally NVRAM data = "
		"0x%x", data);

	return data;
}
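
/*
 * qla1280_nvram_request
 *	Bit-bang a command/address out to the serial NVRAM and clock the
 *	16 data bits back in.
 */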
static uint16_t
qla1280_nvram_request(struct scsi_qla_host *ha, uint32_t nv_cmd)
{
	struct device_reg __iomem *reg = ha->iobase;
	int cnt;
	uint16_t data = 0;
	uint16_t reg_data;

	nv_cmd <<= 5;
	for (cnt = 0; cnt < 11; cnt++) {
		if (nv_cmd & BIT_31)
			qla1280_nv_write(ha, NV_DATA_OUT);
		else
			qla1280_nv_write(ha, 0);
		nv_cmd <<= 1;
	}

	for (cnt = 0; cnt < 16; cnt++) {
		WRT_REG_WORD(&reg->nvram, (NV_SELECT | NV_CLOCK));
		RD_REG_WORD(&reg->id_l);
		NVRAM_DELAY();
		data <<= 1;
		reg_data = RD_REG_WORD(&reg->nvram);
		if (reg_data & NV_DATA_IN)
			data |= BIT_0;
		WRT_REG_WORD(&reg->nvram, NV_SELECT);
		RD_REG_WORD(&reg->id_l);
		NVRAM_DELAY();
	}

	WRT_REG_WORD(&reg->nvram, NV_DESELECT);
	RD_REG_WORD(&reg->id_l);
	NVRAM_DELAY();

	return data;
}
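
/* Clock a single data bit out to the serial NVRAM. */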
static void
qla1280_nv_write(struct scsi_qla_host *ha, uint16_t data)
{
	struct device_reg __iomem *reg = ha->iobase;

	WRT_REG_WORD(&reg->nvram, data | NV_SELECT);
	RD_REG_WORD(&reg->id_l);
	NVRAM_DELAY();
	WRT_REG_WORD(&reg->nvram, data | NV_SELECT | NV_CLOCK);
	RD_REG_WORD(&reg->id_l);
	NVRAM_DELAY();
	WRT_REG_WORD(&reg->nvram, data | NV_SELECT);
	RD_REG_WORD(&reg->id_l);
	NVRAM_DELAY();
}
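
/*
 * qla1280_mailbox_command
 *	Issue a mailbox command and wait (with a 20 second timeout) for the
 *	ISP to complete it; the mask 'mr' selects which mailbox registers
 *	are loaded. Returns 0 on MBS_CMD_CMP, 1 otherwise.
 */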
2467static int
2468qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
2469{
2470 struct device_reg __iomem *reg = ha->iobase;
2471 int status = 0;
2472 int cnt;
2473 uint16_t *optr, *iptr;
2474 uint16_t __iomem *mptr;
2475 uint16_t data;
2476 DECLARE_COMPLETION_ONSTACK(wait);
2477 struct timer_list timer;
2478
2479 ENTER("qla1280_mailbox_command");
2480
2481 if (ha->mailbox_wait) {
2482 printk(KERN_ERR "Warning mailbox wait already in use!\n");
2483 }
2484 ha->mailbox_wait = &wait;
2485
2486
2487
2488
2489
2490
	mptr = (uint16_t __iomem *) &reg->mailbox0;
2492 iptr = mb;
2493 for (cnt = 0; cnt < MAILBOX_REGISTER_COUNT; cnt++) {
2494 if (mr & BIT_0) {
2495 WRT_REG_WORD(mptr, (*iptr));
2496 }
2497
2498 mr >>= 1;
2499 mptr++;
2500 iptr++;
2501 }
2502
2503
2504
2505
2506 init_timer(&timer);
2507 timer.expires = jiffies + 20*HZ;
2508 timer.data = (unsigned long)ha;
2509 timer.function = qla1280_mailbox_timeout;
2510 add_timer(&timer);
2511
2512 spin_unlock_irq(ha->host->host_lock);
	WRT_REG_WORD(&reg->host_cmd, HC_SET_HOST_INT);
	data = qla1280_debounce_register(&reg->istatus);
2515
2516 wait_for_completion(&wait);
2517 del_timer_sync(&timer);
2518
2519 spin_lock_irq(ha->host->host_lock);
2520
2521 ha->mailbox_wait = NULL;
2522
2523
2524 if (ha->mailbox_out[0] != MBS_CMD_CMP) {
2525 printk(KERN_WARNING "qla1280_mailbox_command: Command failed, "
2526 "mailbox0 = 0x%04x, mailbox_out0 = 0x%04x, istatus = "
2527 "0x%04x\n",
		       mb[0], ha->mailbox_out[0], RD_REG_WORD(&reg->istatus));
		printk(KERN_WARNING "m0 %04x, m1 %04x, m2 %04x, m3 %04x\n",
		       RD_REG_WORD(&reg->mailbox0), RD_REG_WORD(&reg->mailbox1),
		       RD_REG_WORD(&reg->mailbox2), RD_REG_WORD(&reg->mailbox3));
		printk(KERN_WARNING "m4 %04x, m5 %04x, m6 %04x, m7 %04x\n",
		       RD_REG_WORD(&reg->mailbox4), RD_REG_WORD(&reg->mailbox5),
		       RD_REG_WORD(&reg->mailbox6), RD_REG_WORD(&reg->mailbox7));
2535 status = 1;
2536 }
2537
2538
2539 optr = mb;
2540 iptr = (uint16_t *) &ha->mailbox_out[0];
2541 mr = MAILBOX_REGISTER_COUNT;
2542 memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t));
2543
2544 if (ha->flags.reset_marker)
2545 qla1280_rst_aen(ha);
2546
2547 if (status)
2548 dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = "
2549 "0x%x ****\n", mb[0]);
2550
2551 LEAVE("qla1280_mailbox_command");
2552 return status;
2553}
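
/* Poll the ISP once for pending work and complete any finished commands. */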
2562static void
2563qla1280_poll(struct scsi_qla_host *ha)
2564{
2565 struct device_reg __iomem *reg = ha->iobase;
2566 uint16_t data;
2567 LIST_HEAD(done_q);
2568
2569
2570
2571
2572 data = RD_REG_WORD(&reg->istatus);
2573 if (data & RISC_INT)
2574 qla1280_isr(ha, &done_q);
2575
2576 if (!ha->mailbox_wait) {
2577 if (ha->flags.reset_marker)
2578 qla1280_rst_aen(ha);
2579 }
2580
2581 if (!list_empty(&done_q))
2582 qla1280_done(ha);
2583
2584
2585}
2586
2587
2588
2589
2590
2591
2592
2593
2594
2595
2596
2597
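/*
 * qla1280_bus_reset
 *	Issue a SCSI bus reset mailbox command for 'bus'.  On success the
 *	routine sleeps for the configured reset delay (with the host lock
 *	dropped), clears the dead-bus bookkeeping and sends a sync-all
 *	marker; on failure it bumps the failed reset count and eventually
 *	marks the bus dead.  Returns 0 on success, non-zero otherwise.
 */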
2598static int
2599qla1280_bus_reset(struct scsi_qla_host *ha, int bus)
2600{
2601 uint16_t mb[MAILBOX_REGISTER_COUNT];
2602 uint16_t reset_delay;
2603 int status;
2604
2605 dprintk(3, "qla1280_bus_reset: entered\n");
2606
2607 if (qla1280_verbose)
2608 printk(KERN_INFO "scsi(%li:%i): Resetting SCSI BUS\n",
2609 ha->host_no, bus);
2610
2611 reset_delay = ha->bus_settings[bus].bus_reset_delay;
2612 mb[0] = MBC_BUS_RESET;
2613 mb[1] = reset_delay;
2614 mb[2] = (uint16_t) bus;
2615 status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2616
2617 if (status) {
2618 if (ha->bus_settings[bus].failed_reset_count > 2)
2619 ha->bus_settings[bus].scsi_bus_dead = 1;
2620 ha->bus_settings[bus].failed_reset_count++;
2621 } else {
2622 spin_unlock_irq(ha->host->host_lock);
2623 ssleep(reset_delay);
2624 spin_lock_irq(ha->host->host_lock);
2625
2626 ha->bus_settings[bus].scsi_bus_dead = 0;
2627 ha->bus_settings[bus].failed_reset_count = 0;
2628 ha->bus_settings[bus].reset_marker = 0;
2629
2630 qla1280_marker(ha, bus, 0, 0, MK_SYNC_ALL);
2631 }
2632
2633
2634
2635
2636
2637
2638 if (status)
2639 dprintk(2, "qla1280_bus_reset: **** FAILED ****\n");
2640 else
2641 dprintk(3, "qla1280_bus_reset: exiting normally\n");
2642
2643 return status;
2644}
2645
2646
2647
2648
2649
2650
2651
2652
2653
2654
2655
2656
2657
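/*
 * qla1280_device_reset
 *	Reset a single target by issuing an abort-target mailbox command,
 *	then send a marker IOCB so the firmware resynchronizes the device.
 *	Returns 0 on success, non-zero on mailbox failure.
 */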
2658static int
2659qla1280_device_reset(struct scsi_qla_host *ha, int bus, int target)
2660{
2661 uint16_t mb[MAILBOX_REGISTER_COUNT];
2662 int status;
2663
2664 ENTER("qla1280_device_reset");
2665
2666 mb[0] = MBC_ABORT_TARGET;
2667 mb[1] = (bus ? (target | BIT_7) : target) << 8;
2668 mb[2] = 1;
2669 status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2670
2671
2672 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
2673
2674 if (status)
2675 dprintk(2, "qla1280_device_reset: **** FAILED ****\n");
2676
2677 LEAVE("qla1280_device_reset");
2678 return status;
2679}
2680
2681
2682
2683
2684
2685
2686
2687
2688
2689
2690
2691
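/*
 * qla1280_abort_command
 *	Ask the firmware to abort one outstanding command, identified by
 *	its request handle.  SRB_ABORT_PENDING is set for the duration and
 *	cleared again if the mailbox command fails.
 */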
2692static int
2693qla1280_abort_command(struct scsi_qla_host *ha, struct srb * sp, int handle)
2694{
2695 uint16_t mb[MAILBOX_REGISTER_COUNT];
2696 unsigned int bus, target, lun;
2697 int status;
2698
2699 ENTER("qla1280_abort_command");
2700
2701 bus = SCSI_BUS_32(sp->cmd);
2702 target = SCSI_TCN_32(sp->cmd);
2703 lun = SCSI_LUN_32(sp->cmd);
2704
2705 sp->flags |= SRB_ABORT_PENDING;
2706
2707 mb[0] = MBC_ABORT_COMMAND;
2708 mb[1] = (bus ? target | BIT_7 : target) << 8 | lun;
2709 mb[2] = handle >> 16;
2710 mb[3] = handle & 0xffff;
2711 status = qla1280_mailbox_command(ha, 0x0f, &mb[0]);
2712
2713 if (status) {
2714 dprintk(2, "qla1280_abort_command: **** FAILED ****\n");
2715 sp->flags &= ~SRB_ABORT_PENDING;
2716 }
2717
2718
2719 LEAVE("qla1280_abort_command");
2720 return status;
2721}
2722
2723
2724
2725
2726
2727
2728
2729
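/*
 * qla1280_reset_adapter
 *	Take the adapter offline and hard-reset the ISP and RISC processor.
 */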
2730static void
2731qla1280_reset_adapter(struct scsi_qla_host *ha)
2732{
2733 struct device_reg __iomem *reg = ha->iobase;
2734
2735 ENTER("qla1280_reset_adapter");
2736
2737
2738 ha->flags.online = 0;
2739 WRT_REG_WORD(&reg->ictrl, ISP_RESET);
2740 WRT_REG_WORD(&reg->host_cmd,
2741 HC_RESET_RISC | HC_RELEASE_RISC | HC_DISABLE_BIOS);
2742 RD_REG_WORD(&reg->id_l);
2743
2744 LEAVE("qla1280_reset_adapter");
2745}
2746
2747
2748
2749
2750
2751
2752
2753
2754
2755
2756
2757
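/*
 * qla1280_marker
 *	Queue a marker IOCB so the firmware resynchronizes outstanding
 *	transfer state for a LUN, a target or a whole bus after a reset.
 */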
2758static void
2759qla1280_marker(struct scsi_qla_host *ha, int bus, int id, int lun, u8 type)
2760{
2761 struct mrk_entry *pkt;
2762
2763 ENTER("qla1280_marker");
2764
2765
2766 if ((pkt = (struct mrk_entry *) qla1280_req_pkt(ha))) {
2767 pkt->entry_type = MARKER_TYPE;
2768 pkt->lun = (uint8_t) lun;
2769 pkt->target = (uint8_t) (bus ? (id | BIT_7) : id);
2770 pkt->modifier = type;
2771 pkt->entry_status = 0;
2772
2773
2774 qla1280_isp_cmd(ha);
2775 }
2776
2777 LEAVE("qla1280_marker");
2778}
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790
2791
2792
2793#ifdef QLA_64BIT_PTR
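/*
 * qla1280_64bit_start_scsi
 *	Build a 64-bit (A64) command IOCB for 'cmd', plus continuation IOCBs
 *	for long scatter/gather lists (two segments fit in the command entry,
 *	five in each continuation entry), then ring the request-queue in
 *	pointer.  Returns 0 on success, 1 on DMA mapping failure, or
 *	SCSI_MLQUEUE_HOST_BUSY when the ring or handle array is full.
 */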
2794static int
2795qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2796{
2797 struct device_reg __iomem *reg = ha->iobase;
2798 struct scsi_cmnd *cmd = sp->cmd;
2799 cmd_a64_entry_t *pkt;
2800 __le32 *dword_ptr;
2801 dma_addr_t dma_handle;
2802 int status = 0;
2803 int cnt;
2804 int req_cnt;
2805 int seg_cnt;
2806 u8 dir;
2807
2808 ENTER("qla1280_64bit_start_scsi:");
2809
2810
2811 req_cnt = 1;
2812 seg_cnt = scsi_dma_map(cmd);
2813 if (seg_cnt > 0) {
2814 if (seg_cnt > 2) {
2815 req_cnt += (seg_cnt - 2) / 5;
2816 if ((seg_cnt - 2) % 5)
2817 req_cnt++;
2818 }
2819 } else if (seg_cnt < 0) {
2820 status = 1;
2821 goto out;
2822 }
2823
2824 if ((req_cnt + 2) >= ha->req_q_cnt) {
2825
2826 cnt = RD_REG_WORD(&reg->mailbox4);
2827 if (ha->req_ring_index < cnt)
2828 ha->req_q_cnt = cnt - ha->req_ring_index;
2829 else
2830 ha->req_q_cnt =
2831 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
2832 }
2833
2834 dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
2835 ha->req_q_cnt, seg_cnt);
2836
2837
2838 if ((req_cnt + 2) >= ha->req_q_cnt) {
2839 status = SCSI_MLQUEUE_HOST_BUSY;
2840 dprintk(2, "qla1280_64bit_start_scsi: in-ptr=0x%x req_q_cnt="
2841 "0x%x req_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt,
2842 req_cnt);
2843 goto out;
2844 }
2845
2846
2847 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
2848 ha->outstanding_cmds[cnt] != NULL; cnt++);
2849
2850 if (cnt >= MAX_OUTSTANDING_COMMANDS) {
2851 status = SCSI_MLQUEUE_HOST_BUSY;
2852 dprintk(2, "qla1280_64bit_start_scsi: NO ROOM IN "
2853 "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt);
2854 goto out;
2855 }
2856
2857 ha->outstanding_cmds[cnt] = sp;
2858 ha->req_q_cnt -= req_cnt;
2859 CMD_HANDLE(sp->cmd) = (unsigned char *)(unsigned long)(cnt + 1);
2860
2861 dprintk(2, "start: cmd=%p sp=%p CDB=0x%x, handle %lx\n", cmd, sp,
2862 cmd->cmnd[0], (long)CMD_HANDLE(sp->cmd));
2863 dprintk(2, " bus %i, target %i, lun %i\n",
2864 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
2865 qla1280_dump_buffer(2, cmd->cmnd, MAX_COMMAND_SIZE);
2866
2867
2868
2869
2870 pkt = (cmd_a64_entry_t *) ha->request_ring_ptr;
2871
2872 pkt->entry_type = COMMAND_A64_TYPE;
2873 pkt->entry_count = (uint8_t) req_cnt;
2874 pkt->sys_define = (uint8_t) ha->req_ring_index;
2875 pkt->entry_status = 0;
2876 pkt->handle = cpu_to_le32(cnt);
2877
2878
2879 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
2880
2881
2882 pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
2883
2884
2885 pkt->lun = SCSI_LUN_32(cmd);
2886 pkt->target = SCSI_BUS_32(cmd) ?
2887 (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
2888
2889
2890 if (cmd->device->simple_tags)
2891 pkt->control_flags |= cpu_to_le16(BIT_3);
2892
2893
2894 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
2895 memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
2896
2897
2898
2899 dir = qla1280_data_direction(cmd);
2900 pkt->control_flags |= cpu_to_le16(dir);
2901
2902
2903 pkt->dseg_count = cpu_to_le16(seg_cnt);
2904
2905
2906
2907
2908 if (seg_cnt) {
2909 struct scatterlist *sg, *s;
2910 int remseg = seg_cnt;
2911
2912 sg = scsi_sglist(cmd);
2913
2914
2915 dword_ptr = (u32 *)&pkt->dseg_0_address;
2916
2917
2918 for_each_sg(sg, s, seg_cnt, cnt) {
2919 if (cnt == 2)
2920 break;
2921
2922 dma_handle = sg_dma_address(s);
2923#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
2924 if (ha->flags.use_pci_vchannel)
2925 sn_pci_set_vchan(ha->pdev,
2926 (unsigned long *)&dma_handle,
2927 SCSI_BUS_32(cmd));
2928#endif
2929 *dword_ptr++ =
2930 cpu_to_le32(pci_dma_lo32(dma_handle));
2931 *dword_ptr++ =
2932 cpu_to_le32(pci_dma_hi32(dma_handle));
2933 *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
2934 dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n",
2935 cpu_to_le32(pci_dma_hi32(dma_handle)),
2936 cpu_to_le32(pci_dma_lo32(dma_handle)),
2937 cpu_to_le32(sg_dma_len(s)));
2938 remseg--;
2939 }
2940 dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather "
2941 "command packet data - b %i, t %i, l %i \n",
2942 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
2943 SCSI_LUN_32(cmd));
2944 qla1280_dump_buffer(5, (char *)pkt,
2945 REQUEST_ENTRY_SIZE);
2946
2947
2948
2949
2950 dprintk(3, "S/G Building Continuation...seg_cnt=0x%x "
2951 "remains\n", seg_cnt);
2952
2953 while (remseg > 0) {
2954
2955 sg = s;
2956
2957 ha->req_ring_index++;
2958 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
2959 ha->req_ring_index = 0;
2960 ha->request_ring_ptr =
2961 ha->request_ring;
2962 } else
2963 ha->request_ring_ptr++;
2964
2965 pkt = (cmd_a64_entry_t *)ha->request_ring_ptr;
2966
2967
2968 memset(pkt, 0, REQUEST_ENTRY_SIZE);
2969
2970
2971 ((struct cont_a64_entry *) pkt)->entry_type =
2972 CONTINUE_A64_TYPE;
2973 ((struct cont_a64_entry *) pkt)->entry_count = 1;
2974 ((struct cont_a64_entry *) pkt)->sys_define =
2975 (uint8_t)ha->req_ring_index;
2976
2977 dword_ptr =
2978 (u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address;
2979
2980
2981 for_each_sg(sg, s, remseg, cnt) {
2982 if (cnt == 5)
2983 break;
2984 dma_handle = sg_dma_address(s);
2985#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
2986 if (ha->flags.use_pci_vchannel)
2987 sn_pci_set_vchan(ha->pdev,
2988 (unsigned long *)&dma_handle,
2989 SCSI_BUS_32(cmd));
2990#endif
2991 *dword_ptr++ =
2992 cpu_to_le32(pci_dma_lo32(dma_handle));
2993 *dword_ptr++ =
2994 cpu_to_le32(pci_dma_hi32(dma_handle));
2995 *dword_ptr++ =
2996 cpu_to_le32(sg_dma_len(s));
2997 dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n",
2998 cpu_to_le32(pci_dma_hi32(dma_handle)),
2999 cpu_to_le32(pci_dma_lo32(dma_handle)),
3000 cpu_to_le32(sg_dma_len(s)));
3001 }
3002 remseg -= cnt;
3003 dprintk(5, "qla1280_64bit_start_scsi: "
3004 "continuation packet data - b %i, t "
3005 "%i, l %i \n", SCSI_BUS_32(cmd),
3006 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
3007 qla1280_dump_buffer(5, (char *)pkt,
3008 REQUEST_ENTRY_SIZE);
3009 }
3010 } else {
3011 dprintk(5, "qla1280_64bit_start_scsi: No data, command "
3012 "packet data - b %i, t %i, l %i \n",
3013 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
3014 qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
3015 }
3016
3017 ha->req_ring_index++;
3018 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3019 ha->req_ring_index = 0;
3020 ha->request_ring_ptr = ha->request_ring;
3021 } else
3022 ha->request_ring_ptr++;
3023
3024
3025 dprintk(2,
3026 "qla1280_64bit_start_scsi: Wakeup RISC for pending command\n");
3027 sp->flags |= SRB_SENT;
3028 ha->actthreads++;
3029 WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
3030
3031 mmiowb();
3032
3033 out:
3034 if (status)
3035 dprintk(2, "qla1280_64bit_start_scsi: **** FAILED ****\n");
3036 else
3037 dprintk(3, "qla1280_64bit_start_scsi: exiting normally\n");
3038
3039 return status;
3040}
3041#else
3042
3043
3044
3045
3046
3047
3048
3049
3050
3051
3052
3053
3054
3055
3056
3057
3058
3059
3060
3061
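/*
 * qla1280_32bit_start_scsi
 *	32-bit counterpart of the routine above: the command entry carries
 *	four data segments and each continuation entry seven, with 32-bit
 *	DMA addresses only.  Return values match the 64-bit version.
 */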
3062static int
3063qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3064{
3065 struct device_reg __iomem *reg = ha->iobase;
3066 struct scsi_cmnd *cmd = sp->cmd;
3067 struct cmd_entry *pkt;
3068 __le32 *dword_ptr;
3069 int status = 0;
3070 int cnt;
3071 int req_cnt;
3072 int seg_cnt;
3073 u8 dir;
3074
3075 ENTER("qla1280_32bit_start_scsi");
3076
3077 dprintk(1, "32bit_start: cmd=%p sp=%p CDB=%x\n", cmd, sp,
3078 cmd->cmnd[0]);
3079
3080
3081 req_cnt = 1;
3082 seg_cnt = scsi_dma_map(cmd);
3083 if (seg_cnt > 0) {
3084
3085
3086
3087
3088 if (seg_cnt > 4) {
3089 req_cnt += (seg_cnt - 4) / 7;
3090 if ((seg_cnt - 4) % 7)
3091 req_cnt++;
3092 }
3093 dprintk(3, "S/G Transfer cmd=%p seg_cnt=0x%x, req_cnt=%x\n",
3094 cmd, seg_cnt, req_cnt);
3095 } else if (seg_cnt < 0) {
3096 status = 1;
3097 goto out;
3098 }
3099
3100 if ((req_cnt + 2) >= ha->req_q_cnt) {
3101
3102 cnt = RD_REG_WORD(&reg->mailbox4);
3103 if (ha->req_ring_index < cnt)
3104 ha->req_q_cnt = cnt - ha->req_ring_index;
3105 else
3106 ha->req_q_cnt =
3107 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
3108 }
3109
3110 dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
3111 ha->req_q_cnt, seg_cnt);
3112
3113 if ((req_cnt + 2) >= ha->req_q_cnt) {
3114 status = SCSI_MLQUEUE_HOST_BUSY;
3115 dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, "
3116 "req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index,
3117 ha->req_q_cnt, req_cnt);
3118 goto out;
3119 }
3120
3121
3122 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
3123 (ha->outstanding_cmds[cnt] != 0); cnt++) ;
3124
3125 if (cnt >= MAX_OUTSTANDING_COMMANDS) {
3126 status = SCSI_MLQUEUE_HOST_BUSY;
3127 dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING "
3128 "ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt);
3129 goto out;
3130 }
3131
3132 CMD_HANDLE(sp->cmd) = (unsigned char *) (unsigned long)(cnt + 1);
3133 ha->outstanding_cmds[cnt] = sp;
3134 ha->req_q_cnt -= req_cnt;
3135
3136
3137
3138
3139 pkt = (struct cmd_entry *) ha->request_ring_ptr;
3140
3141 pkt->entry_type = COMMAND_TYPE;
3142 pkt->entry_count = (uint8_t) req_cnt;
3143 pkt->sys_define = (uint8_t) ha->req_ring_index;
3144 pkt->entry_status = 0;
3145 pkt->handle = cpu_to_le32(cnt);
3146
3147
3148 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
3149
3150
3151 pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
3152
3153
3154 pkt->lun = SCSI_LUN_32(cmd);
3155 pkt->target = SCSI_BUS_32(cmd) ?
3156 (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
3157
3158
3159 if (cmd->device->simple_tags)
3160 pkt->control_flags |= cpu_to_le16(BIT_3);
3161
3162
3163 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
3164 memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
3165
3166
3167
3168 dir = qla1280_data_direction(cmd);
3169 pkt->control_flags |= cpu_to_le16(dir);
3170
3171
3172 pkt->dseg_count = cpu_to_le16(seg_cnt);
3173
3174
3175
3176
3177 if (seg_cnt) {
3178 struct scatterlist *sg, *s;
3179 int remseg = seg_cnt;
3180
3181 sg = scsi_sglist(cmd);
3182
3183
3184 dword_ptr = &pkt->dseg_0_address;
3185
3186 dprintk(3, "Building S/G data segments..\n");
3187 qla1280_dump_buffer(1, (char *)sg, 4 * 16);
3188
3189
3190 for_each_sg(sg, s, seg_cnt, cnt) {
3191 if (cnt == 4)
3192 break;
3193 *dword_ptr++ =
3194 cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
3195 *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
3196 dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n",
3197 (pci_dma_lo32(sg_dma_address(s))),
3198 (sg_dma_len(s)));
3199 remseg--;
3200 }
3201
3202
3203
3204 dprintk(3, "S/G Building Continuation"
3205 "...seg_cnt=0x%x remains\n", seg_cnt);
3206 while (remseg > 0) {
3207
3208 sg = s;
3209
3210 ha->req_ring_index++;
3211 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3212 ha->req_ring_index = 0;
3213 ha->request_ring_ptr =
3214 ha->request_ring;
3215 } else
3216 ha->request_ring_ptr++;
3217
3218 pkt = (struct cmd_entry *)ha->request_ring_ptr;
3219
3220
3221 memset(pkt, 0, REQUEST_ENTRY_SIZE);
3222
3223
3224 ((struct cont_entry *) pkt)->
3225 entry_type = CONTINUE_TYPE;
3226 ((struct cont_entry *) pkt)->entry_count = 1;
3227
3228 ((struct cont_entry *) pkt)->sys_define =
3229 (uint8_t) ha->req_ring_index;
3230
3231
3232 dword_ptr =
3233 &((struct cont_entry *) pkt)->dseg_0_address;
3234
3235
3236 for_each_sg(sg, s, remseg, cnt) {
3237 if (cnt == 7)
3238 break;
3239 *dword_ptr++ =
3240 cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
3241 *dword_ptr++ =
3242 cpu_to_le32(sg_dma_len(s));
3243 dprintk(1,
3244 "S/G Segment Cont. phys_addr=0x%x, "
3245 "len=0x%x\n",
3246 cpu_to_le32(pci_dma_lo32(sg_dma_address(s))),
3247 cpu_to_le32(sg_dma_len(s)));
3248 }
3249 remseg -= cnt;
3250 dprintk(5, "qla1280_32bit_start_scsi: "
3251 "continuation packet data - "
3252 "scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd),
3253 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
3254 qla1280_dump_buffer(5, (char *)pkt,
3255 REQUEST_ENTRY_SIZE);
3256 }
3257 } else {
3258 dprintk(5, "qla1280_32bit_start_scsi: No data, command "
3259 "packet data - \n");
3260 qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
3261 }
3262 dprintk(5, "qla1280_32bit_start_scsi: First IOCB block:\n");
3263 qla1280_dump_buffer(5, (char *)ha->request_ring_ptr,
3264 REQUEST_ENTRY_SIZE);
3265
3266
3267 ha->req_ring_index++;
3268 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3269 ha->req_ring_index = 0;
3270 ha->request_ring_ptr = ha->request_ring;
3271 } else
3272 ha->request_ring_ptr++;
3273
3274
3275 dprintk(2, "qla1280_32bit_start_scsi: Wakeup RISC "
3276 "for pending command\n");
3277 sp->flags |= SRB_SENT;
3278 ha->actthreads++;
3279 WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
3280
3281 mmiowb();
3282
3283out:
3284 if (status)
3285 dprintk(2, "qla1280_32bit_start_scsi: **** FAILED ****\n");
3286
3287 LEAVE("qla1280_32bit_start_scsi");
3288
3289 return status;
3290}
3291#endif
3292
3293
3294
3295
3296
3297
3298
3299
3300
3301
3302
3303
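/*
 * qla1280_req_pkt
 *	Return a zeroed request ring entry, polling the adapter (and
 *	servicing interrupts by hand) for roughly 30 seconds until space
 *	becomes available.  Returns NULL on timeout.
 */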
3304static request_t *
3305qla1280_req_pkt(struct scsi_qla_host *ha)
3306{
3307 struct device_reg __iomem *reg = ha->iobase;
3308 request_t *pkt = NULL;
3309 int cnt;
3310 uint32_t timer;
3311
3312 ENTER("qla1280_req_pkt");
3313
3314
3315
3316
3317
3318 for (timer = 15000000; timer; timer--) {
3319 if (ha->req_q_cnt > 0) {
3320
3321 cnt = RD_REG_WORD(&reg->mailbox4);
3322 if (ha->req_ring_index < cnt)
3323 ha->req_q_cnt = cnt - ha->req_ring_index;
3324 else
3325 ha->req_q_cnt =
3326 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
3327 }
3328
3329
3330 if (ha->req_q_cnt > 0) {
3331 ha->req_q_cnt--;
3332 pkt = ha->request_ring_ptr;
3333
3334
3335 memset(pkt, 0, REQUEST_ENTRY_SIZE);
3336
3337
3338
3339
3340
3341
3342 pkt->sys_define = (uint8_t) ha->req_ring_index;
3343
3344
3345 pkt->entry_count = 1;
3346
3347 break;
3348 }
3349
3350 udelay(2);
3351
3352
3353 qla1280_poll(ha);
3354 }
3355
3356 if (!pkt)
3357 dprintk(2, "qla1280_req_pkt: **** FAILED ****\n");
3358 else
3359 dprintk(3, "qla1280_req_pkt: exiting normally\n");
3360
3361 return pkt;
3362}
3363
3364
3365
3366
3367
3368
3369
3370
3371
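/*
 * qla1280_isp_cmd
 *	Advance the request ring past the IOCB just built and notify the
 *	RISC by writing the new in-pointer to mailbox register 4.
 */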
3372static void
3373qla1280_isp_cmd(struct scsi_qla_host *ha)
3374{
3375 struct device_reg __iomem *reg = ha->iobase;
3376
3377 ENTER("qla1280_isp_cmd");
3378
3379 dprintk(5, "qla1280_isp_cmd: IOCB data:\n");
3380 qla1280_dump_buffer(5, (char *)ha->request_ring_ptr,
3381 REQUEST_ENTRY_SIZE);
3382
3383
3384 ha->req_ring_index++;
3385 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3386 ha->req_ring_index = 0;
3387 ha->request_ring_ptr = ha->request_ring;
3388 } else
3389 ha->request_ring_ptr++;
3390
3391
3392
3393
3394
3395
3396
3397
3398
3399
3400
3401
3402
3403
3404 WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
3405 mmiowb();
3406
3407 LEAVE("qla1280_isp_cmd");
3408}
3409
3410
3411
3412
3413
3414
3415
3416
3417
3418
3419
3420
3421
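/*
 * qla1280_isr
 *	Interrupt service routine: decode mailbox/asynchronous events,
 *	complete any mailbox command in progress, then walk the response
 *	ring and move finished commands onto 'done_q'.
 */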
3422static void
3423qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q)
3424{
3425 struct device_reg __iomem *reg = ha->iobase;
3426 struct response *pkt;
3427 struct srb *sp = NULL;
3428 uint16_t mailbox[MAILBOX_REGISTER_COUNT];
3429 uint16_t *wptr;
3430 uint32_t index;
3431 u16 istatus;
3432
3433 ENTER("qla1280_isr");
3434
3435 istatus = RD_REG_WORD(&reg->istatus);
3436 if (!(istatus & (RISC_INT | PCI_INT)))
3437 return;
3438
3439
3440 mailbox[5] = RD_REG_WORD(&reg->mailbox5);
3441
3442
3443
3444 mailbox[0] = RD_REG_WORD_dmasync(&reg->semaphore);
3445
3446 if (mailbox[0] & BIT_0) {
3447
3448
3449
3450 wptr = &mailbox[0];
3451 *wptr++ = RD_REG_WORD(&reg->mailbox0);
3452 *wptr++ = RD_REG_WORD(&reg->mailbox1);
3453 *wptr = RD_REG_WORD(&reg->mailbox2);
3454 if (mailbox[0] != MBA_SCSI_COMPLETION) {
3455 wptr++;
3456 *wptr++ = RD_REG_WORD(&reg->mailbox3);
3457 *wptr++ = RD_REG_WORD(&reg->mailbox4);
3458 wptr++;
3459 *wptr++ = RD_REG_WORD(&reg->mailbox6);
3460 *wptr = RD_REG_WORD(&reg->mailbox7);
3461 }
3462
3463
3464
3465 WRT_REG_WORD(&reg->semaphore, 0);
3466 WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
3467
3468 dprintk(5, "qla1280_isr: mailbox interrupt mailbox[0] = 0x%x",
3469 mailbox[0]);
3470
3471
3472 switch (mailbox[0]) {
3473 case MBA_SCSI_COMPLETION:
3474 dprintk(5, "qla1280_isr: mailbox SCSI response "
3475 "completion\n");
3476
3477 if (ha->flags.online) {
3478
3479 index = mailbox[2] << 16 | mailbox[1];
3480
3481
3482 if (index < MAX_OUTSTANDING_COMMANDS)
3483 sp = ha->outstanding_cmds[index];
3484 else
3485 sp = NULL;
3486
3487 if (sp) {
3488
3489 ha->outstanding_cmds[index] = NULL;
3490
3491
3492 CMD_RESULT(sp->cmd) = 0;
3493 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3494
3495
3496 list_add_tail(&sp->list, done_q);
3497 } else {
3498
3499
3500
3501 printk(KERN_WARNING
3502 "qla1280: ISP invalid handle\n");
3503 }
3504 }
3505 break;
3506
3507 case MBA_BUS_RESET:
3508 ha->flags.reset_marker = 1;
3509 index = mailbox[6] & BIT_0;
3510 ha->bus_settings[index].reset_marker = 1;
3511
3512 printk(KERN_DEBUG "qla1280_isr(): index %i "
3513 "asynchronous BUS_RESET\n", index);
3514 break;
3515
3516 case MBA_SYSTEM_ERR:
3517 printk(KERN_WARNING
3518 "qla1280: ISP System Error - mbx1=%xh, mbx2="
3519 "%xh, mbx3=%xh\n", mailbox[1], mailbox[2],
3520 mailbox[3]);
3521 break;
3522
3523 case MBA_REQ_TRANSFER_ERR:
3524 printk(KERN_WARNING
3525 "qla1280: ISP Request Transfer Error\n");
3526 break;
3527
3528 case MBA_RSP_TRANSFER_ERR:
3529 printk(KERN_WARNING
3530 "qla1280: ISP Response Transfer Error\n");
3531 break;
3532
3533 case MBA_WAKEUP_THRES:
3534 dprintk(2, "qla1280_isr: asynchronous WAKEUP_THRES\n");
3535 break;
3536
3537 case MBA_TIMEOUT_RESET:
3538 dprintk(2,
3539 "qla1280_isr: asynchronous TIMEOUT_RESET\n");
3540 break;
3541
3542 case MBA_DEVICE_RESET:
3543 printk(KERN_INFO "qla1280_isr(): asynchronous "
3544 "BUS_DEVICE_RESET\n");
3545
3546 ha->flags.reset_marker = 1;
3547 index = mailbox[6] & BIT_0;
3548 ha->bus_settings[index].reset_marker = 1;
3549 break;
3550
3551 case MBA_BUS_MODE_CHANGE:
3552 dprintk(2,
3553 "qla1280_isr: asynchronous BUS_MODE_CHANGE\n");
3554 break;
3555
3556 default:
3557
3558 if (mailbox[0] < MBA_ASYNC_EVENT) {
3559 wptr = &mailbox[0];
3560 memcpy((uint16_t *) ha->mailbox_out, wptr,
3561 MAILBOX_REGISTER_COUNT *
3562 sizeof(uint16_t));
3563
3564 if(ha->mailbox_wait != NULL)
3565 complete(ha->mailbox_wait);
3566 }
3567 break;
3568 }
3569 } else {
3570 WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
3571 }
3572
3573
3574
3575
3576
3577 if (!ha->flags.online && !ha->mailbox_wait) {
3578 dprintk(2, "qla1280_isr: Response pointer Error\n");
3579 goto out;
3580 }
3581
3582 if (mailbox[5] >= RESPONSE_ENTRY_CNT)
3583 goto out;
3584
3585 while (ha->rsp_ring_index != mailbox[5]) {
3586 pkt = ha->response_ring_ptr;
3587
3588 dprintk(5, "qla1280_isr: ha->rsp_ring_index = 0x%x, mailbox[5]"
3589 " = 0x%x\n", ha->rsp_ring_index, mailbox[5]);
3590 dprintk(5,"qla1280_isr: response packet data\n");
3591 qla1280_dump_buffer(5, (char *)pkt, RESPONSE_ENTRY_SIZE);
3592
3593 if (pkt->entry_type == STATUS_TYPE) {
3594 if ((le16_to_cpu(pkt->scsi_status) & 0xff)
3595 || pkt->comp_status || pkt->entry_status) {
3596 dprintk(2, "qla1280_isr: ha->rsp_ring_index = "
3597 "0x%x mailbox[5] = 0x%x, comp_status "
3598 "= 0x%x, scsi_status = 0x%x\n",
3599 ha->rsp_ring_index, mailbox[5],
3600 le16_to_cpu(pkt->comp_status),
3601 le16_to_cpu(pkt->scsi_status));
3602 }
3603 } else {
3604 dprintk(2, "qla1280_isr: ha->rsp_ring_index = "
3605 "0x%x, mailbox[5] = 0x%x\n",
3606 ha->rsp_ring_index, mailbox[5]);
3607 dprintk(2, "qla1280_isr: response packet data\n");
3608 qla1280_dump_buffer(2, (char *)pkt,
3609 RESPONSE_ENTRY_SIZE);
3610 }
3611
3612 if (pkt->entry_type == STATUS_TYPE || pkt->entry_status) {
3613 dprintk(2, "status: Cmd %p, handle %i\n",
3614 ha->outstanding_cmds[pkt->handle]->cmd,
3615 pkt->handle);
3616 if (pkt->entry_type == STATUS_TYPE)
3617 qla1280_status_entry(ha, pkt, done_q);
3618 else
3619 qla1280_error_entry(ha, pkt, done_q);
3620
3621 ha->rsp_ring_index++;
3622 if (ha->rsp_ring_index == RESPONSE_ENTRY_CNT) {
3623 ha->rsp_ring_index = 0;
3624 ha->response_ring_ptr = ha->response_ring;
3625 } else
3626 ha->response_ring_ptr++;
3627 WRT_REG_WORD(&reg->mailbox5, ha->rsp_ring_index);
3628 }
3629 }
3630
3631 out:
3632 LEAVE("qla1280_isr");
3633}
3634
3635
3636
3637
3638
3639
3640
3641
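/*
 * qla1280_rst_aen
 *	After a bus reset asynchronous event, issue sync-all markers for
 *	every bus whose reset_marker flag is still set.
 */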
3642static void
3643qla1280_rst_aen(struct scsi_qla_host *ha)
3644{
3645 uint8_t bus;
3646
3647 ENTER("qla1280_rst_aen");
3648
3649 if (ha->flags.online && !ha->flags.reset_active &&
3650 !ha->flags.abort_isp_active) {
3651 ha->flags.reset_active = 1;
3652 while (ha->flags.reset_marker) {
3653
3654 ha->flags.reset_marker = 0;
3655 for (bus = 0; bus < ha->ports &&
3656 !ha->flags.reset_marker; bus++) {
3657 if (ha->bus_settings[bus].reset_marker) {
3658 ha->bus_settings[bus].reset_marker = 0;
3659 qla1280_marker(ha, bus, 0, 0,
3660 MK_SYNC_ALL);
3661 }
3662 }
3663 }
3664 }
3665
3666 LEAVE("qla1280_rst_aen");
3667}
3668
3669
3670
3671
3672
3673
3674
3675
3676
3677
3678
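/*
 * qla1280_status_entry
 *	Translate a status IOCB into a midlayer result for the owning
 *	command, copying any request sense data on a check condition, and
 *	queue the command on 'done_q'.
 */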
3679static void
3680qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
3681 struct list_head *done_q)
3682{
3683 unsigned int bus, target, lun;
3684 int sense_sz;
3685 struct srb *sp;
3686 struct scsi_cmnd *cmd;
3687 uint32_t handle = le32_to_cpu(pkt->handle);
3688 uint16_t scsi_status = le16_to_cpu(pkt->scsi_status);
3689 uint16_t comp_status = le16_to_cpu(pkt->comp_status);
3690
3691 ENTER("qla1280_status_entry");
3692
3693
3694 if (handle < MAX_OUTSTANDING_COMMANDS)
3695 sp = ha->outstanding_cmds[handle];
3696 else
3697 sp = NULL;
3698
3699 if (!sp) {
3700 printk(KERN_WARNING "qla1280: Status Entry invalid handle\n");
3701 goto out;
3702 }
3703
3704
3705 ha->outstanding_cmds[handle] = NULL;
3706
3707 cmd = sp->cmd;
3708
3709
3710 bus = SCSI_BUS_32(cmd);
3711 target = SCSI_TCN_32(cmd);
3712 lun = SCSI_LUN_32(cmd);
3713
3714 if (comp_status || scsi_status) {
3715 dprintk(3, "scsi: comp_status = 0x%x, scsi_status = "
3716 "0x%x, handle = 0x%x\n", comp_status,
3717 scsi_status, handle);
3718 }
3719
3720
3721 if ((scsi_status & 0xFF) == SAM_STAT_TASK_SET_FULL ||
3722 (scsi_status & 0xFF) == SAM_STAT_BUSY) {
3723 CMD_RESULT(cmd) = scsi_status & 0xff;
3724 } else {
3725
3726
3727 CMD_RESULT(cmd) = qla1280_return_status(pkt, cmd);
3728
3729 if (scsi_status & SAM_STAT_CHECK_CONDITION) {
3730 if (comp_status != CS_ARS_FAILED) {
3731 uint16_t req_sense_length =
3732 le16_to_cpu(pkt->req_sense_length);
3733 if (req_sense_length < CMD_SNSLEN(cmd))
3734 sense_sz = req_sense_length;
3735 else
3736
3737
3738
3739
3740
3741 sense_sz = CMD_SNSLEN(cmd) - 1;
3742
3743 memcpy(cmd->sense_buffer,
3744 &pkt->req_sense_data, sense_sz);
3745 } else
3746 sense_sz = 0;
3747 memset(cmd->sense_buffer + sense_sz, 0,
3748 SCSI_SENSE_BUFFERSIZE - sense_sz);
3749
3750 dprintk(2, "qla1280_status_entry: Check "
3751 "condition Sense data, b %i, t %i, "
3752 "l %i\n", bus, target, lun);
3753 if (sense_sz)
3754 qla1280_dump_buffer(2,
3755 (char *)cmd->sense_buffer,
3756 sense_sz);
3757 }
3758 }
3759
3760 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3761
3762
3763 list_add_tail(&sp->list, done_q);
3764 out:
3765 LEAVE("qla1280_status_entry");
3766}
3767
3768
3769
3770
3771
3772
3773
3774
3775
3776
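/*
 * qla1280_error_entry
 *	Handle an IOCB the firmware flagged as invalid (bad payload, bad
 *	header or queue full) and fail the associated command accordingly.
 */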
3777static void
3778qla1280_error_entry(struct scsi_qla_host *ha, struct response *pkt,
3779 struct list_head *done_q)
3780{
3781 struct srb *sp;
3782 uint32_t handle = le32_to_cpu(pkt->handle);
3783
3784 ENTER("qla1280_error_entry");
3785
3786 if (pkt->entry_status & BIT_3)
3787 dprintk(2, "qla1280_error_entry: BAD PAYLOAD flag error\n");
3788 else if (pkt->entry_status & BIT_2)
3789 dprintk(2, "qla1280_error_entry: BAD HEADER flag error\n");
3790 else if (pkt->entry_status & BIT_1)
3791 dprintk(2, "qla1280_error_entry: FULL flag error\n");
3792 else
3793 dprintk(2, "qla1280_error_entry: UNKNOWN flag error\n");
3794
3795
3796 if (handle < MAX_OUTSTANDING_COMMANDS)
3797 sp = ha->outstanding_cmds[handle];
3798 else
3799 sp = NULL;
3800
3801 if (sp) {
3802
3803 ha->outstanding_cmds[handle] = NULL;
3804
3805
3806 if (pkt->entry_status & (BIT_3 + BIT_2)) {
3807
3808
3809 CMD_RESULT(sp->cmd) = DID_ERROR << 16;
3810 } else if (pkt->entry_status & BIT_1) {
3811 CMD_RESULT(sp->cmd) = DID_BUS_BUSY << 16;
3812 } else {
3813
3814 CMD_RESULT(sp->cmd) = DID_ERROR << 16;
3815 }
3816
3817 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3818
3819
3820 list_add_tail(&sp->list, done_q);
3821 }
3822#ifdef QLA_64BIT_PTR
3823 else if (pkt->entry_type == COMMAND_A64_TYPE) {
3824 printk(KERN_WARNING "!qla1280: Error Entry invalid handle");
3825 }
3826#endif
3827
3828 LEAVE("qla1280_error_entry");
3829}
3830
3831
3832
3833
3834
3835
3836
3837
3838
3839
3840
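/*
 * qla1280_abort_isp
 *	Error recovery: pause the RISC, return every outstanding command
 *	with DID_RESET, reload the firmware, reinitialize the rings and
 *	reset each SCSI bus.  On failure the adapter is disabled.
 */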
3841static int
3842qla1280_abort_isp(struct scsi_qla_host *ha)
3843{
3844 struct device_reg __iomem *reg = ha->iobase;
3845 struct srb *sp;
3846 int status = 0;
3847 int cnt;
3848 int bus;
3849
3850 ENTER("qla1280_abort_isp");
3851
3852 if (ha->flags.abort_isp_active || !ha->flags.online)
3853 goto out;
3854
3855 ha->flags.abort_isp_active = 1;
3856
3857
3858 qla1280_disable_intrs(ha);
3859 WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
3860 RD_REG_WORD(&reg->id_l);
3861
3862 printk(KERN_INFO "scsi(%li): dequeuing outstanding commands\n",
3863 ha->host_no);
3864
3865 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
3866 struct scsi_cmnd *cmd;
3867 sp = ha->outstanding_cmds[cnt];
3868 if (sp) {
3869 cmd = sp->cmd;
3870 CMD_RESULT(cmd) = DID_RESET << 16;
3871 CMD_HANDLE(cmd) = COMPLETED_HANDLE;
3872 ha->outstanding_cmds[cnt] = NULL;
3873 list_add_tail(&sp->list, &ha->done_q);
3874 }
3875 }
3876
3877 qla1280_done(ha);
3878
3879 status = qla1280_load_firmware(ha);
3880 if (status)
3881 goto out;
3882
3883
3884 qla1280_nvram_config (ha);
3885
3886 status = qla1280_init_rings(ha);
3887 if (status)
3888 goto out;
3889
3890
3891 for (bus = 0; bus < ha->ports; bus++)
3892 qla1280_bus_reset(ha, bus);
3893
3894 ha->flags.abort_isp_active = 0;
3895 out:
3896 if (status) {
3897 printk(KERN_WARNING
3898 "qla1280: ISP error recovery failed, board disabled");
3899 qla1280_reset_adapter(ha);
3900 dprintk(2, "qla1280_abort_isp: **** FAILED ****\n");
3901 }
3902
3903 LEAVE("qla1280_abort_isp");
3904 return status;
3905}
3906
3907
3908
3909
3910
3911
3912
3913
3914
3915
3916
3917
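/*
 * qla1280_debounce_register
 *	Read a register until two consecutive reads return the same value,
 *	guarding against transiently inconsistent reads.
 */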
3918static u16
3919qla1280_debounce_register(volatile u16 __iomem * addr)
3920{
3921 volatile u16 ret;
3922 volatile u16 ret2;
3923
3924 ret = RD_REG_WORD(addr);
3925 ret2 = RD_REG_WORD(addr);
3926
3927 if (ret == ret2)
3928 return ret;
3929
3930 do {
3931 cpu_relax();
3932 ret = RD_REG_WORD(addr);
3933 ret2 = RD_REG_WORD(addr);
3934 } while (ret != ret2);
3935
3936 return ret;
3937}
3938
3939
3940
3941
3942
3943
3944
3945#define SET_SXP_BANK 0x0100
3946#define SCSI_PHASE_INVALID 0x87FF
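/*
 * qla1280_check_for_dead_scsi_bus
 *	For a bus previously marked dead, peek at the SXP SCSI control pins
 *	(with the RISC paused) to see whether the bus is still stuck in an
 *	invalid phase.  Returns 1 if the bus is still dead, 0 otherwise.
 */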
3947static int
3948qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *ha, unsigned int bus)
3949{
3950 uint16_t config_reg, scsi_control;
3951 struct device_reg __iomem *reg = ha->iobase;
3952
3953 if (ha->bus_settings[bus].scsi_bus_dead) {
3954 WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
3955 config_reg = RD_REG_WORD(&reg->cfg_1);
3956 WRT_REG_WORD(&reg->cfg_1, SET_SXP_BANK);
3957 scsi_control = RD_REG_WORD(&reg->scsiControlPins);
3958 WRT_REG_WORD(&reg->cfg_1, config_reg);
3959 WRT_REG_WORD(&reg->host_cmd, HC_RELEASE_RISC);
3960
3961 if (scsi_control == SCSI_PHASE_INVALID) {
3962 ha->bus_settings[bus].scsi_bus_dead = 1;
3963 return 1;
3964 } else {
3965 ha->bus_settings[bus].scsi_bus_dead = 0;
3966 ha->bus_settings[bus].failed_reset_count = 0;
3967 }
3968 }
3969 return 0;
3970}
3971
3972static void
3973qla1280_get_target_parameters(struct scsi_qla_host *ha,
3974 struct scsi_device *device)
3975{
3976 uint16_t mb[MAILBOX_REGISTER_COUNT];
3977 int bus, target, lun;
3978
3979 bus = device->channel;
3980 target = device->id;
3981 lun = device->lun;
3982
3983
3984 mb[0] = MBC_GET_TARGET_PARAMETERS;
3985 mb[1] = (uint16_t) (bus ? target | BIT_7 : target);
3986 mb[1] <<= 8;
3987 qla1280_mailbox_command(ha, BIT_6 | BIT_3 | BIT_2 | BIT_1 | BIT_0,
3988 &mb[0]);
3989
3990 printk(KERN_INFO "scsi(%li:%d:%d:%d):", ha->host_no, bus, target, lun);
3991
3992 if (mb[3] != 0) {
3993 printk(" Sync: period %d, offset %d",
3994 (mb[3] & 0xff), (mb[3] >> 8));
3995 if (mb[2] & BIT_13)
3996 printk(", Wide");
3997 if ((mb[2] & BIT_5) && ((mb[6] >> 8) & 0xff) >= 2)
3998 printk(", DT");
3999 } else
4000 printk(" Async");
4001
4002 if (device->simple_tags)
4003 printk(", Tagged queuing: depth %d", device->queue_depth);
4004 printk("\n");
4005}
4006
4007
4008#if DEBUG_QLA1280
4009static void
4010__qla1280_dump_buffer(char *b, int size)
4011{
4012 int cnt;
4013 u8 c;
4014
4015 printk(KERN_DEBUG " 0 1 2 3 4 5 6 7 8 9 Ah "
4016 "Bh Ch Dh Eh Fh\n");
4017 printk(KERN_DEBUG "---------------------------------------------"
4018 "------------------\n");
4019
4020 for (cnt = 0; cnt < size;) {
4021 c = *b++;
4022
4023 printk("0x%02x", c);
4024 cnt++;
4025 if (!(cnt % 16))
4026 printk("\n");
4027 else
4028 printk(" ");
4029 }
4030 if (cnt % 16)
4031 printk("\n");
4032}
4033
4034
4035
4036
4037
4038static void
4039__qla1280_print_scsi_cmd(struct scsi_cmnd *cmd)
4040{
4041 struct scsi_qla_host *ha;
4042 struct Scsi_Host *host = CMD_HOST(cmd);
4043 struct srb *sp;
4044
4045
4046 int i;
4047 ha = (struct scsi_qla_host *)host->hostdata;
4048
4049 sp = (struct srb *)CMD_SP(cmd);
4050 printk("SCSI Command @= 0x%p, Handle=0x%p\n", cmd, CMD_HANDLE(cmd));
4051 printk(" chan=%d, target = 0x%02x, lun = 0x%02x, cmd_len = 0x%02x\n",
4052 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd),
4053 CMD_CDBLEN(cmd));
4054 printk(" CDB = ");
4055 for (i = 0; i < cmd->cmd_len; i++) {
4056 printk("0x%02x ", cmd->cmnd[i]);
4057 }
4058 printk(" seg_cnt =%d\n", scsi_sg_count(cmd));
4059 printk(" request buffer=0x%p, request buffer len=0x%x\n",
4060 scsi_sglist(cmd), scsi_bufflen(cmd));
4061
4062
4063
4064
4065
4066
4067 printk(" tag=%d, transfersize=0x%x \n",
4068 cmd->tag, cmd->transfersize);
4069 printk(" SP=0x%p\n", CMD_SP(cmd));
4070 printk(" underflow size = 0x%x, direction=0x%x\n",
4071 cmd->underflow, cmd->sc_data_direction);
4072}
4073
4074
4075
4076
4077
4078static void
4079ql1280_dump_device(struct scsi_qla_host *ha)
4080{
4081
4082 struct scsi_cmnd *cp;
4083 struct srb *sp;
4084 int i;
4085
4086 printk(KERN_DEBUG "Outstanding Commands on controller:\n");
4087
4088 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
4089 if ((sp = ha->outstanding_cmds[i]) == NULL)
4090 continue;
4091 if ((cp = sp->cmd) == NULL)
4092 continue;
4093 qla1280_print_scsi_cmd(1, cp);
4094 }
4095}
4096#endif
4097
4098
4099enum tokens {
4100 TOKEN_NVRAM,
4101 TOKEN_SYNC,
4102 TOKEN_WIDE,
4103 TOKEN_PPR,
4104 TOKEN_VERBOSE,
4105 TOKEN_DEBUG,
4106};
4107
4108struct setup_tokens {
4109 char *token;
4110 int val;
4111};
4112
4113static struct setup_tokens setup_token[] __initdata =
4114{
4115 { "nvram", TOKEN_NVRAM },
4116 { "sync", TOKEN_SYNC },
4117 { "wide", TOKEN_WIDE },
4118 { "ppr", TOKEN_PPR },
4119 { "verbose", TOKEN_VERBOSE },
4120 { "debug", TOKEN_DEBUG },
4121};
4122
4123
4124
4125
4126
4127
4128
4129
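/*
 * qla1280_setup
 *	Parse "qla1280=..." boot/module options.  Options are separated by
 *	';' and each has the form token:value, where value is a number or
 *	yes/no; for example (illustrative only) "qla1280=verbose:1;nvram:no"
 *	would enable verbose output and ignore the NVRAM settings.
 */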
4130static int __init
4131qla1280_setup(char *s)
4132{
4133 char *cp, *ptr;
4134 unsigned long val;
4135 int toke;
4136
4137 cp = s;
4138
4139 while (cp && (ptr = strchr(cp, ':'))) {
4140 ptr++;
4141 if (!strncmp(ptr, "yes", 3)) {
4142 val = 0x10000;
4143 ptr += 3;
4144 } else if (!strncmp(ptr, "no", 2)) {
4145 val = 0;
4146 ptr += 2;
4147 } else
4148 val = simple_strtoul(ptr, &ptr, 0);
4149
4150 switch ((toke = qla1280_get_token(cp))) {
4151 case TOKEN_NVRAM:
4152 if (!val)
4153 driver_setup.no_nvram = 1;
4154 break;
4155 case TOKEN_SYNC:
4156 if (!val)
4157 driver_setup.no_sync = 1;
4158 else if (val != 0x10000)
4159 driver_setup.sync_mask = val;
4160 break;
4161 case TOKEN_WIDE:
4162 if (!val)
4163 driver_setup.no_wide = 1;
4164 else if (val != 0x10000)
4165 driver_setup.wide_mask = val;
4166 break;
4167 case TOKEN_PPR:
4168 if (!val)
4169 driver_setup.no_ppr = 1;
4170 else if (val != 0x10000)
4171 driver_setup.ppr_mask = val;
4172 break;
4173 case TOKEN_VERBOSE:
4174 qla1280_verbose = val;
4175 break;
4176 default:
4177 printk(KERN_INFO "qla1280: unknown boot option %s\n",
4178 cp);
4179 }
4180
4181 cp = strchr(ptr, ';');
4182 if (cp)
4183 cp++;
4184 else {
4185 break;
4186 }
4187 }
4188 return 1;
4189}
4190
4191
4192static int __init
4193qla1280_get_token(char *str)
4194{
4195 char *sep;
4196 long ret = -1;
4197 int i;
4198
4199 sep = strchr(str, ':');
4200
4201 if (sep) {
4202 for (i = 0; i < ARRAY_SIZE(setup_token); i++) {
4203 if (!strncmp(setup_token[i].token, str, (sep - str))) {
4204 ret = setup_token[i].val;
4205 break;
4206 }
4207 }
4208 }
4209
4210 return ret;
4211}
4212
4213
4214static struct scsi_host_template qla1280_driver_template = {
4215 .module = THIS_MODULE,
4216 .proc_name = "qla1280",
4217 .name = "Qlogic ISP 1280/12160",
4218 .info = qla1280_info,
4219 .slave_configure = qla1280_slave_configure,
4220 .queuecommand = qla1280_queuecommand,
4221 .eh_abort_handler = qla1280_eh_abort,
4222 .eh_device_reset_handler= qla1280_eh_device_reset,
4223 .eh_bus_reset_handler = qla1280_eh_bus_reset,
4224 .eh_host_reset_handler = qla1280_eh_adapter_reset,
4225 .bios_param = qla1280_biosparam,
4226 .can_queue = 0xfffff,
4227 .this_id = -1,
4228 .sg_tablesize = SG_ALL,
4229 .cmd_per_lun = 1,
4230 .use_clustering = ENABLE_CLUSTERING,
4231};
4232
4233
4234static int __devinit
4235qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4236{
4237 int devnum = id->driver_data;
4238 struct qla_boards *bdp = &ql1280_board_tbl[devnum];
4239 struct Scsi_Host *host;
4240 struct scsi_qla_host *ha;
4241 int error = -ENODEV;
4242
4243
4244 if (pdev->subsystem_vendor == PCI_VENDOR_ID_AMI) {
4245 printk(KERN_INFO
4246 "qla1280: Skipping AMI SubSys Vendor ID Chip\n");
4247 goto error;
4248 }
4249
4250 printk(KERN_INFO "qla1280: %s found on PCI bus %i, dev %i\n",
4251 bdp->name, pdev->bus->number, PCI_SLOT(pdev->devfn));
4252
4253 if (pci_enable_device(pdev)) {
4254 printk(KERN_WARNING
4255 "qla1280: Failed to enabled pci device, aborting.\n");
4256 goto error;
4257 }
4258
4259 pci_set_master(pdev);
4260
4261 error = -ENOMEM;
4262 host = scsi_host_alloc(&qla1280_driver_template, sizeof(*ha));
4263 if (!host) {
4264 printk(KERN_WARNING
4265 "qla1280: Failed to register host, aborting.\n");
4266 goto error_disable_device;
4267 }
4268
4269 ha = (struct scsi_qla_host *)host->hostdata;
4270 memset(ha, 0, sizeof(struct scsi_qla_host));
4271
4272 ha->pdev = pdev;
4273 ha->devnum = devnum;
4274
4275#ifdef QLA_64BIT_PTR
4276 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
4277 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
4278 printk(KERN_WARNING "scsi(%li): Unable to set a "
4279 "suitable DMA mask - aborting\n", ha->host_no);
4280 error = -ENODEV;
4281 goto error_put_host;
4282 }
4283 } else
4284 dprintk(2, "scsi(%li): 64 Bit PCI Addressing Enabled\n",
4285 ha->host_no);
4286#else
4287 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
4288 printk(KERN_WARNING "scsi(%li): Unable to set a "
4289 "suitable DMA mask - aborting\n", ha->host_no);
4290 error = -ENODEV;
4291 goto error_put_host;
4292 }
4293#endif
4294
4295 ha->request_ring = pci_alloc_consistent(ha->pdev,
4296 ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
4297 &ha->request_dma);
4298 if (!ha->request_ring) {
4299 printk(KERN_INFO "qla1280: Failed to get request memory\n");
4300 goto error_put_host;
4301 }
4302
4303 ha->response_ring = pci_alloc_consistent(ha->pdev,
4304 ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
4305 &ha->response_dma);
4306 if (!ha->response_ring) {
4307 printk(KERN_INFO "qla1280: Failed to get response memory\n");
4308 goto error_free_request_ring;
4309 }
4310
4311 ha->ports = bdp->numPorts;
4312
4313 ha->host = host;
4314 ha->host_no = host->host_no;
4315
4316 host->irq = pdev->irq;
4317 host->max_channel = bdp->numPorts - 1;
4318 host->max_lun = MAX_LUNS - 1;
4319 host->max_id = MAX_TARGETS;
4320 host->max_sectors = 1024;
4321 host->unique_id = host->host_no;
4322
4323 error = -ENODEV;
4324
4325#if MEMORY_MAPPED_IO
4326 ha->mmpbase = pci_ioremap_bar(ha->pdev, 1);
4327 if (!ha->mmpbase) {
4328 printk(KERN_INFO "qla1280: Unable to map I/O memory\n");
4329 goto error_free_response_ring;
4330 }
4331
4332 host->base = (unsigned long)ha->mmpbase;
4333 ha->iobase = (struct device_reg __iomem *)ha->mmpbase;
4334#else
4335 host->io_port = pci_resource_start(ha->pdev, 0);
4336 if (!request_region(host->io_port, 0xff, "qla1280")) {
4337 printk(KERN_INFO "qla1280: Failed to reserve i/o region "
4338 "0x%04lx-0x%04lx - already in use\n",
4339 host->io_port, host->io_port + 0xff);
4340 goto error_free_response_ring;
4341 }
4342
4343 ha->iobase = (struct device_reg *)host->io_port;
4344#endif
4345
4346 INIT_LIST_HEAD(&ha->done_q);
4347
4348
4349 qla1280_disable_intrs(ha);
4350
4351 if (request_irq(pdev->irq, qla1280_intr_handler, IRQF_SHARED,
4352 "qla1280", ha)) {
4353 printk("qla1280 : Failed to reserve interrupt %d already "
4354 "in use\n", pdev->irq);
4355 goto error_release_region;
4356 }
4357
4358
4359 if (qla1280_initialize_adapter(ha)) {
4360 printk(KERN_INFO "qla1x160: Failed to initialize adapter\n");
4361 goto error_free_irq;
4362 }
4363
4364
4365 host->this_id = ha->bus_settings[0].id;
4366
4367 pci_set_drvdata(pdev, host);
4368
4369 error = scsi_add_host(host, &pdev->dev);
4370 if (error)
4371 goto error_disable_adapter;
4372 scsi_scan_host(host);
4373
4374 return 0;
4375
4376 error_disable_adapter:
4377 qla1280_disable_intrs(ha);
4378 error_free_irq:
4379 free_irq(pdev->irq, ha);
4380 error_release_region:
4381#if MEMORY_MAPPED_IO
4382 iounmap(ha->mmpbase);
4383#else
4384 release_region(host->io_port, 0xff);
4385#endif
4386 error_free_response_ring:
4387 pci_free_consistent(ha->pdev,
4388 ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
4389 ha->response_ring, ha->response_dma);
4390 error_free_request_ring:
4391 pci_free_consistent(ha->pdev,
4392 ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
4393 ha->request_ring, ha->request_dma);
4394 error_put_host:
4395 scsi_host_put(host);
4396 error_disable_device:
4397 pci_disable_device(pdev);
4398 error:
4399 return error;
4400}
4401
4402
4403static void __devexit
4404qla1280_remove_one(struct pci_dev *pdev)
4405{
4406 struct Scsi_Host *host = pci_get_drvdata(pdev);
4407 struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
4408
4409 scsi_remove_host(host);
4410
4411 qla1280_disable_intrs(ha);
4412
4413 free_irq(pdev->irq, ha);
4414
4415#if MEMORY_MAPPED_IO
4416 iounmap(ha->mmpbase);
4417#else
4418 release_region(host->io_port, 0xff);
4419#endif
4420
4421 pci_free_consistent(ha->pdev,
4422 ((REQUEST_ENTRY_CNT + 1) * (sizeof(request_t))),
4423 ha->request_ring, ha->request_dma);
4424 pci_free_consistent(ha->pdev,
4425 ((RESPONSE_ENTRY_CNT + 1) * (sizeof(struct response))),
4426 ha->response_ring, ha->response_dma);
4427
4428 pci_disable_device(pdev);
4429
4430 scsi_host_put(host);
4431}
4432
4433static struct pci_driver qla1280_pci_driver = {
4434 .name = "qla1280",
4435 .id_table = qla1280_pci_tbl,
4436 .probe = qla1280_probe_one,
4437 .remove = __devexit_p(qla1280_remove_one),
4438};
4439
4440static int __init
4441qla1280_init(void)
4442{
4443 if (sizeof(struct srb) > sizeof(struct scsi_pointer)) {
4444 printk(KERN_WARNING
4445 "qla1280: struct srb too big, aborting\n");
4446 return -EINVAL;
4447 }
4448
4449#ifdef MODULE
4450
4451
4452
4453
4454
4455
4456
4457
4458
4459
4460
4461
4462 if (qla1280)
4463 qla1280_setup(qla1280);
4464#endif
4465
4466 return pci_register_driver(&qla1280_pci_driver);
4467}
4468
4469static void __exit
4470qla1280_exit(void)
4471{
4472 int i;
4473
4474 pci_unregister_driver(&qla1280_pci_driver);
4475
4476 for (i = 0; i < QL_NUM_FW_IMAGES; i++) {
4477 if (qla1280_fw_tbl[i].fw) {
4478 release_firmware(qla1280_fw_tbl[i].fw);
4479 qla1280_fw_tbl[i].fw = NULL;
4480 }
4481 }
4482}
4483
4484module_init(qla1280_init);
4485module_exit(qla1280_exit);
4486
4487
4488MODULE_AUTHOR("Qlogic & Jes Sorensen");
4489MODULE_DESCRIPTION("Qlogic ISP SCSI (qla1x80/qla1x160) driver");
4490MODULE_LICENSE("GPL");
4491MODULE_FIRMWARE("qlogic/1040.bin");
4492MODULE_FIRMWARE("qlogic/1280.bin");
4493MODULE_FIRMWARE("qlogic/12160.bin");
4494MODULE_VERSION(QLA1280_VERSION);
4495
4496
4497
4498
4499
4500
4501
4502
4503
4504
4505
4506
4507