1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#define QLA1280_VERSION "3.27.1"
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339#include <linux/module.h>
340
341#include <linux/types.h>
342#include <linux/string.h>
343#include <linux/errno.h>
344#include <linux/kernel.h>
345#include <linux/ioport.h>
346#include <linux/delay.h>
347#include <linux/timer.h>
348#include <linux/pci.h>
349#include <linux/proc_fs.h>
350#include <linux/stat.h>
351#include <linux/pci_ids.h>
352#include <linux/interrupt.h>
353#include <linux/init.h>
354#include <linux/dma-mapping.h>
355#include <linux/firmware.h>
356
357#include <asm/io.h>
358#include <asm/irq.h>
359#include <asm/byteorder.h>
360#include <asm/processor.h>
361#include <asm/types.h>
362
363#include <scsi/scsi.h>
364#include <scsi/scsi_cmnd.h>
365#include <scsi/scsi_device.h>
366#include <scsi/scsi_host.h>
367#include <scsi/scsi_tcq.h>
368
369#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
370#include <asm/sn/io.h>
371#endif
372
373
374
375
376
377
378#define DEBUG_QLA1280_INTR 0
379#define DEBUG_PRINT_NVRAM 0
380#define DEBUG_QLA1280 0
381
382#define MEMORY_MAPPED_IO 1
383
384#include "qla1280.h"
385
386#ifndef BITS_PER_LONG
387#error "BITS_PER_LONG not defined!"
388#endif
389#if (BITS_PER_LONG == 64) || defined CONFIG_HIGHMEM
390#define QLA_64BIT_PTR 1
391#endif
392
393#ifdef QLA_64BIT_PTR
394#define pci_dma_hi32(a) ((a >> 16) >> 16)
395#else
396#define pci_dma_hi32(a) 0
397#endif
398#define pci_dma_lo32(a) (a & 0xffffffff)
399
400#define NVRAM_DELAY() udelay(500)
401
#if defined(__ia64__) && !defined(ia64_platform_is)
/*
 * Fallback for kernels that do not provide ia64_platform_is().
 * Fixed to expand the macro argument; the original body referenced an
 * undeclared identifier 'x' and would not have compiled if ever used.
 */
#define ia64_platform_is(foo)	(!strcmp((foo), platform_name))
#endif
405
406
407#define IS_ISP1040(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020)
408#define IS_ISP1x40(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020 || \
409 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1240)
410#define IS_ISP1x160(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP10160 || \
411 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP12160)
412
413
414static int qla1280_probe_one(struct pci_dev *, const struct pci_device_id *);
415static void qla1280_remove_one(struct pci_dev *);
416
417
418
419
420static void qla1280_done(struct scsi_qla_host *);
421static int qla1280_get_token(char *);
422static int qla1280_setup(char *s) __init;
423
424
425
426
427static int qla1280_load_firmware(struct scsi_qla_host *);
428static int qla1280_init_rings(struct scsi_qla_host *);
429static int qla1280_nvram_config(struct scsi_qla_host *);
430static int qla1280_mailbox_command(struct scsi_qla_host *,
431 uint8_t, uint16_t *);
432static int qla1280_bus_reset(struct scsi_qla_host *, int);
433static int qla1280_device_reset(struct scsi_qla_host *, int, int);
434static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int);
435static int qla1280_abort_isp(struct scsi_qla_host *);
436#ifdef QLA_64BIT_PTR
437static int qla1280_64bit_start_scsi(struct scsi_qla_host *, struct srb *);
438#else
439static int qla1280_32bit_start_scsi(struct scsi_qla_host *, struct srb *);
440#endif
441static void qla1280_nv_write(struct scsi_qla_host *, uint16_t);
442static void qla1280_poll(struct scsi_qla_host *);
443static void qla1280_reset_adapter(struct scsi_qla_host *);
444static void qla1280_marker(struct scsi_qla_host *, int, int, int, u8);
445static void qla1280_isp_cmd(struct scsi_qla_host *);
446static void qla1280_isr(struct scsi_qla_host *, struct list_head *);
447static void qla1280_rst_aen(struct scsi_qla_host *);
448static void qla1280_status_entry(struct scsi_qla_host *, struct response *,
449 struct list_head *);
450static void qla1280_error_entry(struct scsi_qla_host *, struct response *,
451 struct list_head *);
452static uint16_t qla1280_get_nvram_word(struct scsi_qla_host *, uint32_t);
453static uint16_t qla1280_nvram_request(struct scsi_qla_host *, uint32_t);
454static uint16_t qla1280_debounce_register(volatile uint16_t __iomem *);
455static request_t *qla1280_req_pkt(struct scsi_qla_host *);
456static int qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *,
457 unsigned int);
458static void qla1280_get_target_parameters(struct scsi_qla_host *,
459 struct scsi_device *);
460static int qla1280_set_target_parameters(struct scsi_qla_host *, int, int);
461
462
463static struct qla_driver_setup driver_setup;
464
465
466
467
468static inline uint16_t
469qla1280_data_direction(struct scsi_cmnd *cmnd)
470{
471 switch(cmnd->sc_data_direction) {
472 case DMA_FROM_DEVICE:
473 return BIT_5;
474 case DMA_TO_DEVICE:
475 return BIT_6;
476 case DMA_BIDIRECTIONAL:
477 return BIT_5 | BIT_6;
478
479
480
481
482
483 case DMA_NONE:
484 default:
485 return 0;
486 }
487}
488
489#if DEBUG_QLA1280
490static void __qla1280_print_scsi_cmd(struct scsi_cmnd * cmd);
491static void __qla1280_dump_buffer(char *, int);
492#endif
493
494
495
496
497
498#ifdef MODULE
499static char *qla1280;
500
501
502module_param(qla1280, charp, 0);
503#else
504__setup("qla1280=", qla1280_setup);
505#endif
506
507
508
509
510
511
512
513
514#define CMD_SP(Cmnd) &Cmnd->SCp
515#define CMD_CDBLEN(Cmnd) Cmnd->cmd_len
516#define CMD_CDBP(Cmnd) Cmnd->cmnd
517#define CMD_SNSP(Cmnd) Cmnd->sense_buffer
518#define CMD_SNSLEN(Cmnd) SCSI_SENSE_BUFFERSIZE
519#define CMD_RESULT(Cmnd) Cmnd->result
520#define CMD_HANDLE(Cmnd) Cmnd->host_scribble
521#define CMD_REQUEST(Cmnd) Cmnd->request->cmd
522
523#define CMD_HOST(Cmnd) Cmnd->device->host
524#define SCSI_BUS_32(Cmnd) Cmnd->device->channel
525#define SCSI_TCN_32(Cmnd) Cmnd->device->id
526#define SCSI_LUN_32(Cmnd) Cmnd->device->lun
527
528
529
530
531
532
/*
 * Per-board-model attributes, indexed by ha->devnum (which comes from
 * the driver_data field of the PCI device table below).
 */
struct qla_boards {
	char *name;	/* board model name, e.g. "QLA1280" */
	int numPorts;	/* number of SCSI buses on the board */
	int fw_index;	/* index into qla1280_fw_tbl[] */
};
538
539
/*
 * PCI IDs handled by this driver.  The last field (driver_data) is an
 * index into ql1280_board_tbl[].
 */
static struct pci_device_id qla1280_pci_tbl[] = {
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP12160,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1020,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1080,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1240,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3},
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1280,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP10160,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
	{0,}
};
MODULE_DEVICE_TABLE(pci, qla1280_pci_tbl);
556
/* Serializes firmware loading and caching across all adapter instances. */
DEFINE_MUTEX(qla1280_firmware_mutex);

struct qla_fw {
	char *fwname;			/* firmware image file name */
	const struct firmware *fw;	/* cached image, NULL until loaded */
};

#define QL_NUM_FW_IMAGES 3

/* Firmware images, indexed by qla_boards.fw_index. */
struct qla_fw qla1280_fw_tbl[QL_NUM_FW_IMAGES] = {
	{"qlogic/1040.bin", NULL},
	{"qlogic/1280.bin", NULL},
	{"qlogic/12160.bin", NULL},
};
571
572
/*
 * Board attribute table; the entry order must match the driver_data
 * indices in qla1280_pci_tbl[] above.  The blank-named final entry is
 * a terminator/unknown-board sentinel.
 */
static struct qla_boards ql1280_board_tbl[] = {
	{.name = "QLA12160", .numPorts = 2, .fw_index = 2},
	{.name = "QLA1040" , .numPorts = 1, .fw_index = 0},
	{.name = "QLA1080" , .numPorts = 1, .fw_index = 1},
	{.name = "QLA1240" , .numPorts = 2, .fw_index = 1},
	{.name = "QLA1280" , .numPorts = 2, .fw_index = 1},
	{.name = "QLA10160", .numPorts = 1, .fw_index = 2},
	{.name = "        ", .numPorts = 0, .fw_index = -1},
};
582
583static int qla1280_verbose = 1;
584
585#if DEBUG_QLA1280
586static int ql_debug_level = 1;
587#define dprintk(level, format, a...) \
588 do { if (ql_debug_level >= level) printk(KERN_ERR format, ##a); } while(0)
589#define qla1280_dump_buffer(level, buf, size) \
590 if (ql_debug_level >= level) __qla1280_dump_buffer(buf, size)
591#define qla1280_print_scsi_cmd(level, cmd) \
592 if (ql_debug_level >= level) __qla1280_print_scsi_cmd(cmd)
593#else
594#define ql_debug_level 0
595#define dprintk(level, format, a...) do{}while(0)
596#define qla1280_dump_buffer(a, b, c) do{}while(0)
597#define qla1280_print_scsi_cmd(a, b) do{}while(0)
598#endif
599
600#define ENTER(x) dprintk(3, "qla1280 : Entering %s()\n", x);
601#define LEAVE(x) dprintk(3, "qla1280 : Leaving %s()\n", x);
602#define ENTER_INTR(x) dprintk(4, "qla1280 : Entering %s()\n", x);
603#define LEAVE_INTR(x) dprintk(4, "qla1280 : Leaving %s()\n", x);
604
605
/*
 * qla1280_read_nvram
 *      Read the adapter's NVRAM into ha->nvram and validate it.
 *
 * Returns:
 *      0 when the NVRAM image is valid (checksum zero); non-zero when
 *      validation fails or NVRAM use is disabled via driver options.
 *      Sets ha->nvram_valid accordingly.
 */
static int qla1280_read_nvram(struct scsi_qla_host *ha)
{
	uint16_t *wptr;
	uint8_t chksum;
	int cnt, i;
	struct nvram *nv;

	ENTER("qla1280_read_nvram");

	/* Honour the "no_nvram" boot/module option. */
	if (driver_setup.no_nvram)
		return 1;

	printk(KERN_INFO "scsi(%ld): Reading NVRAM\n", ha->host_no);

	wptr = (uint16_t *)&ha->nvram;
	nv = &ha->nvram;
	chksum = 0;
	/* Read the first three words so the ID/version header can be
	 * sanity-checked before committing to a full read. */
	for (cnt = 0; cnt < 3; cnt++) {
		*wptr = qla1280_get_nvram_word(ha, cnt);
		chksum += *wptr & 0xff;
		chksum += (*wptr >> 8) & 0xff;
		wptr++;
	}

	if (nv->id0 != 'I' || nv->id1 != 'S' ||
	    nv->id2 != 'P' || nv->id3 != ' ' || nv->version < 1) {
		dprintk(2, "Invalid nvram ID or version!\n");
		/* Force a non-zero checksum so the caller falls back to
		 * default settings. */
		chksum = 1;
	} else {
		/* Header looks valid - read the rest of the image,
		 * accumulating the byte-wise checksum as we go. */
		for (; cnt < sizeof(struct nvram); cnt++) {
			*wptr = qla1280_get_nvram_word(ha, cnt);
			chksum += *wptr & 0xff;
			chksum += (*wptr >> 8) & 0xff;
			wptr++;
		}
	}

	dprintk(3, "qla1280_read_nvram: NVRAM Magic ID= %c %c %c %02x"
		" version %i\n", nv->id0, nv->id1, nv->id2, nv->id3,
		nv->version);

	/* A valid NVRAM image checksums to zero over all bytes. */
	if (chksum) {
		if (!driver_setup.no_nvram)
			printk(KERN_WARNING "scsi(%ld): Unable to identify or "
			       "validate NVRAM checksum, using default "
			       "settings\n", ha->host_no);
		ha->nvram_valid = 0;
	} else
		ha->nvram_valid = 1;

	/* Byte-swap the multi-byte fields for in-driver use.
	 * NOTE(review): cpu_to_le16 is used where le16_to_cpu would read
	 * more naturally; for a 16-bit swap the two are equivalent. */
	nv->isp_parameter = cpu_to_le16(nv->isp_parameter);
	nv->firmware_feature.w = cpu_to_le16(nv->firmware_feature.w);
	for(i = 0; i < MAX_BUSES; i++) {
		nv->bus[i].selection_timeout = cpu_to_le16(nv->bus[i].selection_timeout);
		nv->bus[i].max_queue_depth = cpu_to_le16(nv->bus[i].max_queue_depth);
	}
	dprintk(1, "qla1280_read_nvram: Completed Reading NVRAM\n");
	LEAVE("qla1280_read_nvram");

	return chksum;
}
684
685
686
687
688
689static const char *
690qla1280_info(struct Scsi_Host *host)
691{
692 static char qla1280_scsi_name_buffer[125];
693 char *bp;
694 struct scsi_qla_host *ha;
695 struct qla_boards *bdp;
696
697 bp = &qla1280_scsi_name_buffer[0];
698 ha = (struct scsi_qla_host *)host->hostdata;
699 bdp = &ql1280_board_tbl[ha->devnum];
700 memset(bp, 0, sizeof(qla1280_scsi_name_buffer));
701
702 sprintf (bp,
703 "QLogic %s PCI to SCSI Host Adapter\n"
704 " Firmware version: %2d.%02d.%02d, Driver version %s",
705 &bdp->name[0], ha->fwver1, ha->fwver2, ha->fwver3,
706 QLA1280_VERSION);
707 return bp;
708}
709
710
711
712
713
714
715
716
717
718
719
720
/*
 * qla1280_queuecommand_lck
 *      Queue a SCSI command to the adapter (invoked with the host lock
 *      held via DEF_SCSI_QCMD).  Initializes the srb embedded in the
 *      command and hands it to the 32- or 64-bit request-ring routine.
 *
 * Returns the start_scsi routine's status (0 on success).
 */
static int
qla1280_queuecommand_lck(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
	struct srb *sp = (struct srb *)CMD_SP(cmd);
	int status;

	cmd->scsi_done = fn;
	sp->cmd = cmd;
	sp->flags = 0;
	sp->wait = NULL;
	/* Cleared so later code can tell the command has not yet been
	 * handed to the ISP. */
	CMD_HANDLE(cmd) = (unsigned char *)NULL;

	qla1280_print_scsi_cmd(5, cmd);

#ifdef QLA_64BIT_PTR
	/* 64-bit PCI addressing build: use the 64-bit IOCB path. */
	status = qla1280_64bit_start_scsi(ha, sp);
#else
	status = qla1280_32bit_start_scsi(ha, sp);
#endif
	return status;
}
750
/* Mid-layer entry point: wraps qla1280_queuecommand_lck with the lock
 * handling the SCSI core expects. */
static DEF_SCSI_QCMD(qla1280_queuecommand)

/* Recovery escalation levels for qla1280_error_action(). */
enum action {
	ABORT_COMMAND,
	DEVICE_RESET,
	BUS_RESET,
	ADAPTER_RESET,
};
759
760
/*
 * qla1280_mailbox_timeout
 *      Timer callback fired when a mailbox command fails to complete
 *      in time.  Captures mailbox0 plus interrupt state for the log,
 *      then completes the waiter so the mailbox path can unwind.
 */
static void qla1280_mailbox_timeout(unsigned long __data)
{
	struct scsi_qla_host *ha = (struct scsi_qla_host *)__data;
	struct device_reg __iomem *reg;
	reg = ha->iobase;

	ha->mailbox_out[0] = RD_REG_WORD(&reg->mailbox0);
	printk(KERN_ERR "scsi(%ld): mailbox timed out, mailbox0 %04x, "
	       "ictrl %04x, istatus %04x\n", ha->host_no, ha->mailbox_out[0],
	       RD_REG_WORD(&reg->ictrl), RD_REG_WORD(&reg->istatus));
	complete(ha->mailbox_wait);
}
773
/*
 * _qla1280_wait_for_single_command
 *      Wait up to 4 seconds for one outstanding command to complete.
 *      Caller holds the host lock and has already pointed sp->wait at
 *      "wait"; the lock is dropped while sleeping and retaken after.
 *
 * Returns SUCCESS if the command completed, FAILED otherwise.
 */
static int
_qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp,
				 struct completion *wait)
{
	int status = FAILED;
	struct scsi_cmnd *cmd = sp->cmd;

	spin_unlock_irq(ha->host->host_lock);
	wait_for_completion_timeout(wait, 4*HZ);
	spin_lock_irq(ha->host->host_lock);
	sp->wait = NULL;
	/* COMPLETED_HANDLE marks a finished command (presumably set by
	 * the completion path - confirm against the ISR); in that case
	 * calling scsi_done is our responsibility. */
	if(CMD_HANDLE(cmd) == COMPLETED_HANDLE) {
		status = SUCCESS;
		(*cmd->scsi_done)(cmd);
	}
	return status;
}
791
/*
 * qla1280_wait_for_single_command
 *      Convenience wrapper: attach an on-stack completion to the srb
 *      and wait for the command via the worker above.
 */
static int
qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	sp->wait = &wait;
	return _qla1280_wait_for_single_command(ha, sp, &wait);
}
800
801static int
802qla1280_wait_for_pending_commands(struct scsi_qla_host *ha, int bus, int target)
803{
804 int cnt;
805 int status;
806 struct srb *sp;
807 struct scsi_cmnd *cmd;
808
809 status = SUCCESS;
810
811
812
813
814
815 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
816 sp = ha->outstanding_cmds[cnt];
817 if (sp) {
818 cmd = sp->cmd;
819
820 if (bus >= 0 && SCSI_BUS_32(cmd) != bus)
821 continue;
822 if (target >= 0 && SCSI_TCN_32(cmd) != target)
823 continue;
824
825 status = qla1280_wait_for_single_command(ha, sp);
826 if (status == FAILED)
827 break;
828 }
829 }
830 return status;
831}
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
/*
 * qla1280_error_action
 *      Common worker for the SCSI error-handler entry points.  Issues
 *      the requested recovery action (command abort, device reset, bus
 *      reset, or full adapter reset) and then waits for the affected
 *      command(s) to finish.  Called with the host lock held.
 *
 * Returns SUCCESS or FAILED.
 */
static int
qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
{
	struct scsi_qla_host *ha;
	int bus, target, lun;
	struct srb *sp;
	int i, found;
	int result=FAILED;
	int wait_for_bus=-1;
	int wait_for_target = -1;
	DECLARE_COMPLETION_ONSTACK(wait);

	ENTER("qla1280_error_action");

	ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata);
	sp = (struct srb *)CMD_SP(cmd);
	bus = SCSI_BUS_32(cmd);
	target = SCSI_TCN_32(cmd);
	lun = SCSI_LUN_32(cmd);

	dprintk(4, "error_action %i, istatus 0x%04x\n", action,
		RD_REG_WORD(&ha->iobase->istatus));

	dprintk(4, "host_cmd 0x%04x, ictrl 0x%04x, jiffies %li\n",
		RD_REG_WORD(&ha->iobase->host_cmd),
		RD_REG_WORD(&ha->iobase->ictrl), jiffies);

	if (qla1280_verbose)
		printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, "
		       "Handle=0x%p, action=0x%x\n",
		       ha->host_no, cmd, CMD_HANDLE(cmd), action);

	/*
	 * See if the command is still outstanding; if so, hook up the
	 * on-stack completion so we are notified when it finishes.
	 */
	found = -1;
	for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
		if (sp == ha->outstanding_cmds[i]) {
			found = i;
			sp->wait = &wait;
			break;
		}
	}

	if (found < 0) {
		/* Command already completed - nothing to abort. */
		result = SUCCESS;
		if (qla1280_verbose) {
			printk(KERN_INFO
			       "scsi(%ld:%d:%d:%d): specified command has "
			       "already completed.\n", ha->host_no, bus,
			       target, lun);
		}
	}

	switch (action) {

	case ABORT_COMMAND:
		dprintk(1, "qla1280: RISC aborting command\n");
		/* Only ask the ISP to abort when the command is
		 * actually still in flight. */
		if (found >= 0)
			qla1280_abort_command(ha, sp, found);
		break;

	case DEVICE_RESET:
		if (qla1280_verbose)
			printk(KERN_INFO
			       "scsi(%ld:%d:%d:%d): Queueing device reset "
			       "command.\n", ha->host_no, bus, target, lun);
		if (qla1280_device_reset(ha, bus, target) == 0) {
			/* Reset issued; remember scope for the final
			 * wait-for-pending pass below. */
			wait_for_bus = bus;
			wait_for_target = target;
		}
		break;

	case BUS_RESET:
		if (qla1280_verbose)
			printk(KERN_INFO "qla1280(%ld:%d): Issued bus "
			       "reset.\n", ha->host_no, bus);
		if (qla1280_bus_reset(ha, bus) == 0) {
			/* Wait for everything on this bus afterwards. */
			wait_for_bus = bus;
		}
		break;

	case ADAPTER_RESET:
	default:
		if (qla1280_verbose) {
			printk(KERN_INFO
			       "scsi(%ld): Issued ADAPTER RESET\n",
			       ha->host_no);
			printk(KERN_INFO "scsi(%ld): I/O processing will "
			       "continue automatically\n", ha->host_no);
		}
		ha->flags.reset_active = 1;

		if (qla1280_abort_isp(ha) != 0) {	/* it's already FAILED */
			result = FAILED;
		}

		ha->flags.reset_active = 0;
	}

	/*
	 * If the command was outstanding, wait for it to complete (the
	 * wait drops and retakes the host lock).
	 */
	if (found >= 0)
		result = _qla1280_wait_for_single_command(ha, sp, &wait);

	if (action == ABORT_COMMAND && result != SUCCESS) {
		printk(KERN_WARNING
		       "scsi(%li:%i:%i:%i): "
		       "Unable to abort command!\n",
		       ha->host_no, bus, target, lun);
	}

	/*
	 * After a successful device/bus reset, also wait for all other
	 * commands that were outstanding on the affected bus/target.
	 */
	if (result == SUCCESS && wait_for_bus >= 0) {
		result = qla1280_wait_for_pending_commands(ha,
			wait_for_bus, wait_for_target);
	}

	dprintk(1, "RESET returning %d\n", result);

	LEAVE("qla1280_error_action");
	return result;
}
996
997
998
999
1000
1001static int
1002qla1280_eh_abort(struct scsi_cmnd * cmd)
1003{
1004 int rc;
1005
1006 spin_lock_irq(cmd->device->host->host_lock);
1007 rc = qla1280_error_action(cmd, ABORT_COMMAND);
1008 spin_unlock_irq(cmd->device->host->host_lock);
1009
1010 return rc;
1011}
1012
1013
1014
1015
1016
1017static int
1018qla1280_eh_device_reset(struct scsi_cmnd *cmd)
1019{
1020 int rc;
1021
1022 spin_lock_irq(cmd->device->host->host_lock);
1023 rc = qla1280_error_action(cmd, DEVICE_RESET);
1024 spin_unlock_irq(cmd->device->host->host_lock);
1025
1026 return rc;
1027}
1028
1029
1030
1031
1032
1033static int
1034qla1280_eh_bus_reset(struct scsi_cmnd *cmd)
1035{
1036 int rc;
1037
1038 spin_lock_irq(cmd->device->host->host_lock);
1039 rc = qla1280_error_action(cmd, BUS_RESET);
1040 spin_unlock_irq(cmd->device->host->host_lock);
1041
1042 return rc;
1043}
1044
1045
1046
1047
1048
1049static int
1050qla1280_eh_adapter_reset(struct scsi_cmnd *cmd)
1051{
1052 int rc;
1053
1054 spin_lock_irq(cmd->device->host->host_lock);
1055 rc = qla1280_error_action(cmd, ADAPTER_RESET);
1056 spin_unlock_irq(cmd->device->host->host_lock);
1057
1058 return rc;
1059}
1060
1061static int
1062qla1280_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1063 sector_t capacity, int geom[])
1064{
1065 int heads, sectors, cylinders;
1066
1067 heads = 64;
1068 sectors = 32;
1069 cylinders = (unsigned long)capacity / (heads * sectors);
1070 if (cylinders > 1024) {
1071 heads = 255;
1072 sectors = 63;
1073 cylinders = (unsigned long)capacity / (heads * sectors);
1074
1075
1076 }
1077
1078 geom[0] = heads;
1079 geom[1] = sectors;
1080 geom[2] = cylinders;
1081
1082 return 0;
1083}
1084
1085
1086
/* Mask all ISP interrupts.  The read-back after the write presumably
 * flushes the posted PCI write - standard MMIO practice; confirm
 * against the ISP programming manual. */
static inline void
qla1280_disable_intrs(struct scsi_qla_host *ha)
{
	WRT_REG_WORD(&ha->iobase->ictrl, 0);
	RD_REG_WORD(&ha->iobase->ictrl);
}
1093
1094
/* Enable ISP and RISC interrupt delivery; read back to flush the
 * posted write. */
static inline void
qla1280_enable_intrs(struct scsi_qla_host *ha)
{
	WRT_REG_WORD(&ha->iobase->ictrl, (ISP_EN_INT | ISP_EN_RISC));
	RD_REG_WORD(&ha->iobase->ictrl);
}
1101
1102
1103
1104
1105
/*
 * qla1280_intr_handler
 *      Hardware interrupt handler.  Masks chip interrupts, services a
 *      pending RISC interrupt via qla1280_isr(), completes any
 *      commands queued to done_q, then re-enables interrupts.
 *
 * Returns IRQ_HANDLED if the interrupt was ours, IRQ_NONE otherwise.
 */
static irqreturn_t
qla1280_intr_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha;
	struct device_reg __iomem *reg;
	u16 data;
	int handled = 0;

	ENTER_INTR ("qla1280_intr_handler");
	ha = (struct scsi_qla_host *)dev_id;

	spin_lock(ha->host->host_lock);

	ha->isr_count++;
	reg = ha->iobase;

	/* Mask the chip while we service it. */
	qla1280_disable_intrs(ha);

	data = qla1280_debounce_register(&reg->istatus);
	/* Service the RISC if it raised the interrupt. */
	if (data & RISC_INT) {
		qla1280_isr(ha, &ha->done_q);
		handled = 1;
	}
	if (!list_empty(&ha->done_q))
		qla1280_done(ha);

	spin_unlock(ha->host->host_lock);

	/* NOTE(review): interrupts are re-enabled after dropping the
	 * host lock - preserved as-is; verify this ordering is
	 * intentional before changing it. */
	qla1280_enable_intrs(ha);

	LEAVE_INTR("qla1280_intr_handler");
	return IRQ_RETVAL(handled);
}
1140
1141
/*
 * qla1280_set_target_parameters
 *      Program the ISP with per-target negotiation settings taken from
 *      NVRAM (sync/wide/PPR, tag queueing, etc.) via the SET TARGET
 *      PARAMETERS mailbox command, then set the queue depth and
 *      execution throttle for every LUN on the target.
 *
 * Returns 0 on success, non-zero if any mailbox command failed.
 */
static int
qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target)
{
	uint8_t mr;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct nvram *nv;
	int status, lun;

	nv = &ha->nvram;

	/* Mailbox registers 0-3 carry the request. */
	mr = BIT_3 | BIT_2 | BIT_1 | BIT_0;

	/* mb[1]: target number in the high byte; BIT_7 selects bus 1. */
	mb[0] = MBC_SET_TARGET_PARAMETERS;
	mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
	/* mb[2]: capability flag bits packed from the NVRAM image. */
	mb[2] = nv->bus[bus].target[target].parameter.renegotiate_on_error << 8;
	mb[2] |= nv->bus[bus].target[target].parameter.stop_queue_on_check << 9;
	mb[2] |= nv->bus[bus].target[target].parameter.auto_request_sense << 10;
	mb[2] |= nv->bus[bus].target[target].parameter.tag_queuing << 11;
	mb[2] |= nv->bus[bus].target[target].parameter.enable_sync << 12;
	mb[2] |= nv->bus[bus].target[target].parameter.enable_wide << 13;
	mb[2] |= nv->bus[bus].target[target].parameter.parity_checking << 14;
	mb[2] |= nv->bus[bus].target[target].parameter.disconnect_allowed << 15;

	if (IS_ISP1x160(ha)) {
		/* 1x160 parts add PPR options in mb[6] (hence BIT_6). */
		mb[2] |= nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr << 5;
		mb[3] =	(nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8);
		mb[6] =	(nv->bus[bus].target[target].ppr_1x160.flags.ppr_options << 8) |
			nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width;
		mr |= BIT_6;
	} else {
		mb[3] =	(nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8);
	}
	/* Low byte of mb[3] is the sync period for both families. */
	mb[3] |= nv->bus[bus].target[target].sync_period;

	status = qla1280_mailbox_command(ha, mr, mb);

	/* Program queue depth and throttle for every LUN. */
	for (lun = 0; lun < MAX_LUNS; lun++) {
		mb[0] = MBC_SET_DEVICE_QUEUE;
		mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
		mb[1] |= lun;
		mb[2] = nv->bus[bus].max_queue_depth;
		mb[3] = nv->bus[bus].target[target].execution_throttle;
		status |= qla1280_mailbox_command(ha, 0x0f, mb);
	}

	if (status)
		printk(KERN_WARNING "scsi(%ld:%i:%i): "
		       "qla1280_set_target_parameters() failed\n",
		       ha->host_no, bus, target);
	return status;
}
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
/*
 * qla1280_slave_configure
 *      Mid-layer hook called once per discovered device.  Sets the
 *      queue depth, folds the device's reported capabilities and any
 *      driver-setup overrides into the NVRAM shadow, then programs the
 *      target parameters into the ISP.
 *
 * Returns 0 on success, 1 if the bus is dead.
 */
static int
qla1280_slave_configure(struct scsi_device *device)
{
	struct scsi_qla_host *ha;
	int default_depth = 3;
	int bus = device->channel;
	int target = device->id;
	int status = 0;
	struct nvram *nv;
	unsigned long flags;

	ha = (struct scsi_qla_host *)device->host->hostdata;
	nv = &ha->nvram;

	if (qla1280_check_for_dead_scsi_bus(ha, bus))
		return 1;

	/* Deep queue only when both the device and our per-bus settings
	 * allow tagged queueing; otherwise fall back to a depth of 3. */
	if (device->tagged_supported &&
	    (ha->bus_settings[bus].qtag_enables & (BIT_0 << target))) {
		scsi_change_queue_depth(device, ha->bus_settings[bus].hiwat);
	} else {
		scsi_change_queue_depth(device, default_depth);
	}

	nv->bus[bus].target[target].parameter.enable_sync = device->sdtr;
	nv->bus[bus].target[target].parameter.enable_wide = device->wdtr;
	nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = device->ppr;

	/* Apply driver-setup overrides (global disable or per-target
	 * mask) for sync, wide and - on 1x160 parts - PPR. */
	if (driver_setup.no_sync ||
	    (driver_setup.sync_mask &&
	     (~driver_setup.sync_mask & (1 << target))))
		nv->bus[bus].target[target].parameter.enable_sync = 0;
	if (driver_setup.no_wide ||
	    (driver_setup.wide_mask &&
	     (~driver_setup.wide_mask & (1 << target))))
		nv->bus[bus].target[target].parameter.enable_wide = 0;
	if (IS_ISP1x160(ha)) {
		if (driver_setup.no_ppr ||
		    (driver_setup.ppr_mask &&
		     (~driver_setup.ppr_mask & (1 << target))))
			nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 0;
	}

	spin_lock_irqsave(ha->host->host_lock, flags);
	if (nv->bus[bus].target[target].parameter.enable_sync)
		status = qla1280_set_target_parameters(ha, bus, target);
	qla1280_get_target_parameters(ha, device);
	spin_unlock_irqrestore(ha->host->host_lock, flags);
	return status;
}
1258
1259
1260
1261
1262
1263
1264
1265
1266
/*
 * qla1280_done
 *      Drain ha->done_q: for each completed srb, perform result-
 *      specific bookkeeping, unmap its DMA buffers and either call
 *      scsi_done or wake a waiter in the error-handling path.
 *      Called with the host lock held.
 */
static void
qla1280_done(struct scsi_qla_host *ha)
{
	struct srb *sp;
	struct list_head *done_q;
	int bus, target, lun;
	struct scsi_cmnd *cmd;

	ENTER("qla1280_done");

	done_q = &ha->done_q;

	while (!list_empty(done_q)) {
		sp = list_entry(done_q->next, struct srb, list);

		list_del(&sp->list);

		cmd = sp->cmd;
		bus = SCSI_BUS_32(cmd);
		target = SCSI_TCN_32(cmd);
		lun = SCSI_LUN_32(cmd);

		switch ((CMD_RESULT(cmd) >> 16)) {
		case DID_RESET:
			/* Send marker so the ISP resynchronizes, unless
			 * a full ISP abort is already in progress. */
			if (!ha->flags.abort_isp_active)
				qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
			break;
		case DID_ABORT:
			sp->flags &= ~SRB_ABORT_PENDING;
			sp->flags |= SRB_ABORTED;
			break;
		default:
			break;
		}

		/* Release the command's DMA mappings. */
		scsi_dma_unmap(cmd);

		ha->actthreads--;

		/* If the error handler is waiting on this command, wake
		 * it instead of completing to the mid-layer here. */
		if (sp->wait == NULL)
			(*(cmd)->scsi_done)(cmd);
		else
			complete(sp->wait);
	}
	LEAVE("qla1280_done");
}
1316
1317
1318
1319
/*
 * qla1280_return_status
 *      Translate an ISP status-entry completion code into the SCSI
 *      mid-layer result format: (scsi_status & 0xff) | (host << 16).
 *      Also records the residual for underruns.
 */
static int
qla1280_return_status(struct response * sts, struct scsi_cmnd *cp)
{
	int host_status = DID_ERROR;
	uint16_t comp_status = le16_to_cpu(sts->comp_status);
	uint16_t state_flags = le16_to_cpu(sts->state_flags);
	uint32_t residual_length = le32_to_cpu(sts->residual_length);
	uint16_t scsi_status = le16_to_cpu(sts->scsi_status);
#if DEBUG_QLA1280_INTR
	static char *reason[] = {
		"DID_OK",
		"DID_NO_CONNECT",
		"DID_BUS_BUSY",
		"DID_TIME_OUT",
		"DID_BAD_TARGET",
		"DID_ABORT",
		"DID_PARITY",
		"DID_ERROR",
		"DID_RESET",
		"DID_BAD_INTR"
	};
#endif

	ENTER("qla1280_return_status");

#if DEBUG_QLA1280_INTR
	/* (debug-only dump removed upstream; placeholder kept) */
#endif

	switch (comp_status) {
	case CS_COMPLETE:
		host_status = DID_OK;
		break;

	case CS_INCOMPLETE:
		/* Use the state flags to pinpoint how far the command
		 * progressed before failing. */
		if (!(state_flags & SF_GOT_BUS))
			host_status = DID_NO_CONNECT;
		else if (!(state_flags & SF_GOT_TARGET))
			host_status = DID_BAD_TARGET;
		else if (!(state_flags & SF_SENT_CDB))
			host_status = DID_ERROR;
		else if (!(state_flags & SF_TRANSFERRED_DATA))
			host_status = DID_ERROR;
		else if (!(state_flags & SF_GOT_STATUS))
			host_status = DID_ERROR;
		else if (!(state_flags & SF_GOT_SENSE))
			host_status = DID_ERROR;
		break;

	case CS_RESET:
		host_status = DID_RESET;
		break;

	case CS_ABORTED:
		host_status = DID_ABORT;
		break;

	case CS_TIMEOUT:
		host_status = DID_TIME_OUT;
		break;

	case CS_DATA_OVERRUN:
		dprintk(2, "Data overrun 0x%x\n", residual_length);
		dprintk(2, "qla1280_return_status: response packet data\n");
		qla1280_dump_buffer(2, (char *)sts, RESPONSE_ENTRY_SIZE);
		host_status = DID_ERROR;
		break;

	case CS_DATA_UNDERRUN:
		/* Underrun below the command's stated underflow limit is
		 * an error; otherwise record the residual and succeed. */
		if ((scsi_bufflen(cp) - residual_length) <
		    cp->underflow) {
			printk(KERN_WARNING
			       "scsi: Underflow detected - retrying "
			       "command.\n");
			host_status = DID_ERROR;
		} else {
			scsi_set_resid(cp, residual_length);
			host_status = DID_OK;
		}
		break;

	default:
		host_status = DID_ERROR;
		break;
	}

#if DEBUG_QLA1280_INTR
	dprintk(1, "qla1280 ISP status: host status (%s) scsi status %x\n",
		reason[host_status], scsi_status);
#endif

	LEAVE("qla1280_return_status");

	return (scsi_status & 0xff) | (host_status << 16);
}
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
/*
 * qla1280_initialize_adapter
 *      One-time adapter bring-up: clear state flags, read NVRAM, load
 *      RISC firmware, apply NVRAM configuration, initialize the
 *      request/response rings and reset each SCSI bus.
 *
 * Returns 0 on success, non-zero on failure.
 */
static int
qla1280_initialize_adapter(struct scsi_qla_host *ha)
{
	struct device_reg __iomem *reg;
	int status;
	int bus;
	unsigned long flags;

	ENTER("qla1280_initialize_adapter");

	/* Start from a clean flag state. */
	ha->flags.online = 0;
	ha->flags.disable_host_adapter = 0;
	ha->flags.reset_active = 0;
	ha->flags.abort_isp_active = 0;

#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
	/* SN2 workaround: route DMA through PCI virtual channels and
	 * skip NVRAM (platform-specific lockup avoidance). */
	if (ia64_platform_is("sn2")) {
		printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
		       "dual channel lockup workaround\n", ha->host_no);
		ha->flags.use_pci_vchannel = 1;
		driver_setup.no_nvram = 1;
	}
#endif

	/* The ISP1040 has no usable NVRAM in this driver. */
	if (IS_ISP1040(ha))
		driver_setup.no_nvram = 1;

	dprintk(1, "Configure PCI space for adapter...\n");

	reg = ha->iobase;

	/* Clear any stale semaphore/interrupt state; the final read
	 * flushes the posted writes. */
	WRT_REG_WORD(&reg->semaphore, 0);
	WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
	WRT_REG_WORD(&reg->host_cmd, HC_CLR_HOST_INT);
	RD_REG_WORD(&reg->host_cmd);

	/* NVRAM failure is non-fatal; defaults are used instead. */
	if (qla1280_read_nvram(ha)) {
		dprintk(2, "qla1280_initialize_adapter: failed to read "
			"NVRAM\n");
	}

	spin_lock_irqsave(ha->host->host_lock, flags);

	status = qla1280_load_firmware(ha);
	if (status) {
		printk(KERN_ERR "scsi(%li): initialize: pci probe failed!\n",
		       ha->host_no);
		goto out;
	}

	/* Push NVRAM (or default) settings into the chip. */
	dprintk(1, "scsi(%ld): Configure NVRAM parameters\n", ha->host_no);
	qla1280_nvram_config(ha);

	if (ha->flags.disable_host_adapter) {
		status = 1;
		goto out;
	}

	status = qla1280_init_rings(ha);
	if (status)
		goto out;

	/* Reset each bus unless disabled; two consecutive failures
	 * mark the bus dead. */
	for (bus = 0; bus < ha->ports; bus++) {
		if (!ha->bus_settings[bus].disable_scsi_reset &&
		    qla1280_bus_reset(ha, bus) &&
		    qla1280_bus_reset(ha, bus))
			ha->bus_settings[bus].scsi_bus_dead = 1;
	}

	ha->flags.online = 1;
 out:
	spin_unlock_irqrestore(ha->host->host_lock, flags);

	if (status)
		dprintk(2, "qla1280_initialize_adapter: **** FAILED ****\n");

	LEAVE("qla1280_initialize_adapter");
	return status;
}
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
/*
 * qla1280_request_firmware
 *      Return the firmware image for this board, loading and caching
 *      it in qla1280_fw_tbl[] on first use.  Caller holds the host
 *      lock; it is dropped while sleeping in request_firmware().
 *
 * Returns the firmware, or an ERR_PTR() on failure.  On success the
 * first three image bytes are recorded as the firmware version.
 */
static const struct firmware *
qla1280_request_firmware(struct scsi_qla_host *ha)
{
	const struct firmware *fw;
	int err;
	int index;
	char *fwname;

	spin_unlock_irq(ha->host->host_lock);
	mutex_lock(&qla1280_firmware_mutex);

	/* Reuse a previously loaded image if another instance of the
	 * same board type already fetched it. */
	index = ql1280_board_tbl[ha->devnum].fw_index;
	fw = qla1280_fw_tbl[index].fw;
	if (fw)
		goto out;

	fwname = qla1280_fw_tbl[index].fwname;
	err = request_firmware(&fw, fwname, &ha->pdev->dev);

	if (err) {
		printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
		       fwname, err);
		fw = ERR_PTR(err);
		goto unlock;
	}
	/* Image is a stream of 16-bit words; reject odd or absurdly
	 * short files before trusting the header bytes. */
	if ((fw->size % 2) || (fw->size < 6)) {
		printk(KERN_ERR "Invalid firmware length %zu in image \"%s\"\n",
		       fw->size, fwname);
		release_firmware(fw);
		fw = ERR_PTR(-EINVAL);
		goto unlock;
	}

	qla1280_fw_tbl[index].fw = fw;

 out:
	ha->fwver1 = fw->data[0];
	ha->fwver2 = fw->data[1];
	ha->fwver3 = fw->data[2];
 unlock:
	mutex_unlock(&qla1280_firmware_mutex);
	spin_lock_irq(ha->host->host_lock);
	return fw;
}
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
/*
 * qla1280_chip_diag
 *      Reset the ISP and verify it is healthy: wait for the reset bit
 *      to clear, release the RISC, check the product-ID mailboxes and
 *      run a mailbox register loopback test.
 *
 * Returns 0 on success, non-zero on failure.
 */
static int
qla1280_chip_diag(struct scsi_qla_host *ha)
{
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct device_reg __iomem *reg = ha->iobase;
	int status = 0;
	int cnt;
	uint16_t data;
	dprintk(3, "qla1280_chip_diag: testing device at 0x%p \n", &reg->id_l);

	dprintk(1, "scsi(%ld): Verifying chip\n", ha->host_no);

	/* Issue a soft reset of the ISP. */
	WRT_REG_WORD(&reg->ictrl, ISP_RESET);

	/* Give the chip a moment before polling. */
	udelay(20);
	data = qla1280_debounce_register(&reg->ictrl);

	/* Poll (up to ~5s) for the reset bit to self-clear. */
	for (cnt = 1000000; cnt && data & ISP_RESET; cnt--) {
		udelay(5);
		data = RD_REG_WORD(&reg->ictrl);
	}

	if (!cnt)
		goto fail;

	dprintk(3, "qla1280_chip_diag: reset register cleared by chip reset\n");

	/* Reset the chip configuration register. */
	WRT_REG_WORD(&reg->cfg_1, 0);

	/* Reset and release the RISC processor, keeping the on-board
	 * BIOS disabled. */
	WRT_REG_WORD(&reg->host_cmd, HC_RESET_RISC |
		     HC_RELEASE_RISC | HC_DISABLE_BIOS);

	RD_REG_WORD(&reg->id_l);	/* Flush the posted write. */
	data = qla1280_debounce_register(&reg->mailbox0);

	/* Wait for the RISC to come out of its busy state. */
	for (cnt = 1000000; cnt && data == MBS_BUSY; cnt--) {
		udelay(5);
		data = RD_REG_WORD(&reg->mailbox0);
	}

	if (!cnt)
		goto fail;

	/* The firmware-less chip reports its product ID in mailboxes
	 * 1-4; verify we are talking to a supported part. */
	dprintk(3, "qla1280_chip_diag: Checking product ID of chip\n");

	if (RD_REG_WORD(&reg->mailbox1) != PROD_ID_1 ||
	    (RD_REG_WORD(&reg->mailbox2) != PROD_ID_2 &&
	     RD_REG_WORD(&reg->mailbox2) != PROD_ID_2a) ||
	    RD_REG_WORD(&reg->mailbox3) != PROD_ID_3 ||
	    RD_REG_WORD(&reg->mailbox4) != PROD_ID_4) {
		printk(KERN_INFO "qla1280: Wrong product ID = "
		       "0x%x,0x%x,0x%x,0x%x\n",
		       RD_REG_WORD(&reg->mailbox1),
		       RD_REG_WORD(&reg->mailbox2),
		       RD_REG_WORD(&reg->mailbox3),
		       RD_REG_WORD(&reg->mailbox4));
		goto fail;
	}

	/* Interrupts must be on for the mailbox test below. */
	qla1280_enable_intrs(ha);

	dprintk(1, "qla1280_chip_diag: Checking mailboxes of chip\n");

	/* Loopback test: the chip should echo these patterns back. */
	mb[0] = MBC_MAILBOX_REGISTER_TEST;
	mb[1] = 0xAAAA;
	mb[2] = 0x5555;
	mb[3] = 0xAA55;
	mb[4] = 0x55AA;
	mb[5] = 0xA5A5;
	mb[6] = 0x5A5A;
	mb[7] = 0x2525;

	status = qla1280_mailbox_command(ha, 0xff, mb);
	if (status)
		goto fail;

	if (mb[1] != 0xAAAA || mb[2] != 0x5555 || mb[3] != 0xAA55 ||
	    mb[4] != 0x55AA || mb[5] != 0xA5A5 || mb[6] != 0x5A5A ||
	    mb[7] != 0x2525) {
		printk(KERN_INFO "qla1280: Failed mbox check\n");
		goto fail;
	}

	dprintk(3, "qla1280_chip_diag: exiting normally\n");
	return 0;
 fail:
	dprintk(2, "qla1280_chip_diag: **** FAILED ****\n");
	return status;
}
1699
1700static int
1701qla1280_load_firmware_pio(struct scsi_qla_host *ha)
1702{
1703
1704
1705 const struct firmware *fw;
1706 const __le16 *fw_data;
1707 uint16_t risc_address, risc_code_size;
1708 uint16_t mb[MAILBOX_REGISTER_COUNT], i;
1709 int err = 0;
1710
1711 fw = qla1280_request_firmware(ha);
1712 if (IS_ERR(fw))
1713 return PTR_ERR(fw);
1714
1715 fw_data = (const __le16 *)&fw->data[0];
1716 ha->fwstart = __le16_to_cpu(fw_data[2]);
1717
1718
1719 risc_address = ha->fwstart;
1720 fw_data = (const __le16 *)&fw->data[6];
1721 risc_code_size = (fw->size - 6) / 2;
1722
1723 for (i = 0; i < risc_code_size; i++) {
1724 mb[0] = MBC_WRITE_RAM_WORD;
1725 mb[1] = risc_address + i;
1726 mb[2] = __le16_to_cpu(fw_data[i]);
1727
1728 err = qla1280_mailbox_command(ha, BIT_0 | BIT_1 | BIT_2, mb);
1729 if (err) {
1730 printk(KERN_ERR "scsi(%li): Failed to load firmware\n",
1731 ha->host_no);
1732 break;
1733 }
1734 }
1735
1736 return err;
1737}
1738
1739#define DUMP_IT_BACK 0
1740static int
1741qla1280_load_firmware_dma(struct scsi_qla_host *ha)
1742{
1743
1744 const struct firmware *fw;
1745 const __le16 *fw_data;
1746 uint16_t risc_address, risc_code_size;
1747 uint16_t mb[MAILBOX_REGISTER_COUNT], cnt;
1748 int err = 0, num, i;
1749#if DUMP_IT_BACK
1750 uint8_t *sp, *tbuf;
1751 dma_addr_t p_tbuf;
1752
1753 tbuf = pci_alloc_consistent(ha->pdev, 8000, &p_tbuf);
1754 if (!tbuf)
1755 return -ENOMEM;
1756#endif
1757
1758 fw = qla1280_request_firmware(ha);
1759 if (IS_ERR(fw))
1760 return PTR_ERR(fw);
1761
1762 fw_data = (const __le16 *)&fw->data[0];
1763 ha->fwstart = __le16_to_cpu(fw_data[2]);
1764
1765
1766 risc_address = ha->fwstart;
1767 fw_data = (const __le16 *)&fw->data[6];
1768 risc_code_size = (fw->size - 6) / 2;
1769
1770 dprintk(1, "%s: DMA RISC code (%i) words\n",
1771 __func__, risc_code_size);
1772
1773 num = 0;
1774 while (risc_code_size > 0) {
1775 int warn __attribute__((unused)) = 0;
1776
1777 cnt = 2000 >> 1;
1778
1779 if (cnt > risc_code_size)
1780 cnt = risc_code_size;
1781
1782 dprintk(2, "qla1280_setup_chip: loading risc @ =(0x%p),"
1783 "%d,%d(0x%x)\n",
1784 fw_data, cnt, num, risc_address);
1785 for(i = 0; i < cnt; i++)
1786 ((__le16 *)ha->request_ring)[i] = fw_data[i];
1787
1788 mb[0] = MBC_LOAD_RAM;
1789 mb[1] = risc_address;
1790 mb[4] = cnt;
1791 mb[3] = ha->request_dma & 0xffff;
1792 mb[2] = (ha->request_dma >> 16) & 0xffff;
1793 mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
1794 mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
1795 dprintk(2, "%s: op=%d 0x%p = 0x%4x,0x%4x,0x%4x,0x%4x\n",
1796 __func__, mb[0],
1797 (void *)(long)ha->request_dma,
1798 mb[6], mb[7], mb[2], mb[3]);
1799 err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
1800 BIT_1 | BIT_0, mb);
1801 if (err) {
1802 printk(KERN_ERR "scsi(%li): Failed to load partial "
1803 "segment of f\n", ha->host_no);
1804 goto out;
1805 }
1806
1807#if DUMP_IT_BACK
1808 mb[0] = MBC_DUMP_RAM;
1809 mb[1] = risc_address;
1810 mb[4] = cnt;
1811 mb[3] = p_tbuf & 0xffff;
1812 mb[2] = (p_tbuf >> 16) & 0xffff;
1813 mb[7] = pci_dma_hi32(p_tbuf) & 0xffff;
1814 mb[6] = pci_dma_hi32(p_tbuf) >> 16;
1815
1816 err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
1817 BIT_1 | BIT_0, mb);
1818 if (err) {
1819 printk(KERN_ERR
1820 "Failed to dump partial segment of f/w\n");
1821 goto out;
1822 }
1823 sp = (uint8_t *)ha->request_ring;
1824 for (i = 0; i < (cnt << 1); i++) {
1825 if (tbuf[i] != sp[i] && warn++ < 10) {
1826 printk(KERN_ERR "%s: FW compare error @ "
1827 "byte(0x%x) loop#=%x\n",
1828 __func__, i, num);
1829 printk(KERN_ERR "%s: FWbyte=%x "
1830 "FWfromChip=%x\n",
1831 __func__, sp[i], tbuf[i]);
1832
1833 }
1834 }
1835#endif
1836 risc_address += cnt;
1837 risc_code_size = risc_code_size - cnt;
1838 fw_data = fw_data + cnt;
1839 num++;
1840 }
1841
1842 out:
1843#if DUMP_IT_BACK
1844 pci_free_consistent(ha->pdev, 8000, tbuf, p_tbuf);
1845#endif
1846 return err;
1847}
1848
1849static int
1850qla1280_start_firmware(struct scsi_qla_host *ha)
1851{
1852 uint16_t mb[MAILBOX_REGISTER_COUNT];
1853 int err;
1854
1855 dprintk(1, "%s: Verifying checksum of loaded RISC code.\n",
1856 __func__);
1857
1858
1859 mb[0] = MBC_VERIFY_CHECKSUM;
1860
1861 mb[1] = ha->fwstart;
1862 err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
1863 if (err) {
1864 printk(KERN_ERR "scsi(%li): RISC checksum failed.\n", ha->host_no);
1865 return err;
1866 }
1867
1868
1869 dprintk(1, "%s: start firmware running.\n", __func__);
1870 mb[0] = MBC_EXECUTE_FIRMWARE;
1871 mb[1] = ha->fwstart;
1872 err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
1873 if (err) {
1874 printk(KERN_ERR "scsi(%li): Failed to start firmware\n",
1875 ha->host_no);
1876 }
1877
1878 return err;
1879}
1880
/*
 * qla1280_load_firmware
 *      Run chip diagnostics, download firmware (PIO on ISP1040, DMA on
 *      later parts), then start it.
 *
 * Returns: 0 on success, first failing step's status otherwise.
 */
static int
qla1280_load_firmware(struct scsi_qla_host *ha)
{
	int err;

	err = qla1280_chip_diag(ha);
	if (err)
		return err;

	/* The ISP1040 cannot take the DMA download path. */
	err = IS_ISP1040(ha) ? qla1280_load_firmware_pio(ha)
			     : qla1280_load_firmware_dma(ha);
	if (err)
		return err;

	return qla1280_start_firmware(ha);
}
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914static int
1915qla1280_init_rings(struct scsi_qla_host *ha)
1916{
1917 uint16_t mb[MAILBOX_REGISTER_COUNT];
1918 int status = 0;
1919
1920 ENTER("qla1280_init_rings");
1921
1922
1923 memset(ha->outstanding_cmds, 0,
1924 sizeof(struct srb *) * MAX_OUTSTANDING_COMMANDS);
1925
1926
1927 ha->request_ring_ptr = ha->request_ring;
1928 ha->req_ring_index = 0;
1929 ha->req_q_cnt = REQUEST_ENTRY_CNT;
1930
1931 mb[0] = MBC_INIT_REQUEST_QUEUE_A64;
1932 mb[1] = REQUEST_ENTRY_CNT;
1933 mb[3] = ha->request_dma & 0xffff;
1934 mb[2] = (ha->request_dma >> 16) & 0xffff;
1935 mb[4] = 0;
1936 mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
1937 mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
1938 if (!(status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_4 |
1939 BIT_3 | BIT_2 | BIT_1 | BIT_0,
1940 &mb[0]))) {
1941
1942 ha->response_ring_ptr = ha->response_ring;
1943 ha->rsp_ring_index = 0;
1944
1945 mb[0] = MBC_INIT_RESPONSE_QUEUE_A64;
1946 mb[1] = RESPONSE_ENTRY_CNT;
1947 mb[3] = ha->response_dma & 0xffff;
1948 mb[2] = (ha->response_dma >> 16) & 0xffff;
1949 mb[5] = 0;
1950 mb[7] = pci_dma_hi32(ha->response_dma) & 0xffff;
1951 mb[6] = pci_dma_hi32(ha->response_dma) >> 16;
1952 status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_5 |
1953 BIT_3 | BIT_2 | BIT_1 | BIT_0,
1954 &mb[0]);
1955 }
1956
1957 if (status)
1958 dprintk(2, "qla1280_init_rings: **** FAILED ****\n");
1959
1960 LEAVE("qla1280_init_rings");
1961 return status;
1962}
1963
/*
 * qla1280_print_settings
 *      Dump the per-bus NVRAM settings (initiator id, reset delay,
 *      retry parameters, negotiation flags, timeouts and queue depths)
 *      through dprintk() at debug level 1.  Pure diagnostics - reads
 *      only from *nv, no side effects beyond logging.
 */
static void
qla1280_print_settings(struct nvram *nv)
{
	dprintk(1, "qla1280 : initiator scsi id bus[0]=%d\n",
		nv->bus[0].config_1.initiator_id);
	dprintk(1, "qla1280 : initiator scsi id bus[1]=%d\n",
		nv->bus[1].config_1.initiator_id);

	dprintk(1, "qla1280 : bus reset delay[0]=%d\n",
		nv->bus[0].bus_reset_delay);
	dprintk(1, "qla1280 : bus reset delay[1]=%d\n",
		nv->bus[1].bus_reset_delay);

	dprintk(1, "qla1280 : retry count[0]=%d\n", nv->bus[0].retry_count);
	dprintk(1, "qla1280 : retry delay[0]=%d\n", nv->bus[0].retry_delay);
	dprintk(1, "qla1280 : retry count[1]=%d\n", nv->bus[1].retry_count);
	dprintk(1, "qla1280 : retry delay[1]=%d\n", nv->bus[1].retry_delay);

	dprintk(1, "qla1280 : async data setup time[0]=%d\n",
		nv->bus[0].config_2.async_data_setup_time);
	dprintk(1, "qla1280 : async data setup time[1]=%d\n",
		nv->bus[1].config_2.async_data_setup_time);

	dprintk(1, "qla1280 : req/ack active negation[0]=%d\n",
		nv->bus[0].config_2.req_ack_active_negation);
	dprintk(1, "qla1280 : req/ack active negation[1]=%d\n",
		nv->bus[1].config_2.req_ack_active_negation);

	dprintk(1, "qla1280 : data line active negation[0]=%d\n",
		nv->bus[0].config_2.data_line_active_negation);
	dprintk(1, "qla1280 : data line active negation[1]=%d\n",
		nv->bus[1].config_2.data_line_active_negation);

	dprintk(1, "qla1280 : disable loading risc code=%d\n",
		nv->cntr_flags_1.disable_loading_risc_code);

	dprintk(1, "qla1280 : enable 64bit addressing=%d\n",
		nv->cntr_flags_1.enable_64bit_addressing);

	dprintk(1, "qla1280 : selection timeout limit[0]=%d\n",
		nv->bus[0].selection_timeout);
	dprintk(1, "qla1280 : selection timeout limit[1]=%d\n",
		nv->bus[1].selection_timeout);

	dprintk(1, "qla1280 : max queue depth[0]=%d\n",
		nv->bus[0].max_queue_depth);
	dprintk(1, "qla1280 : max queue depth[1]=%d\n",
		nv->bus[1].max_queue_depth);
}
2013
2014static void
2015qla1280_set_target_defaults(struct scsi_qla_host *ha, int bus, int target)
2016{
2017 struct nvram *nv = &ha->nvram;
2018
2019 nv->bus[bus].target[target].parameter.renegotiate_on_error = 1;
2020 nv->bus[bus].target[target].parameter.auto_request_sense = 1;
2021 nv->bus[bus].target[target].parameter.tag_queuing = 1;
2022 nv->bus[bus].target[target].parameter.enable_sync = 1;
2023#if 1
2024 nv->bus[bus].target[target].parameter.enable_wide = 1;
2025#endif
2026 nv->bus[bus].target[target].execution_throttle =
2027 nv->bus[bus].max_queue_depth - 1;
2028 nv->bus[bus].target[target].parameter.parity_checking = 1;
2029 nv->bus[bus].target[target].parameter.disconnect_allowed = 1;
2030
2031 if (IS_ISP1x160(ha)) {
2032 nv->bus[bus].target[target].flags.flags1x160.device_enable = 1;
2033 nv->bus[bus].target[target].flags.flags1x160.sync_offset = 0x0e;
2034 nv->bus[bus].target[target].sync_period = 9;
2035 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 1;
2036 nv->bus[bus].target[target].ppr_1x160.flags.ppr_options = 2;
2037 nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width = 1;
2038 } else {
2039 nv->bus[bus].target[target].flags.flags1x80.device_enable = 1;
2040 nv->bus[bus].target[target].flags.flags1x80.sync_offset = 12;
2041 nv->bus[bus].target[target].sync_period = 10;
2042 }
2043}
2044
2045static void
2046qla1280_set_defaults(struct scsi_qla_host *ha)
2047{
2048 struct nvram *nv = &ha->nvram;
2049 int bus, target;
2050
2051 dprintk(1, "Using defaults for NVRAM: \n");
2052 memset(nv, 0, sizeof(struct nvram));
2053
2054
2055 nv->firmware_feature.f.enable_fast_posting = 1;
2056 nv->firmware_feature.f.disable_synchronous_backoff = 1;
2057 nv->termination.scsi_bus_0_control = 3;
2058 nv->termination.scsi_bus_1_control = 3;
2059 nv->termination.auto_term_support = 1;
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069 nv->isp_config.burst_enable = 1;
2070 if (IS_ISP1040(ha))
2071 nv->isp_config.fifo_threshold |= 3;
2072 else
2073 nv->isp_config.fifo_threshold |= 4;
2074
2075 if (IS_ISP1x160(ha))
2076 nv->isp_parameter = 0x01;
2077
2078 for (bus = 0; bus < MAX_BUSES; bus++) {
2079 nv->bus[bus].config_1.initiator_id = 7;
2080 nv->bus[bus].config_2.req_ack_active_negation = 1;
2081 nv->bus[bus].config_2.data_line_active_negation = 1;
2082 nv->bus[bus].selection_timeout = 250;
2083 nv->bus[bus].max_queue_depth = 32;
2084
2085 if (IS_ISP1040(ha)) {
2086 nv->bus[bus].bus_reset_delay = 3;
2087 nv->bus[bus].config_2.async_data_setup_time = 6;
2088 nv->bus[bus].retry_delay = 1;
2089 } else {
2090 nv->bus[bus].bus_reset_delay = 5;
2091 nv->bus[bus].config_2.async_data_setup_time = 8;
2092 }
2093
2094 for (target = 0; target < MAX_TARGETS; target++)
2095 qla1280_set_target_defaults(ha, bus, target);
2096 }
2097}
2098
2099static int
2100qla1280_config_target(struct scsi_qla_host *ha, int bus, int target)
2101{
2102 struct nvram *nv = &ha->nvram;
2103 uint16_t mb[MAILBOX_REGISTER_COUNT];
2104 int status, lun;
2105 uint16_t flag;
2106
2107
2108 mb[0] = MBC_SET_TARGET_PARAMETERS;
2109 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
2110
2111
2112
2113
2114
2115
2116 mb[2] = (TP_RENEGOTIATE | TP_AUTO_REQUEST_SENSE | TP_TAGGED_QUEUE
2117 | TP_WIDE | TP_PARITY | TP_DISCONNECT);
2118
2119 if (IS_ISP1x160(ha))
2120 mb[3] = nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8;
2121 else
2122 mb[3] = nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8;
2123 mb[3] |= nv->bus[bus].target[target].sync_period;
2124 status = qla1280_mailbox_command(ha, 0x0f, mb);
2125
2126
2127 flag = (BIT_0 << target);
2128 if (nv->bus[bus].target[target].parameter.tag_queuing)
2129 ha->bus_settings[bus].qtag_enables |= flag;
2130
2131
2132 if (IS_ISP1x160(ha)) {
2133 if (nv->bus[bus].target[target].flags.flags1x160.device_enable)
2134 ha->bus_settings[bus].device_enables |= flag;
2135 ha->bus_settings[bus].lun_disables |= 0;
2136 } else {
2137 if (nv->bus[bus].target[target].flags.flags1x80.device_enable)
2138 ha->bus_settings[bus].device_enables |= flag;
2139
2140 if (nv->bus[bus].target[target].flags.flags1x80.lun_disable)
2141 ha->bus_settings[bus].lun_disables |= flag;
2142 }
2143
2144
2145 for (lun = 0; lun < MAX_LUNS; lun++) {
2146 mb[0] = MBC_SET_DEVICE_QUEUE;
2147 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
2148 mb[1] |= lun;
2149 mb[2] = nv->bus[bus].max_queue_depth;
2150 mb[3] = nv->bus[bus].target[target].execution_throttle;
2151 status |= qla1280_mailbox_command(ha, 0x0f, mb);
2152 }
2153
2154 return status;
2155}
2156
2157static int
2158qla1280_config_bus(struct scsi_qla_host *ha, int bus)
2159{
2160 struct nvram *nv = &ha->nvram;
2161 uint16_t mb[MAILBOX_REGISTER_COUNT];
2162 int target, status;
2163
2164
2165 ha->bus_settings[bus].disable_scsi_reset =
2166 nv->bus[bus].config_1.scsi_reset_disable;
2167
2168
2169 ha->bus_settings[bus].id = nv->bus[bus].config_1.initiator_id;
2170 mb[0] = MBC_SET_INITIATOR_ID;
2171 mb[1] = bus ? ha->bus_settings[bus].id | BIT_7 :
2172 ha->bus_settings[bus].id;
2173 status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
2174
2175
2176 ha->bus_settings[bus].bus_reset_delay =
2177 nv->bus[bus].bus_reset_delay;
2178
2179
2180 ha->bus_settings[bus].hiwat = nv->bus[bus].max_queue_depth - 1;
2181
2182
2183 for (target = 0; target < MAX_TARGETS; target++)
2184 status |= qla1280_config_target(ha, bus, target);
2185
2186 return status;
2187}
2188
2189static int
2190qla1280_nvram_config(struct scsi_qla_host *ha)
2191{
2192 struct device_reg __iomem *reg = ha->iobase;
2193 struct nvram *nv = &ha->nvram;
2194 int bus, target, status = 0;
2195 uint16_t mb[MAILBOX_REGISTER_COUNT];
2196
2197 ENTER("qla1280_nvram_config");
2198
2199 if (ha->nvram_valid) {
2200
2201 for (bus = 0; bus < MAX_BUSES; bus++)
2202 for (target = 0; target < MAX_TARGETS; target++) {
2203 nv->bus[bus].target[target].parameter.
2204 auto_request_sense = 1;
2205 }
2206 } else {
2207 qla1280_set_defaults(ha);
2208 }
2209
2210 qla1280_print_settings(nv);
2211
2212
2213 ha->flags.disable_risc_code_load =
2214 nv->cntr_flags_1.disable_loading_risc_code;
2215
2216 if (IS_ISP1040(ha)) {
2217 uint16_t hwrev, cfg1, cdma_conf, ddma_conf;
2218
2219 hwrev = RD_REG_WORD(®->cfg_0) & ISP_CFG0_HWMSK;
2220
2221 cfg1 = RD_REG_WORD(®->cfg_1) & ~(BIT_4 | BIT_5 | BIT_6);
2222 cdma_conf = RD_REG_WORD(®->cdma_cfg);
2223 ddma_conf = RD_REG_WORD(®->ddma_cfg);
2224
2225
2226 if (hwrev != ISP_CFG0_1040A)
2227 cfg1 |= nv->isp_config.fifo_threshold << 4;
2228
2229 cfg1 |= nv->isp_config.burst_enable << 2;
2230 WRT_REG_WORD(®->cfg_1, cfg1);
2231
2232 WRT_REG_WORD(®->cdma_cfg, cdma_conf | CDMA_CONF_BENAB);
2233 WRT_REG_WORD(®->ddma_cfg, cdma_conf | DDMA_CONF_BENAB);
2234 } else {
2235 uint16_t cfg1, term;
2236
2237
2238 cfg1 = nv->isp_config.fifo_threshold << 4;
2239 cfg1 |= nv->isp_config.burst_enable << 2;
2240
2241 if (ha->ports > 1)
2242 cfg1 |= BIT_13;
2243 WRT_REG_WORD(®->cfg_1, cfg1);
2244
2245
2246 WRT_REG_WORD(®->gpio_enable,
2247 BIT_7 | BIT_3 | BIT_2 | BIT_1 | BIT_0);
2248 term = nv->termination.scsi_bus_1_control;
2249 term |= nv->termination.scsi_bus_0_control << 2;
2250 term |= nv->termination.auto_term_support << 7;
2251 RD_REG_WORD(®->id_l);
2252 WRT_REG_WORD(®->gpio_data, term);
2253 }
2254 RD_REG_WORD(®->id_l);
2255
2256
2257 mb[0] = MBC_SET_SYSTEM_PARAMETER;
2258 mb[1] = nv->isp_parameter;
2259 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
2260
2261 if (IS_ISP1x40(ha)) {
2262
2263 mb[0] = MBC_SET_CLOCK_RATE;
2264 mb[1] = 40;
2265 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2266 }
2267
2268
2269 mb[0] = MBC_SET_FIRMWARE_FEATURES;
2270 mb[1] = nv->firmware_feature.f.enable_fast_posting;
2271 mb[1] |= nv->firmware_feature.f.report_lvd_bus_transition << 1;
2272 mb[1] |= nv->firmware_feature.f.disable_synchronous_backoff << 5;
2273#if defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_SGI_SN2)
2274 if (ia64_platform_is("sn2")) {
2275 printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
2276 "workaround\n", ha->host_no);
2277 mb[1] |= nv->firmware_feature.f.unused_9 << 9;
2278 }
2279#endif
2280 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2281
2282
2283 mb[0] = MBC_SET_RETRY_COUNT;
2284 mb[1] = nv->bus[0].retry_count;
2285 mb[2] = nv->bus[0].retry_delay;
2286 mb[6] = nv->bus[1].retry_count;
2287 mb[7] = nv->bus[1].retry_delay;
2288 status |= qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_2 |
2289 BIT_1 | BIT_0, &mb[0]);
2290
2291
2292 mb[0] = MBC_SET_ASYNC_DATA_SETUP;
2293 mb[1] = nv->bus[0].config_2.async_data_setup_time;
2294 mb[2] = nv->bus[1].config_2.async_data_setup_time;
2295 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2296
2297
2298 mb[0] = MBC_SET_ACTIVE_NEGATION;
2299 mb[1] = 0;
2300 if (nv->bus[0].config_2.req_ack_active_negation)
2301 mb[1] |= BIT_5;
2302 if (nv->bus[0].config_2.data_line_active_negation)
2303 mb[1] |= BIT_4;
2304 mb[2] = 0;
2305 if (nv->bus[1].config_2.req_ack_active_negation)
2306 mb[2] |= BIT_5;
2307 if (nv->bus[1].config_2.data_line_active_negation)
2308 mb[2] |= BIT_4;
2309 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2310
2311 mb[0] = MBC_SET_DATA_OVERRUN_RECOVERY;
2312 mb[1] = 2;
2313 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2314
2315
2316 mb[0] = MBC_SET_PCI_CONTROL;
2317 mb[1] = BIT_1;
2318 mb[2] = BIT_1;
2319 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2320
2321 mb[0] = MBC_SET_TAG_AGE_LIMIT;
2322 mb[1] = 8;
2323 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2324
2325
2326 mb[0] = MBC_SET_SELECTION_TIMEOUT;
2327 mb[1] = nv->bus[0].selection_timeout;
2328 mb[2] = nv->bus[1].selection_timeout;
2329 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2330
2331 for (bus = 0; bus < ha->ports; bus++)
2332 status |= qla1280_config_bus(ha, bus);
2333
2334 if (status)
2335 dprintk(2, "qla1280_nvram_config: **** FAILED ****\n");
2336
2337 LEAVE("qla1280_nvram_config");
2338 return status;
2339}
2340
2341
2342
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353static uint16_t
2354qla1280_get_nvram_word(struct scsi_qla_host *ha, uint32_t address)
2355{
2356 uint32_t nv_cmd;
2357 uint16_t data;
2358
2359 nv_cmd = address << 16;
2360 nv_cmd |= NV_READ_OP;
2361
2362 data = le16_to_cpu(qla1280_nvram_request(ha, nv_cmd));
2363
2364 dprintk(8, "qla1280_get_nvram_word: exiting normally NVRAM data = "
2365 "0x%x", data);
2366
2367 return data;
2368}
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378
2379
2380
2381
2382
2383
2384static uint16_t
2385qla1280_nvram_request(struct scsi_qla_host *ha, uint32_t nv_cmd)
2386{
2387 struct device_reg __iomem *reg = ha->iobase;
2388 int cnt;
2389 uint16_t data = 0;
2390 uint16_t reg_data;
2391
2392
2393
2394 nv_cmd <<= 5;
2395 for (cnt = 0; cnt < 11; cnt++) {
2396 if (nv_cmd & BIT_31)
2397 qla1280_nv_write(ha, NV_DATA_OUT);
2398 else
2399 qla1280_nv_write(ha, 0);
2400 nv_cmd <<= 1;
2401 }
2402
2403
2404
2405 for (cnt = 0; cnt < 16; cnt++) {
2406 WRT_REG_WORD(®->nvram, (NV_SELECT | NV_CLOCK));
2407 RD_REG_WORD(®->id_l);
2408 NVRAM_DELAY();
2409 data <<= 1;
2410 reg_data = RD_REG_WORD(®->nvram);
2411 if (reg_data & NV_DATA_IN)
2412 data |= BIT_0;
2413 WRT_REG_WORD(®->nvram, NV_SELECT);
2414 RD_REG_WORD(®->id_l);
2415 NVRAM_DELAY();
2416 }
2417
2418
2419
2420 WRT_REG_WORD(®->nvram, NV_DESELECT);
2421 RD_REG_WORD(®->id_l);
2422 NVRAM_DELAY();
2423
2424 return data;
2425}
2426
2427static void
2428qla1280_nv_write(struct scsi_qla_host *ha, uint16_t data)
2429{
2430 struct device_reg __iomem *reg = ha->iobase;
2431
2432 WRT_REG_WORD(®->nvram, data | NV_SELECT);
2433 RD_REG_WORD(®->id_l);
2434 NVRAM_DELAY();
2435 WRT_REG_WORD(®->nvram, data | NV_SELECT | NV_CLOCK);
2436 RD_REG_WORD(®->id_l);
2437 NVRAM_DELAY();
2438 WRT_REG_WORD(®->nvram, data | NV_SELECT);
2439 RD_REG_WORD(®->id_l);
2440 NVRAM_DELAY();
2441}
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458static int
2459qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
2460{
2461 struct device_reg __iomem *reg = ha->iobase;
2462 int status = 0;
2463 int cnt;
2464 uint16_t *optr, *iptr;
2465 uint16_t __iomem *mptr;
2466 uint16_t data;
2467 DECLARE_COMPLETION_ONSTACK(wait);
2468 struct timer_list timer;
2469
2470 ENTER("qla1280_mailbox_command");
2471
2472 if (ha->mailbox_wait) {
2473 printk(KERN_ERR "Warning mailbox wait already in use!\n");
2474 }
2475 ha->mailbox_wait = &wait;
2476
2477
2478
2479
2480
2481
2482 mptr = (uint16_t __iomem *) ®->mailbox0;
2483 iptr = mb;
2484 for (cnt = 0; cnt < MAILBOX_REGISTER_COUNT; cnt++) {
2485 if (mr & BIT_0) {
2486 WRT_REG_WORD(mptr, (*iptr));
2487 }
2488
2489 mr >>= 1;
2490 mptr++;
2491 iptr++;
2492 }
2493
2494
2495
2496
2497 init_timer_on_stack(&timer);
2498 timer.expires = jiffies + 20*HZ;
2499 timer.data = (unsigned long)ha;
2500 timer.function = qla1280_mailbox_timeout;
2501 add_timer(&timer);
2502
2503 spin_unlock_irq(ha->host->host_lock);
2504 WRT_REG_WORD(®->host_cmd, HC_SET_HOST_INT);
2505 data = qla1280_debounce_register(®->istatus);
2506
2507 wait_for_completion(&wait);
2508 del_timer_sync(&timer);
2509
2510 spin_lock_irq(ha->host->host_lock);
2511
2512 ha->mailbox_wait = NULL;
2513
2514
2515 if (ha->mailbox_out[0] != MBS_CMD_CMP) {
2516 printk(KERN_WARNING "qla1280_mailbox_command: Command failed, "
2517 "mailbox0 = 0x%04x, mailbox_out0 = 0x%04x, istatus = "
2518 "0x%04x\n",
2519 mb[0], ha->mailbox_out[0], RD_REG_WORD(®->istatus));
2520 printk(KERN_WARNING "m0 %04x, m1 %04x, m2 %04x, m3 %04x\n",
2521 RD_REG_WORD(®->mailbox0), RD_REG_WORD(®->mailbox1),
2522 RD_REG_WORD(®->mailbox2), RD_REG_WORD(®->mailbox3));
2523 printk(KERN_WARNING "m4 %04x, m5 %04x, m6 %04x, m7 %04x\n",
2524 RD_REG_WORD(®->mailbox4), RD_REG_WORD(®->mailbox5),
2525 RD_REG_WORD(®->mailbox6), RD_REG_WORD(®->mailbox7));
2526 status = 1;
2527 }
2528
2529
2530 optr = mb;
2531 iptr = (uint16_t *) &ha->mailbox_out[0];
2532 mr = MAILBOX_REGISTER_COUNT;
2533 memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t));
2534
2535 if (ha->flags.reset_marker)
2536 qla1280_rst_aen(ha);
2537
2538 if (status)
2539 dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = "
2540 "0x%x ****\n", mb[0]);
2541
2542 LEAVE("qla1280_mailbox_command");
2543 return status;
2544}
2545
2546
2547
2548
2549
2550
2551
2552
2553static void
2554qla1280_poll(struct scsi_qla_host *ha)
2555{
2556 struct device_reg __iomem *reg = ha->iobase;
2557 uint16_t data;
2558 LIST_HEAD(done_q);
2559
2560
2561
2562
2563 data = RD_REG_WORD(®->istatus);
2564 if (data & RISC_INT)
2565 qla1280_isr(ha, &done_q);
2566
2567 if (!ha->mailbox_wait) {
2568 if (ha->flags.reset_marker)
2569 qla1280_rst_aen(ha);
2570 }
2571
2572 if (!list_empty(&done_q))
2573 qla1280_done(ha);
2574
2575
2576}
2577
2578
2579
2580
2581
2582
2583
2584
2585
2586
2587
2588
2589static int
2590qla1280_bus_reset(struct scsi_qla_host *ha, int bus)
2591{
2592 uint16_t mb[MAILBOX_REGISTER_COUNT];
2593 uint16_t reset_delay;
2594 int status;
2595
2596 dprintk(3, "qla1280_bus_reset: entered\n");
2597
2598 if (qla1280_verbose)
2599 printk(KERN_INFO "scsi(%li:%i): Resetting SCSI BUS\n",
2600 ha->host_no, bus);
2601
2602 reset_delay = ha->bus_settings[bus].bus_reset_delay;
2603 mb[0] = MBC_BUS_RESET;
2604 mb[1] = reset_delay;
2605 mb[2] = (uint16_t) bus;
2606 status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2607
2608 if (status) {
2609 if (ha->bus_settings[bus].failed_reset_count > 2)
2610 ha->bus_settings[bus].scsi_bus_dead = 1;
2611 ha->bus_settings[bus].failed_reset_count++;
2612 } else {
2613 spin_unlock_irq(ha->host->host_lock);
2614 ssleep(reset_delay);
2615 spin_lock_irq(ha->host->host_lock);
2616
2617 ha->bus_settings[bus].scsi_bus_dead = 0;
2618 ha->bus_settings[bus].failed_reset_count = 0;
2619 ha->bus_settings[bus].reset_marker = 0;
2620
2621 qla1280_marker(ha, bus, 0, 0, MK_SYNC_ALL);
2622 }
2623
2624
2625
2626
2627
2628
2629 if (status)
2630 dprintk(2, "qla1280_bus_reset: **** FAILED ****\n");
2631 else
2632 dprintk(3, "qla1280_bus_reset: exiting normally\n");
2633
2634 return status;
2635}
2636
2637
2638
2639
2640
2641
2642
2643
2644
2645
2646
2647
2648
2649static int
2650qla1280_device_reset(struct scsi_qla_host *ha, int bus, int target)
2651{
2652 uint16_t mb[MAILBOX_REGISTER_COUNT];
2653 int status;
2654
2655 ENTER("qla1280_device_reset");
2656
2657 mb[0] = MBC_ABORT_TARGET;
2658 mb[1] = (bus ? (target | BIT_7) : target) << 8;
2659 mb[2] = 1;
2660 status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2661
2662
2663 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
2664
2665 if (status)
2666 dprintk(2, "qla1280_device_reset: **** FAILED ****\n");
2667
2668 LEAVE("qla1280_device_reset");
2669 return status;
2670}
2671
2672
2673
2674
2675
2676
2677
2678
2679
2680
2681
2682
2683static int
2684qla1280_abort_command(struct scsi_qla_host *ha, struct srb * sp, int handle)
2685{
2686 uint16_t mb[MAILBOX_REGISTER_COUNT];
2687 unsigned int bus, target, lun;
2688 int status;
2689
2690 ENTER("qla1280_abort_command");
2691
2692 bus = SCSI_BUS_32(sp->cmd);
2693 target = SCSI_TCN_32(sp->cmd);
2694 lun = SCSI_LUN_32(sp->cmd);
2695
2696 sp->flags |= SRB_ABORT_PENDING;
2697
2698 mb[0] = MBC_ABORT_COMMAND;
2699 mb[1] = (bus ? target | BIT_7 : target) << 8 | lun;
2700 mb[2] = handle >> 16;
2701 mb[3] = handle & 0xffff;
2702 status = qla1280_mailbox_command(ha, 0x0f, &mb[0]);
2703
2704 if (status) {
2705 dprintk(2, "qla1280_abort_command: **** FAILED ****\n");
2706 sp->flags &= ~SRB_ABORT_PENDING;
2707 }
2708
2709
2710 LEAVE("qla1280_abort_command");
2711 return status;
2712}
2713
2714
2715
2716
2717
2718
2719
2720
2721static void
2722qla1280_reset_adapter(struct scsi_qla_host *ha)
2723{
2724 struct device_reg __iomem *reg = ha->iobase;
2725
2726 ENTER("qla1280_reset_adapter");
2727
2728
2729 ha->flags.online = 0;
2730 WRT_REG_WORD(®->ictrl, ISP_RESET);
2731 WRT_REG_WORD(®->host_cmd,
2732 HC_RESET_RISC | HC_RELEASE_RISC | HC_DISABLE_BIOS);
2733 RD_REG_WORD(®->id_l);
2734
2735 LEAVE("qla1280_reset_adapter");
2736}
2737
2738
2739
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749static void
2750qla1280_marker(struct scsi_qla_host *ha, int bus, int id, int lun, u8 type)
2751{
2752 struct mrk_entry *pkt;
2753
2754 ENTER("qla1280_marker");
2755
2756
2757 if ((pkt = (struct mrk_entry *) qla1280_req_pkt(ha))) {
2758 pkt->entry_type = MARKER_TYPE;
2759 pkt->lun = (uint8_t) lun;
2760 pkt->target = (uint8_t) (bus ? (id | BIT_7) : id);
2761 pkt->modifier = type;
2762 pkt->entry_status = 0;
2763
2764
2765 qla1280_isp_cmd(ha);
2766 }
2767
2768 LEAVE("qla1280_marker");
2769}
2770
2771
2772
2773
2774
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784#ifdef QLA_64BIT_PTR
2785static int
2786qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2787{
2788 struct device_reg __iomem *reg = ha->iobase;
2789 struct scsi_cmnd *cmd = sp->cmd;
2790 cmd_a64_entry_t *pkt;
2791 __le32 *dword_ptr;
2792 dma_addr_t dma_handle;
2793 int status = 0;
2794 int cnt;
2795 int req_cnt;
2796 int seg_cnt;
2797 u8 dir;
2798
2799 ENTER("qla1280_64bit_start_scsi:");
2800
2801
2802 req_cnt = 1;
2803 seg_cnt = scsi_dma_map(cmd);
2804 if (seg_cnt > 0) {
2805 if (seg_cnt > 2) {
2806 req_cnt += (seg_cnt - 2) / 5;
2807 if ((seg_cnt - 2) % 5)
2808 req_cnt++;
2809 }
2810 } else if (seg_cnt < 0) {
2811 status = 1;
2812 goto out;
2813 }
2814
2815 if ((req_cnt + 2) >= ha->req_q_cnt) {
2816
2817 cnt = RD_REG_WORD(®->mailbox4);
2818 if (ha->req_ring_index < cnt)
2819 ha->req_q_cnt = cnt - ha->req_ring_index;
2820 else
2821 ha->req_q_cnt =
2822 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
2823 }
2824
2825 dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
2826 ha->req_q_cnt, seg_cnt);
2827
2828
2829 if ((req_cnt + 2) >= ha->req_q_cnt) {
2830 status = SCSI_MLQUEUE_HOST_BUSY;
2831 dprintk(2, "qla1280_start_scsi: in-ptr=0x%x req_q_cnt="
2832 "0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt,
2833 req_cnt);
2834 goto out;
2835 }
2836
2837
2838 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
2839 ha->outstanding_cmds[cnt] != NULL; cnt++);
2840
2841 if (cnt >= MAX_OUTSTANDING_COMMANDS) {
2842 status = SCSI_MLQUEUE_HOST_BUSY;
2843 dprintk(2, "qla1280_start_scsi: NO ROOM IN "
2844 "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt);
2845 goto out;
2846 }
2847
2848 ha->outstanding_cmds[cnt] = sp;
2849 ha->req_q_cnt -= req_cnt;
2850 CMD_HANDLE(sp->cmd) = (unsigned char *)(unsigned long)(cnt + 1);
2851
2852 dprintk(2, "start: cmd=%p sp=%p CDB=%xm, handle %lx\n", cmd, sp,
2853 cmd->cmnd[0], (long)CMD_HANDLE(sp->cmd));
2854 dprintk(2, " bus %i, target %i, lun %i\n",
2855 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
2856 qla1280_dump_buffer(2, cmd->cmnd, MAX_COMMAND_SIZE);
2857
2858
2859
2860
2861 pkt = (cmd_a64_entry_t *) ha->request_ring_ptr;
2862
2863 pkt->entry_type = COMMAND_A64_TYPE;
2864 pkt->entry_count = (uint8_t) req_cnt;
2865 pkt->sys_define = (uint8_t) ha->req_ring_index;
2866 pkt->entry_status = 0;
2867 pkt->handle = cpu_to_le32(cnt);
2868
2869
2870 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
2871
2872
2873 pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
2874
2875
2876 pkt->lun = SCSI_LUN_32(cmd);
2877 pkt->target = SCSI_BUS_32(cmd) ?
2878 (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
2879
2880
2881 if (cmd->device->simple_tags)
2882 pkt->control_flags |= cpu_to_le16(BIT_3);
2883
2884
2885 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
2886 memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
2887
2888
2889
2890 dir = qla1280_data_direction(cmd);
2891 pkt->control_flags |= cpu_to_le16(dir);
2892
2893
2894 pkt->dseg_count = cpu_to_le16(seg_cnt);
2895
2896
2897
2898
2899 if (seg_cnt) {
2900 struct scatterlist *sg, *s;
2901 int remseg = seg_cnt;
2902
2903 sg = scsi_sglist(cmd);
2904
2905
2906 dword_ptr = (u32 *)&pkt->dseg_0_address;
2907
2908
2909 for_each_sg(sg, s, seg_cnt, cnt) {
2910 if (cnt == 2)
2911 break;
2912
2913 dma_handle = sg_dma_address(s);
2914#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
2915 if (ha->flags.use_pci_vchannel)
2916 sn_pci_set_vchan(ha->pdev,
2917 (unsigned long *)&dma_handle,
2918 SCSI_BUS_32(cmd));
2919#endif
2920 *dword_ptr++ =
2921 cpu_to_le32(pci_dma_lo32(dma_handle));
2922 *dword_ptr++ =
2923 cpu_to_le32(pci_dma_hi32(dma_handle));
2924 *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
2925 dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n",
2926 cpu_to_le32(pci_dma_hi32(dma_handle)),
2927 cpu_to_le32(pci_dma_lo32(dma_handle)),
2928 cpu_to_le32(sg_dma_len(sg_next(s))));
2929 remseg--;
2930 }
2931 dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather "
2932 "command packet data - b %i, t %i, l %i \n",
2933 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
2934 SCSI_LUN_32(cmd));
2935 qla1280_dump_buffer(5, (char *)pkt,
2936 REQUEST_ENTRY_SIZE);
2937
2938
2939
2940
2941 dprintk(3, "S/G Building Continuation...seg_cnt=0x%x "
2942 "remains\n", seg_cnt);
2943
2944 while (remseg > 0) {
2945
2946 sg = s;
2947
2948 ha->req_ring_index++;
2949 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
2950 ha->req_ring_index = 0;
2951 ha->request_ring_ptr =
2952 ha->request_ring;
2953 } else
2954 ha->request_ring_ptr++;
2955
2956 pkt = (cmd_a64_entry_t *)ha->request_ring_ptr;
2957
2958
2959 memset(pkt, 0, REQUEST_ENTRY_SIZE);
2960
2961
2962 ((struct cont_a64_entry *) pkt)->entry_type =
2963 CONTINUE_A64_TYPE;
2964 ((struct cont_a64_entry *) pkt)->entry_count = 1;
2965 ((struct cont_a64_entry *) pkt)->sys_define =
2966 (uint8_t)ha->req_ring_index;
2967
2968 dword_ptr =
2969 (u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address;
2970
2971
2972 for_each_sg(sg, s, remseg, cnt) {
2973 if (cnt == 5)
2974 break;
2975 dma_handle = sg_dma_address(s);
2976#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
2977 if (ha->flags.use_pci_vchannel)
2978 sn_pci_set_vchan(ha->pdev,
2979 (unsigned long *)&dma_handle,
2980 SCSI_BUS_32(cmd));
2981#endif
2982 *dword_ptr++ =
2983 cpu_to_le32(pci_dma_lo32(dma_handle));
2984 *dword_ptr++ =
2985 cpu_to_le32(pci_dma_hi32(dma_handle));
2986 *dword_ptr++ =
2987 cpu_to_le32(sg_dma_len(s));
2988 dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n",
2989 cpu_to_le32(pci_dma_hi32(dma_handle)),
2990 cpu_to_le32(pci_dma_lo32(dma_handle)),
2991 cpu_to_le32(sg_dma_len(s)));
2992 }
2993 remseg -= cnt;
2994 dprintk(5, "qla1280_64bit_start_scsi: "
2995 "continuation packet data - b %i, t "
2996 "%i, l %i \n", SCSI_BUS_32(cmd),
2997 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
2998 qla1280_dump_buffer(5, (char *)pkt,
2999 REQUEST_ENTRY_SIZE);
3000 }
3001 } else {
3002 dprintk(5, "qla1280_64bit_start_scsi: No data, command "
3003 "packet data - b %i, t %i, l %i \n",
3004 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
3005 qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
3006 }
3007
3008 ha->req_ring_index++;
3009 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3010 ha->req_ring_index = 0;
3011 ha->request_ring_ptr = ha->request_ring;
3012 } else
3013 ha->request_ring_ptr++;
3014
3015
3016 dprintk(2,
3017 "qla1280_64bit_start_scsi: Wakeup RISC for pending command\n");
3018 sp->flags |= SRB_SENT;
3019 ha->actthreads++;
3020 WRT_REG_WORD(®->mailbox4, ha->req_ring_index);
3021
3022 mmiowb();
3023
3024 out:
3025 if (status)
3026 dprintk(2, "qla1280_64bit_start_scsi: **** FAILED ****\n");
3027 else
3028 dprintk(3, "qla1280_64bit_start_scsi: exiting normally\n");
3029
3030 return status;
3031}
3032#else
3033
3034
3035
3036
3037
3038
3039
3040
3041
3042
3043
3044
3045
3046
3047
3048
3049
3050
3051
3052
3053static int
3054qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3055{
3056 struct device_reg __iomem *reg = ha->iobase;
3057 struct scsi_cmnd *cmd = sp->cmd;
3058 struct cmd_entry *pkt;
3059 __le32 *dword_ptr;
3060 int status = 0;
3061 int cnt;
3062 int req_cnt;
3063 int seg_cnt;
3064 u8 dir;
3065
3066 ENTER("qla1280_32bit_start_scsi");
3067
3068 dprintk(1, "32bit_start: cmd=%p sp=%p CDB=%x\n", cmd, sp,
3069 cmd->cmnd[0]);
3070
3071
3072 req_cnt = 1;
3073 seg_cnt = scsi_dma_map(cmd);
3074 if (seg_cnt) {
3075
3076
3077
3078
3079 if (seg_cnt > 4) {
3080 req_cnt += (seg_cnt - 4) / 7;
3081 if ((seg_cnt - 4) % 7)
3082 req_cnt++;
3083 }
3084 dprintk(3, "S/G Transfer cmd=%p seg_cnt=0x%x, req_cnt=%x\n",
3085 cmd, seg_cnt, req_cnt);
3086 } else if (seg_cnt < 0) {
3087 status = 1;
3088 goto out;
3089 }
3090
3091 if ((req_cnt + 2) >= ha->req_q_cnt) {
3092
3093 cnt = RD_REG_WORD(®->mailbox4);
3094 if (ha->req_ring_index < cnt)
3095 ha->req_q_cnt = cnt - ha->req_ring_index;
3096 else
3097 ha->req_q_cnt =
3098 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
3099 }
3100
3101 dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
3102 ha->req_q_cnt, seg_cnt);
3103
3104 if ((req_cnt + 2) >= ha->req_q_cnt) {
3105 status = SCSI_MLQUEUE_HOST_BUSY;
3106 dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, "
3107 "req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index,
3108 ha->req_q_cnt, req_cnt);
3109 goto out;
3110 }
3111
3112
3113 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
3114 (ha->outstanding_cmds[cnt] != 0); cnt++) ;
3115
3116 if (cnt >= MAX_OUTSTANDING_COMMANDS) {
3117 status = SCSI_MLQUEUE_HOST_BUSY;
3118 dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING "
3119 "ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt);
3120 goto out;
3121 }
3122
3123 CMD_HANDLE(sp->cmd) = (unsigned char *) (unsigned long)(cnt + 1);
3124 ha->outstanding_cmds[cnt] = sp;
3125 ha->req_q_cnt -= req_cnt;
3126
3127
3128
3129
3130 pkt = (struct cmd_entry *) ha->request_ring_ptr;
3131
3132 pkt->entry_type = COMMAND_TYPE;
3133 pkt->entry_count = (uint8_t) req_cnt;
3134 pkt->sys_define = (uint8_t) ha->req_ring_index;
3135 pkt->entry_status = 0;
3136 pkt->handle = cpu_to_le32(cnt);
3137
3138
3139 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
3140
3141
3142 pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
3143
3144
3145 pkt->lun = SCSI_LUN_32(cmd);
3146 pkt->target = SCSI_BUS_32(cmd) ?
3147 (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
3148
3149
3150 if (cmd->device->simple_tags)
3151 pkt->control_flags |= cpu_to_le16(BIT_3);
3152
3153
3154 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
3155 memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
3156
3157
3158
3159 dir = qla1280_data_direction(cmd);
3160 pkt->control_flags |= cpu_to_le16(dir);
3161
3162
3163 pkt->dseg_count = cpu_to_le16(seg_cnt);
3164
3165
3166
3167
3168 if (seg_cnt) {
3169 struct scatterlist *sg, *s;
3170 int remseg = seg_cnt;
3171
3172 sg = scsi_sglist(cmd);
3173
3174
3175 dword_ptr = &pkt->dseg_0_address;
3176
3177 dprintk(3, "Building S/G data segments..\n");
3178 qla1280_dump_buffer(1, (char *)sg, 4 * 16);
3179
3180
3181 for_each_sg(sg, s, seg_cnt, cnt) {
3182 if (cnt == 4)
3183 break;
3184 *dword_ptr++ =
3185 cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
3186 *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
3187 dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n",
3188 (pci_dma_lo32(sg_dma_address(s))),
3189 (sg_dma_len(s)));
3190 remseg--;
3191 }
3192
3193
3194
3195 dprintk(3, "S/G Building Continuation"
3196 "...seg_cnt=0x%x remains\n", seg_cnt);
3197 while (remseg > 0) {
3198
3199 sg = s;
3200
3201 ha->req_ring_index++;
3202 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3203 ha->req_ring_index = 0;
3204 ha->request_ring_ptr =
3205 ha->request_ring;
3206 } else
3207 ha->request_ring_ptr++;
3208
3209 pkt = (struct cmd_entry *)ha->request_ring_ptr;
3210
3211
3212 memset(pkt, 0, REQUEST_ENTRY_SIZE);
3213
3214
3215 ((struct cont_entry *) pkt)->
3216 entry_type = CONTINUE_TYPE;
3217 ((struct cont_entry *) pkt)->entry_count = 1;
3218
3219 ((struct cont_entry *) pkt)->sys_define =
3220 (uint8_t) ha->req_ring_index;
3221
3222
3223 dword_ptr =
3224 &((struct cont_entry *) pkt)->dseg_0_address;
3225
3226
3227 for_each_sg(sg, s, remseg, cnt) {
3228 if (cnt == 7)
3229 break;
3230 *dword_ptr++ =
3231 cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
3232 *dword_ptr++ =
3233 cpu_to_le32(sg_dma_len(s));
3234 dprintk(1,
3235 "S/G Segment Cont. phys_addr=0x%x, "
3236 "len=0x%x\n",
3237 cpu_to_le32(pci_dma_lo32(sg_dma_address(s))),
3238 cpu_to_le32(sg_dma_len(s)));
3239 }
3240 remseg -= cnt;
3241 dprintk(5, "qla1280_32bit_start_scsi: "
3242 "continuation packet data - "
3243 "scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd),
3244 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
3245 qla1280_dump_buffer(5, (char *)pkt,
3246 REQUEST_ENTRY_SIZE);
3247 }
3248 } else {
3249 dprintk(5, "qla1280_32bit_start_scsi: No data, command "
3250 "packet data - \n");
3251 qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
3252 }
3253 dprintk(5, "qla1280_32bit_start_scsi: First IOCB block:\n");
3254 qla1280_dump_buffer(5, (char *)ha->request_ring_ptr,
3255 REQUEST_ENTRY_SIZE);
3256
3257
3258 ha->req_ring_index++;
3259 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3260 ha->req_ring_index = 0;
3261 ha->request_ring_ptr = ha->request_ring;
3262 } else
3263 ha->request_ring_ptr++;
3264
3265
3266 dprintk(2, "qla1280_32bit_start_scsi: Wakeup RISC "
3267 "for pending command\n");
3268 sp->flags |= SRB_SENT;
3269 ha->actthreads++;
3270 WRT_REG_WORD(®->mailbox4, ha->req_ring_index);
3271
3272 mmiowb();
3273
3274out:
3275 if (status)
3276 dprintk(2, "qla1280_32bit_start_scsi: **** FAILED ****\n");
3277
3278 LEAVE("qla1280_32bit_start_scsi");
3279
3280 return status;
3281}
3282#endif
3283
3284
3285
3286
3287
3288
3289
3290
3291
3292
3293
3294
/*
 * qla1280_req_pkt
 *	Obtain a zeroed slot on the ISP request ring, busy-waiting up to
 *	roughly 30 seconds (15,000,000 iterations x 2us) for space.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	Pointer to the prepared request packet, or NULL on timeout.
 */
static request_t *
qla1280_req_pkt(struct scsi_qla_host *ha)
{
	struct device_reg __iomem *reg = ha->iobase;
	request_t *pkt = NULL;
	int cnt;
	uint32_t timer;

	ENTER("qla1280_req_pkt");

	/*
	 * This can be called from interrupt context, so sleeping is not
	 * an option - poll for a free entry instead.
	 */
	for (timer = 15000000; timer; timer--) {
		if (ha->req_q_cnt > 0) {
			/*
			 * Recompute the free-entry count from the
			 * firmware's out-pointer in mailbox4.
			 * NOTE(review): the refresh only happens while
			 * req_q_cnt > 0 -- confirm that is intended.
			 */
			cnt = RD_REG_WORD(&reg->mailbox4);
			if (ha->req_ring_index < cnt)
				ha->req_q_cnt = cnt - ha->req_ring_index;
			else
				ha->req_q_cnt =
					REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
		}

		/* Found an empty request ring slot? */
		if (ha->req_q_cnt > 0) {
			ha->req_q_cnt--;
			pkt = ha->request_ring_ptr;

			/* Zero out packet. */
			memset(pkt, 0, REQUEST_ENTRY_SIZE);

			/*
			 * Tag the entry with the current ring index and a
			 * single-entry count; the caller fills in the rest.
			 */
			pkt->sys_define = (uint8_t) ha->req_ring_index;

			/* Set entry count. */
			pkt->entry_count = 1;

			break;
		}

		udelay(2);	/* 2 us per retry */

		/* Service any pending interrupts while we wait. */
		qla1280_poll(ha);
	}

	if (!pkt)
		dprintk(2, "qla1280_req_pkt: **** FAILED ****\n");
	else
		dprintk(3, "qla1280_req_pkt: exiting normally\n");

	return pkt;
}
3354
3355
3356
3357
3358
3359
3360
3361
3362
/*
 * qla1280_isp_cmd
 *	Notify the ISP that a new request IOCB (already built in
 *	ha->request_ring_ptr) is ready: advance the driver's ring index
 *	and write it to mailbox4, the request-queue in-pointer doorbell.
 *
 * Input:
 *	ha = adapter block pointer.
 */
static void
qla1280_isp_cmd(struct scsi_qla_host *ha)
{
	struct device_reg __iomem *reg = ha->iobase;

	ENTER("qla1280_isp_cmd");

	dprintk(5, "qla1280_isp_cmd: IOCB data:\n");
	qla1280_dump_buffer(5, (char *)ha->request_ring_ptr,
			    REQUEST_ENTRY_SIZE);

	/* Advance ring index, wrapping at the end of the ring. */
	ha->req_ring_index++;
	if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring;
	} else
		ha->request_ring_ptr++;

	/*
	 * Tell the firmware about the new in-pointer.  The mmiowb()
	 * afterwards keeps the doorbell write ordered with respect to
	 * other MMIO writes on platforms with posted buses.
	 */
	WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
	mmiowb();

	LEAVE("qla1280_isp_cmd");
}
3400
3401
3402
3403
3404
3405
3406
3407
3408
3409
3410
3411
3412
/*
 * qla1280_isr
 *	Interrupt service core: handles mailbox/asynchronous events and
 *	walks the response ring, queuing completed commands on done_q
 *	for later completion processing.
 *
 * Input:
 *	ha     = adapter block pointer.
 *	done_q = list head to collect completed srbs.
 */
static void
qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q)
{
	struct device_reg __iomem *reg = ha->iobase;
	struct response *pkt;
	struct srb *sp = NULL;
	uint16_t mailbox[MAILBOX_REGISTER_COUNT];
	uint16_t *wptr;
	uint32_t index;
	u16 istatus;

	ENTER("qla1280_isr");

	/* Nothing to do unless the chip is asserting an interrupt. */
	istatus = RD_REG_WORD(&reg->istatus);
	if (!(istatus & (RISC_INT | PCI_INT)))
		return;

	/* Save mailbox register 5 (response queue in-pointer). */
	mailbox[5] = RD_REG_WORD(&reg->mailbox5);

	/* Semaphore BIT_0 set means the outgoing mailbox registers
	 * hold a mailbox/async event. */
	mailbox[0] = RD_REG_WORD_dmasync(&reg->semaphore);

	if (mailbox[0] & BIT_0) {
		/* Fast-post completions only use mailbox0-2; everything
		 * else needs 0-7 (mailbox5 was already read above). */
		wptr = &mailbox[0];
		*wptr++ = RD_REG_WORD(&reg->mailbox0);
		*wptr++ = RD_REG_WORD(&reg->mailbox1);
		*wptr = RD_REG_WORD(&reg->mailbox2);
		if (mailbox[0] != MBA_SCSI_COMPLETION) {
			wptr++;
			*wptr++ = RD_REG_WORD(&reg->mailbox3);
			*wptr++ = RD_REG_WORD(&reg->mailbox4);
			wptr++;		/* skip mailbox[5], saved earlier */
			*wptr++ = RD_REG_WORD(&reg->mailbox6);
			*wptr = RD_REG_WORD(&reg->mailbox7);
		}

		/* Release the mailbox registers back to the firmware and
		 * clear the RISC interrupt. */
		WRT_REG_WORD(&reg->semaphore, 0);
		WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);

		dprintk(5, "qla1280_isr: mailbox interrupt mailbox[0] = 0x%x",
			mailbox[0]);

		/* Dispatch on the event code in mailbox0. */
		switch (mailbox[0]) {
		case MBA_SCSI_COMPLETION:	/* fast-post completion */
			dprintk(5, "qla1280_isr: mailbox SCSI response "
				"completion\n");

			if (ha->flags.online) {
				/* Handle of the completed command is in
				 * mailbox1 (low) / mailbox2 (high). */
				index = mailbox[2] << 16 | mailbox[1];

				/* Validate handle. */
				if (index < MAX_OUTSTANDING_COMMANDS)
					sp = ha->outstanding_cmds[index];
				else
					sp = NULL;

				if (sp) {
					/* Free outstanding command slot. */
					ha->outstanding_cmds[index] = NULL;

					/* Fast post: success, no status
					 * entry will follow. */
					CMD_RESULT(sp->cmd) = 0;
					CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;

					/* Place block on done queue. */
					list_add_tail(&sp->list, done_q);
				} else {
					/*
					 * Firmware returned a handle we
					 * have no record of.
					 */
					printk(KERN_WARNING
					       "qla1280: ISP invalid handle\n");
				}
			}
			break;

		case MBA_BUS_RESET:	/* SCSI bus was reset */
			ha->flags.reset_marker = 1;
			index = mailbox[6] & BIT_0;	/* which bus */
			ha->bus_settings[index].reset_marker = 1;

			printk(KERN_DEBUG "qla1280_isr(): index %i "
			       "asynchronous BUS_RESET\n", index);
			break;

		case MBA_SYSTEM_ERR:	/* firmware fault */
			printk(KERN_WARNING
			       "qla1280: ISP System Error - mbx1=%xh, mbx2="
			       "%xh, mbx3=%xh\n", mailbox[1], mailbox[2],
			       mailbox[3]);
			break;

		case MBA_REQ_TRANSFER_ERR:
			printk(KERN_WARNING
			       "qla1280: ISP Request Transfer Error\n");
			break;

		case MBA_RSP_TRANSFER_ERR:
			printk(KERN_WARNING
			       "qla1280: ISP Response Transfer Error\n");
			break;

		case MBA_WAKEUP_THRES:
			dprintk(2, "qla1280_isr: asynchronous WAKEUP_THRES\n");
			break;

		case MBA_TIMEOUT_RESET:
			dprintk(2,
				"qla1280_isr: asynchronous TIMEOUT_RESET\n");
			break;

		case MBA_DEVICE_RESET:	/* bus device reset occurred */
			printk(KERN_INFO "qla1280_isr(): asynchronous "
			       "BUS_DEVICE_RESET\n");

			ha->flags.reset_marker = 1;
			index = mailbox[6] & BIT_0;	/* which bus */
			ha->bus_settings[index].reset_marker = 1;
			break;

		case MBA_BUS_MODE_CHANGE:
			dprintk(2,
				"qla1280_isr: asynchronous BUS_MODE_CHANGE\n");
			break;

		default:
			/* Codes below MBA_ASYNC_EVENT are the completion
			 * of a mailbox command we issued: hand the
			 * registers back to the waiting thread. */
			if (mailbox[0] < MBA_ASYNC_EVENT) {
				wptr = &mailbox[0];
				memcpy((uint16_t *) ha->mailbox_out, wptr,
				       MAILBOX_REGISTER_COUNT *
				       sizeof(uint16_t));

				if(ha->mailbox_wait != NULL)
					complete(ha->mailbox_wait);
			}
			break;
		}
	} else {
		/* No mailbox event - just acknowledge the interrupt. */
		WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
	}

	/*
	 * Interrupts arrive during mailbox testing before the card is
	 * marked online; skip response-ring processing in that window.
	 */
	if (!(ha->flags.online && !ha->mailbox_wait)) {
		dprintk(2, "qla1280_isr: Response pointer Error\n");
		goto out;
	}

	/* Sanity-check the firmware-supplied in-pointer. */
	if (mailbox[5] >= RESPONSE_ENTRY_CNT)
		goto out;

	/* Consume response ring entries up to the in-pointer. */
	while (ha->rsp_ring_index != mailbox[5]) {
		pkt = ha->response_ring_ptr;

		dprintk(5, "qla1280_isr: ha->rsp_ring_index = 0x%x, mailbox[5]"
			" = 0x%x\n", ha->rsp_ring_index, mailbox[5]);
		dprintk(5,"qla1280_isr: response packet data\n");
		qla1280_dump_buffer(5, (char *)pkt, RESPONSE_ENTRY_SIZE);

		if (pkt->entry_type == STATUS_TYPE) {
			if ((le16_to_cpu(pkt->scsi_status) & 0xff)
			    || pkt->comp_status || pkt->entry_status) {
				dprintk(2, "qla1280_isr: ha->rsp_ring_index = "
					"0x%x mailbox[5] = 0x%x, comp_status "
					"= 0x%x, scsi_status = 0x%x\n",
					ha->rsp_ring_index, mailbox[5],
					le16_to_cpu(pkt->comp_status),
					le16_to_cpu(pkt->scsi_status));
			}
		} else {
			dprintk(2, "qla1280_isr: ha->rsp_ring_index = "
				"0x%x, mailbox[5] = 0x%x\n",
				ha->rsp_ring_index, mailbox[5]);
			dprintk(2, "qla1280_isr: response packet data\n");
			qla1280_dump_buffer(2, (char *)pkt,
					    RESPONSE_ENTRY_SIZE);
		}

		if (pkt->entry_type == STATUS_TYPE || pkt->entry_status) {
			dprintk(2, "status: Cmd %p, handle %i\n",
				ha->outstanding_cmds[pkt->handle]->cmd,
				pkt->handle);
			if (pkt->entry_type == STATUS_TYPE)
				qla1280_status_entry(ha, pkt, done_q);
			else
				qla1280_error_entry(ha, pkt, done_q);

			/* Advance the out-pointer (wrapping) and echo it
			 * back to the firmware in mailbox5. */
			ha->rsp_ring_index++;
			if (ha->rsp_ring_index == RESPONSE_ENTRY_CNT) {
				ha->rsp_ring_index = 0;
				ha->response_ring_ptr = ha->response_ring;
			} else
				ha->response_ring_ptr++;
			WRT_REG_WORD(&reg->mailbox5, ha->rsp_ring_index);
		}
	}

 out:
	LEAVE("qla1280_isr");
}
3625
3626
3627
3628
3629
3630
3631
3632
3633static void
3634qla1280_rst_aen(struct scsi_qla_host *ha)
3635{
3636 uint8_t bus;
3637
3638 ENTER("qla1280_rst_aen");
3639
3640 if (ha->flags.online && !ha->flags.reset_active &&
3641 !ha->flags.abort_isp_active) {
3642 ha->flags.reset_active = 1;
3643 while (ha->flags.reset_marker) {
3644
3645 ha->flags.reset_marker = 0;
3646 for (bus = 0; bus < ha->ports &&
3647 !ha->flags.reset_marker; bus++) {
3648 if (ha->bus_settings[bus].reset_marker) {
3649 ha->bus_settings[bus].reset_marker = 0;
3650 qla1280_marker(ha, bus, 0, 0,
3651 MK_SYNC_ALL);
3652 }
3653 }
3654 }
3655 }
3656
3657 LEAVE("qla1280_rst_aen");
3658}
3659
3660
3661
3662
3663
3664
3665
3666
3667
3668
3669
/*
 * qla1280_status_entry
 *	Process a received ISP status IOCB: look up the outstanding
 *	command by handle, copy sense data on CHECK CONDITION, set the
 *	SCSI result and queue the srb on done_q.
 *
 * Input:
 *	ha     = adapter block pointer.
 *	pkt    = status entry pointer.
 *	done_q = done queue.
 */
static void
qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
		     struct list_head *done_q)
{
	unsigned int bus, target, lun;
	int sense_sz;
	struct srb *sp;
	struct scsi_cmnd *cmd;
	uint32_t handle = le32_to_cpu(pkt->handle);
	uint16_t scsi_status = le16_to_cpu(pkt->scsi_status);
	uint16_t comp_status = le16_to_cpu(pkt->comp_status);

	ENTER("qla1280_status_entry");

	/* Validate handle. */
	if (handle < MAX_OUTSTANDING_COMMANDS)
		sp = ha->outstanding_cmds[handle];
	else
		sp = NULL;

	if (!sp) {
		printk(KERN_WARNING "qla1280: Status Entry invalid handle\n");
		goto out;
	}

	/* Free outstanding command slot. */
	ha->outstanding_cmds[handle] = NULL;

	cmd = sp->cmd;

	/* Addressing, used only for the debug output below. */
	bus = SCSI_BUS_32(cmd);
	target = SCSI_TCN_32(cmd);
	lun = SCSI_LUN_32(cmd);

	if (comp_status || scsi_status) {
		dprintk(3, "scsi: comp_status = 0x%x, scsi_status = "
			"0x%x, handle = 0x%x\n", comp_status,
			scsi_status, handle);
	}

	/* Target busy / queue full: return the raw SCSI status so the
	 * midlayer can requeue or retry. */
	if ((scsi_status & 0xFF) == SAM_STAT_TASK_SET_FULL ||
	    (scsi_status & 0xFF) == SAM_STAT_BUSY) {
		CMD_RESULT(cmd) = scsi_status & 0xff;
	} else {
		/* Translate completion status into a SCSI result. */
		CMD_RESULT(cmd) = qla1280_return_status(pkt, cmd);

		if (scsi_status & SAM_STAT_CHECK_CONDITION) {
			if (comp_status != CS_ARS_FAILED) {
				uint16_t req_sense_length =
					le16_to_cpu(pkt->req_sense_length);
				if (req_sense_length < CMD_SNSLEN(cmd))
					sense_sz = req_sense_length;
				else
					/*
					 * NOTE(review): only CMD_SNSLEN-1
					 * bytes are copied here -- verify
					 * whether dropping the last byte
					 * is deliberate.
					 */
					sense_sz = CMD_SNSLEN(cmd) - 1;

				memcpy(cmd->sense_buffer,
				       &pkt->req_sense_data, sense_sz);
			} else
				sense_sz = 0;
			/* Zero the remainder of the sense buffer. */
			memset(cmd->sense_buffer + sense_sz, 0,
			       SCSI_SENSE_BUFFERSIZE - sense_sz);

			dprintk(2, "qla1280_status_entry: Check "
				"condition Sense data, b %i, t %i, "
				"l %i\n", bus, target, lun);
			if (sense_sz)
				qla1280_dump_buffer(2,
						    (char *)cmd->sense_buffer,
						    sense_sz);
		}
	}

	CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;

	/* Place command on done queue. */
	list_add_tail(&sp->list, done_q);
 out:
	LEAVE("qla1280_status_entry");
}
3758
3759
3760
3761
3762
3763
3764
3765
3766
3767
/*
 * qla1280_error_entry
 *	Process an ISP error entry: log which error flag is set, then
 *	complete the matching outstanding command (if the handle is
 *	valid) with an appropriate error result.
 *
 * Input:
 *	ha     = adapter block pointer.
 *	pkt    = error entry pointer.
 *	done_q = done queue.
 */
static void
qla1280_error_entry(struct scsi_qla_host *ha, struct response *pkt,
		    struct list_head *done_q)
{
	struct srb *sp;
	uint32_t handle = le32_to_cpu(pkt->handle);

	ENTER("qla1280_error_entry");

	/* entry_status flags: BIT_3 = bad payload, BIT_2 = bad header,
	 * BIT_1 = queue full. */
	if (pkt->entry_status & BIT_3)
		dprintk(2, "qla1280_error_entry: BAD PAYLOAD flag error\n");
	else if (pkt->entry_status & BIT_2)
		dprintk(2, "qla1280_error_entry: BAD HEADER flag error\n");
	else if (pkt->entry_status & BIT_1)
		dprintk(2, "qla1280_error_entry: FULL flag error\n");
	else
		dprintk(2, "qla1280_error_entry: UNKNOWN flag error\n");

	/* Validate handle. */
	if (handle < MAX_OUTSTANDING_COMMANDS)
		sp = ha->outstanding_cmds[handle];
	else
		sp = NULL;

	if (sp) {
		/* Free outstanding command slot. */
		ha->outstanding_cmds[handle] = NULL;

		/* Map the entry-status flags to a midlayer result. */
		if (pkt->entry_status & (BIT_3 + BIT_2)) {
			/* Bad payload or header. */
			CMD_RESULT(sp->cmd) = DID_ERROR << 16;
		} else if (pkt->entry_status & BIT_1) {
			/* Queue full - ask the midlayer to retry. */
			CMD_RESULT(sp->cmd) = DID_BUS_BUSY << 16;
		} else {
			/* Unknown flag - treat as a generic error. */
			CMD_RESULT(sp->cmd) = DID_ERROR << 16;
		}

		CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;

		/* Place command on done queue. */
		list_add_tail(&sp->list, done_q);
	}
#ifdef QLA_64BIT_PTR
	else if (pkt->entry_type == COMMAND_A64_TYPE) {
		printk(KERN_WARNING "!qla1280: Error Entry invalid handle");
	}
#endif

	LEAVE("qla1280_error_entry");
}
3821
3822
3823
3824
3825
3826
3827
3828
3829
3830
3831
/*
 * qla1280_abort_isp
 *	Reset the ISP after a fatal error: fail all outstanding commands
 *	with DID_RESET, reload firmware, reconfigure from NVRAM,
 *	re-initialize the rings and reset each SCSI bus.  On failure the
 *	adapter is disabled via qla1280_reset_adapter().
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success.
 */
static int
qla1280_abort_isp(struct scsi_qla_host *ha)
{
	struct device_reg __iomem *reg = ha->iobase;
	struct srb *sp;
	int status = 0;
	int cnt;
	int bus;

	ENTER("qla1280_abort_isp");

	/* Guard against re-entry and against running before init. */
	if (ha->flags.abort_isp_active || !ha->flags.online)
		goto out;

	ha->flags.abort_isp_active = 1;

	/* Quiesce the chip before tearing things down. */
	qla1280_disable_intrs(ha);
	WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
	/* Read-back after the pause - presumably flushes the posted
	 * write; confirm against the hardware manual. */
	RD_REG_WORD(&reg->id_l);

	printk(KERN_INFO "scsi(%li): dequeuing outstanding commands\n",
	       ha->host_no);
	/* Fail every command still in the outstanding array. */
	for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
		struct scsi_cmnd *cmd;
		sp = ha->outstanding_cmds[cnt];
		if (sp) {
			cmd = sp->cmd;
			CMD_RESULT(cmd) = DID_RESET << 16;
			CMD_HANDLE(cmd) = COMPLETED_HANDLE;
			ha->outstanding_cmds[cnt] = NULL;
			list_add_tail(&sp->list, &ha->done_q);
		}
	}

	/* Complete the dequeued commands back to the midlayer. */
	qla1280_done(ha);

	status = qla1280_load_firmware(ha);
	if (status)
		goto out;

	/* Setup adapter based on NVRAM parameters. */
	qla1280_nvram_config (ha);

	status = qla1280_init_rings(ha);
	if (status)
		goto out;

	/* Issue a SCSI bus reset on every port. */
	for (bus = 0; bus < ha->ports; bus++)
		qla1280_bus_reset(ha, bus);

	ha->flags.abort_isp_active = 0;
 out:
	if (status) {
		/* NOTE(review): message below lacks a trailing '\n'. */
		printk(KERN_WARNING
		       "qla1280: ISP error recovery failed, board disabled");
		qla1280_reset_adapter(ha);
		dprintk(2, "qla1280_abort_isp: **** FAILED ****\n");
	}

	LEAVE("qla1280_abort_isp");
	return status;
}
3897
3898
3899
3900
3901
3902
3903
3904
3905
3906
3907
3908
3909static u16
3910qla1280_debounce_register(volatile u16 __iomem * addr)
3911{
3912 volatile u16 ret;
3913 volatile u16 ret2;
3914
3915 ret = RD_REG_WORD(addr);
3916 ret2 = RD_REG_WORD(addr);
3917
3918 if (ret == ret2)
3919 return ret;
3920
3921 do {
3922 cpu_relax();
3923 ret = RD_REG_WORD(addr);
3924 ret2 = RD_REG_WORD(addr);
3925 } while (ret != ret2);
3926
3927 return ret;
3928}
3929
3930
3931
3932
3933
3934
3935
#define SET_SXP_BANK		0x0100	/* cfg_1 value selecting the SXP register bank */
#define SCSI_PHASE_INVALID	0x87FF	/* scsiControlPins reading that indicates a dead bus */
/*
 * qla1280_check_for_dead_scsi_bus
 *	Re-check a bus previously marked dead by sampling the SCSI
 *	control pins through the SXP register bank (RISC paused around
 *	the access).  Buses not already flagged dead are never probed.
 *
 * Input:
 *	ha  = adapter block pointer.
 *	bus = bus number to check.
 *
 * Returns:
 *	1 if the bus still looks dead; 0 otherwise (a recovered bus also
 *	has its dead flag and failed-reset counter cleared).
 */
static int
qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *ha, unsigned int bus)
{
	uint16_t config_reg, scsi_control;
	struct device_reg __iomem *reg = ha->iobase;

	if (ha->bus_settings[bus].scsi_bus_dead) {
		/* Pause the RISC, switch to the SXP bank, sample the
		 * control pins, then restore the bank and resume. */
		WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
		config_reg = RD_REG_WORD(&reg->cfg_1);
		WRT_REG_WORD(&reg->cfg_1, SET_SXP_BANK);
		scsi_control = RD_REG_WORD(&reg->scsiControlPins);
		WRT_REG_WORD(&reg->cfg_1, config_reg);
		WRT_REG_WORD(&reg->host_cmd, HC_RELEASE_RISC);

		if (scsi_control == SCSI_PHASE_INVALID) {
			ha->bus_settings[bus].scsi_bus_dead = 1;
			return 1;	/* bus is still dead */
		} else {
			/* Bus recovered - clear the failure state. */
			ha->bus_settings[bus].scsi_bus_dead = 0;
			ha->bus_settings[bus].failed_reset_count = 0;
		}
	}
	return 0;	/* bus is not dead */
}
3962
/*
 * qla1280_get_target_parameters
 *	Issue a GET_TARGET_PARAMETERS mailbox command and print the
 *	negotiated transfer settings (sync period/offset, wide, DT) and
 *	tagged-queueing depth for the given device.
 *
 * Input:
 *	ha     = adapter block pointer.
 *	device = SCSI device to query.
 */
static void
qla1280_get_target_parameters(struct scsi_qla_host *ha,
			      struct scsi_device *device)
{
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	int bus, target, lun;

	bus = device->channel;
	target = device->id;
	lun = device->lun;

	/* mb[1] carries the target in its high byte, with BIT_7 of the
	 * target field flagging the second bus. */
	mb[0] = MBC_GET_TARGET_PARAMETERS;
	mb[1] = (uint16_t) (bus ? target | BIT_7 : target);
	mb[1] <<= 8;
	qla1280_mailbox_command(ha, BIT_6 | BIT_3 | BIT_2 | BIT_1 | BIT_0,
				&mb[0]);

	printk(KERN_INFO "scsi(%li:%d:%d:%d):", ha->host_no, bus, target, lun);

	/* mb[3]: low byte = sync period, high byte = sync offset;
	 * zero means asynchronous transfers. */
	if (mb[3] != 0) {
		printk(" Sync: period %d, offset %d",
		       (mb[3] & 0xff), (mb[3] >> 8));
		if (mb[2] & BIT_13)
			printk(", Wide");
		if ((mb[2] & BIT_5) && ((mb[6] >> 8) & 0xff) >= 2)
			printk(", DT");
	} else
		printk(" Async");

	if (device->simple_tags)
		printk(", Tagged queuing: depth %d", device->queue_depth);
	printk("\n");
}
3997
3998
3999#if DEBUG_QLA1280
/*
 * __qla1280_dump_buffer
 *	Print a hex dump of 'size' bytes starting at 'b', 16 bytes per
 *	line, beneath a column-index header.  Debug builds only.
 */
static void
__qla1280_dump_buffer(char *b, int size)
{
	int cnt;
	u8 c;

	printk(KERN_DEBUG " 0 1 2 3 4 5 6 7 8 9 Ah "
	       "Bh Ch Dh Eh Fh\n");
	printk(KERN_DEBUG "---------------------------------------------"
	       "------------------\n");

	for (cnt = 0; cnt < size;) {
		c = *b++;

		printk("0x%02x", c);
		cnt++;
		if (!(cnt % 16))
			printk("\n");
		else
			printk(" ");
	}
	/* Terminate a partial final line. */
	if (cnt % 16)
		printk("\n");
}
4024
4025
4026
4027
4028
/*
 * __qla1280_print_scsi_cmd
 *	Dump the interesting fields of a scsi_cmnd (addressing, CDB
 *	bytes, scatter/gather summary, tag and transfer sizes) for
 *	debugging.  Debug builds only.
 */
static void
__qla1280_print_scsi_cmd(struct scsi_cmnd *cmd)
{
	struct scsi_qla_host *ha;
	struct Scsi_Host *host = CMD_HOST(cmd);
	struct srb *sp;

	int i;
	ha = (struct scsi_qla_host *)host->hostdata;

	sp = (struct srb *)CMD_SP(cmd);
	printk("SCSI Command @= 0x%p, Handle=0x%p\n", cmd, CMD_HANDLE(cmd));
	printk(" chan=%d, target = 0x%02x, lun = 0x%02x, cmd_len = 0x%02x\n",
	       SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd),
	       CMD_CDBLEN(cmd));
	printk(" CDB = ");
	for (i = 0; i < cmd->cmd_len; i++) {
		printk("0x%02x ", cmd->cmnd[i]);
	}
	printk(" seg_cnt =%d\n", scsi_sg_count(cmd));
	printk(" request buffer=0x%p, request buffer len=0x%x\n",
	       scsi_sglist(cmd), scsi_bufflen(cmd));

	printk(" tag=%d, transfersize=0x%x \n",
	       cmd->tag, cmd->transfersize);
	printk(" SP=0x%p\n", CMD_SP(cmd));
	printk(" underflow size = 0x%x, direction=0x%x\n",
	       cmd->underflow, cmd->sc_data_direction);
}
4064
4065
4066
4067
4068
4069static void
4070ql1280_dump_device(struct scsi_qla_host *ha)
4071{
4072
4073 struct scsi_cmnd *cp;
4074 struct srb *sp;
4075 int i;
4076
4077 printk(KERN_DEBUG "Outstanding Commands on controller:\n");
4078
4079 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
4080 if ((sp = ha->outstanding_cmds[i]) == NULL)
4081 continue;
4082 if ((cp = sp->cmd) == NULL)
4083 continue;
4084 qla1280_print_scsi_cmd(1, cp);
4085 }
4086}
4087#endif
4088
4089
/* Tokens recognised in the "qla1280=" boot / module option string. */
enum tokens {
	TOKEN_NVRAM,
	TOKEN_SYNC,
	TOKEN_WIDE,
	TOKEN_PPR,
	TOKEN_VERBOSE,
	TOKEN_DEBUG,
};
4098
/* Maps an option-string keyword to its token value. */
struct setup_tokens {
	char *token;	/* keyword as it appears in the option string */
	int val;	/* corresponding TOKEN_* value */
};

/*
 * Keyword table consumed by qla1280_get_token().
 * NOTE(review): "debug" is listed here but qla1280_setup()'s switch has
 * no TOKEN_DEBUG case, so it currently hits the "unknown boot option"
 * default -- confirm whether that is intended.
 */
static struct setup_tokens setup_token[] __initdata =
{
	{ "nvram", TOKEN_NVRAM },
	{ "sync", TOKEN_SYNC },
	{ "wide", TOKEN_WIDE },
	{ "ppr", TOKEN_PPR },
	{ "verbose", TOKEN_VERBOSE },
	{ "debug", TOKEN_DEBUG },
};
4113
4114
4115
4116
4117
4118
4119
4120
4121static int __init
4122qla1280_setup(char *s)
4123{
4124 char *cp, *ptr;
4125 unsigned long val;
4126 int toke;
4127
4128 cp = s;
4129
4130 while (cp && (ptr = strchr(cp, ':'))) {
4131 ptr++;
4132 if (!strcmp(ptr, "yes")) {
4133 val = 0x10000;
4134 ptr += 3;
4135 } else if (!strcmp(ptr, "no")) {
4136 val = 0;
4137 ptr += 2;
4138 } else
4139 val = simple_strtoul(ptr, &ptr, 0);
4140
4141 switch ((toke = qla1280_get_token(cp))) {
4142 case TOKEN_NVRAM:
4143 if (!val)
4144 driver_setup.no_nvram = 1;
4145 break;
4146 case TOKEN_SYNC:
4147 if (!val)
4148 driver_setup.no_sync = 1;
4149 else if (val != 0x10000)
4150 driver_setup.sync_mask = val;
4151 break;
4152 case TOKEN_WIDE:
4153 if (!val)
4154 driver_setup.no_wide = 1;
4155 else if (val != 0x10000)
4156 driver_setup.wide_mask = val;
4157 break;
4158 case TOKEN_PPR:
4159 if (!val)
4160 driver_setup.no_ppr = 1;
4161 else if (val != 0x10000)
4162 driver_setup.ppr_mask = val;
4163 break;
4164 case TOKEN_VERBOSE:
4165 qla1280_verbose = val;
4166 break;
4167 default:
4168 printk(KERN_INFO "qla1280: unknown boot option %s\n",
4169 cp);
4170 }
4171
4172 cp = strchr(ptr, ';');
4173 if (cp)
4174 cp++;
4175 else {
4176 break;
4177 }
4178 }
4179 return 1;
4180}
4181
4182
4183static int __init
4184qla1280_get_token(char *str)
4185{
4186 char *sep;
4187 long ret = -1;
4188 int i;
4189
4190 sep = strchr(str, ':');
4191
4192 if (sep) {
4193 for (i = 0; i < ARRAY_SIZE(setup_token); i++) {
4194 if (!strncmp(setup_token[i].token, str, (sep - str))) {
4195 ret = setup_token[i].val;
4196 break;
4197 }
4198 }
4199 }
4200
4201 return ret;
4202}
4203
4204
/*
 * SCSI midlayer host template: entry points and queueing limits for
 * ISP1280/12160 boards.
 */
static struct scsi_host_template qla1280_driver_template = {
	.module			= THIS_MODULE,
	.proc_name		= "qla1280",
	.name			= "Qlogic ISP 1280/12160",
	.info			= qla1280_info,
	.slave_configure	= qla1280_slave_configure,
	.queuecommand		= qla1280_queuecommand,
	.eh_abort_handler	= qla1280_eh_abort,
	.eh_device_reset_handler= qla1280_eh_device_reset,
	.eh_bus_reset_handler	= qla1280_eh_bus_reset,
	.eh_host_reset_handler	= qla1280_eh_adapter_reset,
	.bios_param		= qla1280_biosparam,
	.can_queue		= MAX_OUTSTANDING_COMMANDS,	/* matches outstanding_cmds[] */
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.use_clustering		= ENABLE_CLUSTERING,
};
4222
4223
4224static int
4225qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4226{
4227 int devnum = id->driver_data;
4228 struct qla_boards *bdp = &ql1280_board_tbl[devnum];
4229 struct Scsi_Host *host;
4230 struct scsi_qla_host *ha;
4231 int error = -ENODEV;
4232
4233
4234 if (pdev->subsystem_vendor == PCI_VENDOR_ID_AMI) {
4235 printk(KERN_INFO
4236 "qla1280: Skipping AMI SubSys Vendor ID Chip\n");
4237 goto error;
4238 }
4239
4240 printk(KERN_INFO "qla1280: %s found on PCI bus %i, dev %i\n",
4241 bdp->name, pdev->bus->number, PCI_SLOT(pdev->devfn));
4242
4243 if (pci_enable_device(pdev)) {
4244 printk(KERN_WARNING
4245 "qla1280: Failed to enabled pci device, aborting.\n");
4246 goto error;
4247 }
4248
4249 pci_set_master(pdev);
4250
4251 error = -ENOMEM;
4252 host = scsi_host_alloc(&qla1280_driver_template, sizeof(*ha));
4253 if (!host) {
4254 printk(KERN_WARNING
4255 "qla1280: Failed to register host, aborting.\n");
4256 goto error_disable_device;
4257 }
4258
4259 ha = (struct scsi_qla_host *)host->hostdata;
4260 memset(ha, 0, sizeof(struct scsi_qla_host));
4261
4262 ha->pdev = pdev;
4263 ha->devnum = devnum;
4264
4265#ifdef QLA_64BIT_PTR
4266 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
4267 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
4268 printk(KERN_WARNING "scsi(%li): Unable to set a "
4269 "suitable DMA mask - aborting\n", ha->host_no);
4270 error = -ENODEV;
4271 goto error_put_host;
4272 }
4273 } else
4274 dprintk(2, "scsi(%li): 64 Bit PCI Addressing Enabled\n",
4275 ha->host_no);
4276#else
4277 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
4278 printk(KERN_WARNING "scsi(%li): Unable to set a "
4279 "suitable DMA mask - aborting\n", ha->host_no);
4280 error = -ENODEV;
4281 goto error_put_host;
4282 }
4283#endif
4284
4285 ha->request_ring = pci_alloc_consistent(ha->pdev,
4286 ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
4287 &ha->request_dma);
4288 if (!ha->request_ring) {
4289 printk(KERN_INFO "qla1280: Failed to get request memory\n");
4290 goto error_put_host;
4291 }
4292
4293 ha->response_ring = pci_alloc_consistent(ha->pdev,
4294 ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
4295 &ha->response_dma);
4296 if (!ha->response_ring) {
4297 printk(KERN_INFO "qla1280: Failed to get response memory\n");
4298 goto error_free_request_ring;
4299 }
4300
4301 ha->ports = bdp->numPorts;
4302
4303 ha->host = host;
4304 ha->host_no = host->host_no;
4305
4306 host->irq = pdev->irq;
4307 host->max_channel = bdp->numPorts - 1;
4308 host->max_lun = MAX_LUNS - 1;
4309 host->max_id = MAX_TARGETS;
4310 host->max_sectors = 1024;
4311 host->unique_id = host->host_no;
4312
4313 error = -ENODEV;
4314
4315#if MEMORY_MAPPED_IO
4316 ha->mmpbase = pci_ioremap_bar(ha->pdev, 1);
4317 if (!ha->mmpbase) {
4318 printk(KERN_INFO "qla1280: Unable to map I/O memory\n");
4319 goto error_free_response_ring;
4320 }
4321
4322 host->base = (unsigned long)ha->mmpbase;
4323 ha->iobase = (struct device_reg __iomem *)ha->mmpbase;
4324#else
4325 host->io_port = pci_resource_start(ha->pdev, 0);
4326 if (!request_region(host->io_port, 0xff, "qla1280")) {
4327 printk(KERN_INFO "qla1280: Failed to reserve i/o region "
4328 "0x%04lx-0x%04lx - already in use\n",
4329 host->io_port, host->io_port + 0xff);
4330 goto error_free_response_ring;
4331 }
4332
4333 ha->iobase = (struct device_reg *)host->io_port;
4334#endif
4335
4336 INIT_LIST_HEAD(&ha->done_q);
4337
4338
4339 qla1280_disable_intrs(ha);
4340
4341 if (request_irq(pdev->irq, qla1280_intr_handler, IRQF_SHARED,
4342 "qla1280", ha)) {
4343 printk("qla1280 : Failed to reserve interrupt %d already "
4344 "in use\n", pdev->irq);
4345 goto error_release_region;
4346 }
4347
4348
4349 if (qla1280_initialize_adapter(ha)) {
4350 printk(KERN_INFO "qla1x160: Failed to initialize adapter\n");
4351 goto error_free_irq;
4352 }
4353
4354
4355 host->this_id = ha->bus_settings[0].id;
4356
4357 pci_set_drvdata(pdev, host);
4358
4359 error = scsi_add_host(host, &pdev->dev);
4360 if (error)
4361 goto error_disable_adapter;
4362 scsi_scan_host(host);
4363
4364 return 0;
4365
4366 error_disable_adapter:
4367 qla1280_disable_intrs(ha);
4368 error_free_irq:
4369 free_irq(pdev->irq, ha);
4370 error_release_region:
4371#if MEMORY_MAPPED_IO
4372 iounmap(ha->mmpbase);
4373#else
4374 release_region(host->io_port, 0xff);
4375#endif
4376 error_free_response_ring:
4377 pci_free_consistent(ha->pdev,
4378 ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
4379 ha->response_ring, ha->response_dma);
4380 error_free_request_ring:
4381 pci_free_consistent(ha->pdev,
4382 ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
4383 ha->request_ring, ha->request_dma);
4384 error_put_host:
4385 scsi_host_put(host);
4386 error_disable_device:
4387 pci_disable_device(pdev);
4388 error:
4389 return error;
4390}
4391
4392
/*
 * qla1280_remove_one() - PCI hot-unplug / driver-unload callback.
 *
 * Tears down everything qla1280_probe_one() set up, in reverse order:
 * detach from the SCSI midlayer, quiesce and release the interrupt,
 * unmap/release the register window, free both DMA rings, disable the
 * PCI device, and finally drop the host reference.  The order matters:
 * no step may run while a later-acquired resource could still be in use.
 */
static void
qla1280_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;

	/* Stop the midlayer from issuing new commands before touching HW. */
	scsi_remove_host(host);

	/* Silence the chip, then it is safe to give the IRQ line back. */
	qla1280_disable_intrs(ha);

	free_irq(pdev->irq, ha);

#if MEMORY_MAPPED_IO
	iounmap(ha->mmpbase);
#else
	release_region(host->io_port, 0xff);
#endif

	/* Rings were sized with one spare entry in probe; free the same. */
	pci_free_consistent(ha->pdev,
			((REQUEST_ENTRY_CNT + 1) * (sizeof(request_t))),
			ha->request_ring, ha->request_dma);
	pci_free_consistent(ha->pdev,
			((RESPONSE_ENTRY_CNT + 1) * (sizeof(struct response))),
			ha->response_ring, ha->response_dma);

	pci_disable_device(pdev);

	/* Last reference: ha (host->hostdata) is invalid after this. */
	scsi_host_put(host);
}
4422
/* PCI glue: binds the IDs in qla1280_pci_tbl to the probe/remove pair. */
static struct pci_driver qla1280_pci_driver = {
	.name		= "qla1280",
	.id_table	= qla1280_pci_tbl,
	.probe		= qla1280_probe_one,
	.remove		= qla1280_remove_one,
};
4429
4430static int __init
4431qla1280_init(void)
4432{
4433 if (sizeof(struct srb) > sizeof(struct scsi_pointer)) {
4434 printk(KERN_WARNING
4435 "qla1280: struct srb too big, aborting\n");
4436 return -EINVAL;
4437 }
4438
4439#ifdef MODULE
4440
4441
4442
4443
4444
4445
4446
4447
4448
4449
4450
4451
4452 if (qla1280)
4453 qla1280_setup(qla1280);
4454#endif
4455
4456 return pci_register_driver(&qla1280_pci_driver);
4457}
4458
4459static void __exit
4460qla1280_exit(void)
4461{
4462 int i;
4463
4464 pci_unregister_driver(&qla1280_pci_driver);
4465
4466 for (i = 0; i < QL_NUM_FW_IMAGES; i++) {
4467 release_firmware(qla1280_fw_tbl[i].fw);
4468 qla1280_fw_tbl[i].fw = NULL;
4469 }
4470}
4471
module_init(qla1280_init);
module_exit(qla1280_exit);

/* Module metadata, including the firmware blobs request_firmware() loads. */
MODULE_AUTHOR("Qlogic & Jes Sorensen");
MODULE_DESCRIPTION("Qlogic ISP SCSI (qla1x80/qla1x160) driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("qlogic/1040.bin");
MODULE_FIRMWARE("qlogic/1280.bin");
MODULE_FIRMWARE("qlogic/12160.bin");
MODULE_VERSION(QLA1280_VERSION);
4482
4483
4484
4485
4486
4487
4488
4489
4490
4491
4492
4493
4494