1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#define QLA1280_VERSION "3.27.1"
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339#include <linux/module.h>
340
341#include <linux/types.h>
342#include <linux/string.h>
343#include <linux/errno.h>
344#include <linux/kernel.h>
345#include <linux/ioport.h>
346#include <linux/delay.h>
347#include <linux/timer.h>
348#include <linux/pci.h>
349#include <linux/proc_fs.h>
350#include <linux/stat.h>
351#include <linux/pci_ids.h>
352#include <linux/interrupt.h>
353#include <linux/init.h>
354#include <linux/dma-mapping.h>
355#include <linux/firmware.h>
356
357#include <asm/io.h>
358#include <asm/irq.h>
359#include <asm/byteorder.h>
360#include <asm/processor.h>
361#include <asm/types.h>
362
363#include <scsi/scsi.h>
364#include <scsi/scsi_cmnd.h>
365#include <scsi/scsi_device.h>
366#include <scsi/scsi_host.h>
367#include <scsi/scsi_tcq.h>
368
369#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
370#include <asm/sn/io.h>
371#endif
372
373
374
375
376
377
378#define DEBUG_QLA1280_INTR 0
379#define DEBUG_PRINT_NVRAM 0
380#define DEBUG_QLA1280 0
381
382#define MEMORY_MAPPED_IO 1
383
384#include "qla1280.h"
385
386#ifndef BITS_PER_LONG
387#error "BITS_PER_LONG not defined!"
388#endif
389#if (BITS_PER_LONG == 64) || defined CONFIG_HIGHMEM
390#define QLA_64BIT_PTR 1
391#endif
392
393#ifdef QLA_64BIT_PTR
394#define pci_dma_hi32(a) ((a >> 16) >> 16)
395#else
396#define pci_dma_hi32(a) 0
397#endif
398#define pci_dma_lo32(a) (a & 0xffffffff)
399
400#define NVRAM_DELAY() udelay(500)
401
402#if defined(__ia64__) && !defined(ia64_platform_is)
403#define ia64_platform_is(foo) (!strcmp(x, platform_name))
404#endif
405
406
407#define IS_ISP1040(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020)
408#define IS_ISP1x40(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020 || \
409 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1240)
410#define IS_ISP1x160(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP10160 || \
411 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP12160)
412
413
414static int qla1280_probe_one(struct pci_dev *, const struct pci_device_id *);
415static void qla1280_remove_one(struct pci_dev *);
416
417
418
419
420static void qla1280_done(struct scsi_qla_host *);
421static int qla1280_get_token(char *);
422static int qla1280_setup(char *s) __init;
423
424
425
426
427static int qla1280_load_firmware(struct scsi_qla_host *);
428static int qla1280_init_rings(struct scsi_qla_host *);
429static int qla1280_nvram_config(struct scsi_qla_host *);
430static int qla1280_mailbox_command(struct scsi_qla_host *,
431 uint8_t, uint16_t *);
432static int qla1280_bus_reset(struct scsi_qla_host *, int);
433static int qla1280_device_reset(struct scsi_qla_host *, int, int);
434static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int);
435static int qla1280_abort_isp(struct scsi_qla_host *);
436#ifdef QLA_64BIT_PTR
437static int qla1280_64bit_start_scsi(struct scsi_qla_host *, struct srb *);
438#else
439static int qla1280_32bit_start_scsi(struct scsi_qla_host *, struct srb *);
440#endif
441static void qla1280_nv_write(struct scsi_qla_host *, uint16_t);
442static void qla1280_poll(struct scsi_qla_host *);
443static void qla1280_reset_adapter(struct scsi_qla_host *);
444static void qla1280_marker(struct scsi_qla_host *, int, int, int, u8);
445static void qla1280_isp_cmd(struct scsi_qla_host *);
446static void qla1280_isr(struct scsi_qla_host *, struct list_head *);
447static void qla1280_rst_aen(struct scsi_qla_host *);
448static void qla1280_status_entry(struct scsi_qla_host *, struct response *,
449 struct list_head *);
450static void qla1280_error_entry(struct scsi_qla_host *, struct response *,
451 struct list_head *);
452static uint16_t qla1280_get_nvram_word(struct scsi_qla_host *, uint32_t);
453static uint16_t qla1280_nvram_request(struct scsi_qla_host *, uint32_t);
454static uint16_t qla1280_debounce_register(volatile uint16_t __iomem *);
455static request_t *qla1280_req_pkt(struct scsi_qla_host *);
456static int qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *,
457 unsigned int);
458static void qla1280_get_target_parameters(struct scsi_qla_host *,
459 struct scsi_device *);
460static int qla1280_set_target_parameters(struct scsi_qla_host *, int, int);
461
462
463static struct qla_driver_setup driver_setup;
464
465
466
467
468static inline uint16_t
469qla1280_data_direction(struct scsi_cmnd *cmnd)
470{
471 switch(cmnd->sc_data_direction) {
472 case DMA_FROM_DEVICE:
473 return BIT_5;
474 case DMA_TO_DEVICE:
475 return BIT_6;
476 case DMA_BIDIRECTIONAL:
477 return BIT_5 | BIT_6;
478
479
480
481
482
483 case DMA_NONE:
484 default:
485 return 0;
486 }
487}
488
489#if DEBUG_QLA1280
490static void __qla1280_print_scsi_cmd(struct scsi_cmnd * cmd);
491static void __qla1280_dump_buffer(char *, int);
492#endif
493
494
495
496
497
498#ifdef MODULE
499static char *qla1280;
500
501
502module_param(qla1280, charp, 0);
503#else
504__setup("qla1280=", qla1280_setup);
505#endif
506
507
508
509
510
511
512
513
514#define CMD_SP(Cmnd) &Cmnd->SCp
515#define CMD_CDBLEN(Cmnd) Cmnd->cmd_len
516#define CMD_CDBP(Cmnd) Cmnd->cmnd
517#define CMD_SNSP(Cmnd) Cmnd->sense_buffer
518#define CMD_SNSLEN(Cmnd) SCSI_SENSE_BUFFERSIZE
519#define CMD_RESULT(Cmnd) Cmnd->result
520#define CMD_HANDLE(Cmnd) Cmnd->host_scribble
521#define CMD_REQUEST(Cmnd) Cmnd->request->cmd
522
523#define CMD_HOST(Cmnd) Cmnd->device->host
524#define SCSI_BUS_32(Cmnd) Cmnd->device->channel
525#define SCSI_TCN_32(Cmnd) Cmnd->device->id
526#define SCSI_LUN_32(Cmnd) Cmnd->device->lun
527
528
529
530
531
532
533struct qla_boards {
534 char *name;
535 int numPorts;
536 int fw_index;
537};
538
539
540static struct pci_device_id qla1280_pci_tbl[] = {
541 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP12160,
542 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
543 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1020,
544 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
545 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1080,
546 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
547 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1240,
548 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3},
549 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1280,
550 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
551 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP10160,
552 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
553 {0,}
554};
555MODULE_DEVICE_TABLE(pci, qla1280_pci_tbl);
556
557DEFINE_MUTEX(qla1280_firmware_mutex);
558
559struct qla_fw {
560 char *fwname;
561 const struct firmware *fw;
562};
563
564#define QL_NUM_FW_IMAGES 3
565
566struct qla_fw qla1280_fw_tbl[QL_NUM_FW_IMAGES] = {
567 {"qlogic/1040.bin", NULL},
568 {"qlogic/1280.bin", NULL},
569 {"qlogic/12160.bin", NULL},
570};
571
572
573static struct qla_boards ql1280_board_tbl[] = {
574 {.name = "QLA12160", .numPorts = 2, .fw_index = 2},
575 {.name = "QLA1040" , .numPorts = 1, .fw_index = 0},
576 {.name = "QLA1080" , .numPorts = 1, .fw_index = 1},
577 {.name = "QLA1240" , .numPorts = 2, .fw_index = 1},
578 {.name = "QLA1280" , .numPorts = 2, .fw_index = 1},
579 {.name = "QLA10160", .numPorts = 1, .fw_index = 2},
580 {.name = " ", .numPorts = 0, .fw_index = -1},
581};
582
583static int qla1280_verbose = 1;
584
585#if DEBUG_QLA1280
586static int ql_debug_level = 1;
587#define dprintk(level, format, a...) \
588 do { if (ql_debug_level >= level) printk(KERN_ERR format, ##a); } while(0)
589#define qla1280_dump_buffer(level, buf, size) \
590 if (ql_debug_level >= level) __qla1280_dump_buffer(buf, size)
591#define qla1280_print_scsi_cmd(level, cmd) \
592 if (ql_debug_level >= level) __qla1280_print_scsi_cmd(cmd)
593#else
594#define ql_debug_level 0
595#define dprintk(level, format, a...) do{}while(0)
596#define qla1280_dump_buffer(a, b, c) do{}while(0)
597#define qla1280_print_scsi_cmd(a, b) do{}while(0)
598#endif
599
600#define ENTER(x) dprintk(3, "qla1280 : Entering %s()\n", x);
601#define LEAVE(x) dprintk(3, "qla1280 : Leaving %s()\n", x);
602#define ENTER_INTR(x) dprintk(4, "qla1280 : Entering %s()\n", x);
603#define LEAVE_INTR(x) dprintk(4, "qla1280 : Leaving %s()\n", x);
604
605
606static int qla1280_read_nvram(struct scsi_qla_host *ha)
607{
608 uint16_t *wptr;
609 uint8_t chksum;
610 int cnt, i;
611 struct nvram *nv;
612
613 ENTER("qla1280_read_nvram");
614
615 if (driver_setup.no_nvram)
616 return 1;
617
618 printk(KERN_INFO "scsi(%ld): Reading NVRAM\n", ha->host_no);
619
620 wptr = (uint16_t *)&ha->nvram;
621 nv = &ha->nvram;
622 chksum = 0;
623 for (cnt = 0; cnt < 3; cnt++) {
624 *wptr = qla1280_get_nvram_word(ha, cnt);
625 chksum += *wptr & 0xff;
626 chksum += (*wptr >> 8) & 0xff;
627 wptr++;
628 }
629
630 if (nv->id0 != 'I' || nv->id1 != 'S' ||
631 nv->id2 != 'P' || nv->id3 != ' ' || nv->version < 1) {
632 dprintk(2, "Invalid nvram ID or version!\n");
633 chksum = 1;
634 } else {
635 for (; cnt < sizeof(struct nvram); cnt++) {
636 *wptr = qla1280_get_nvram_word(ha, cnt);
637 chksum += *wptr & 0xff;
638 chksum += (*wptr >> 8) & 0xff;
639 wptr++;
640 }
641 }
642
643 dprintk(3, "qla1280_read_nvram: NVRAM Magic ID= %c %c %c %02x"
644 " version %i\n", nv->id0, nv->id1, nv->id2, nv->id3,
645 nv->version);
646
647
648 if (chksum) {
649 if (!driver_setup.no_nvram)
650 printk(KERN_WARNING "scsi(%ld): Unable to identify or "
651 "validate NVRAM checksum, using default "
652 "settings\n", ha->host_no);
653 ha->nvram_valid = 0;
654 } else
655 ha->nvram_valid = 1;
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673 nv->isp_parameter = cpu_to_le16(nv->isp_parameter);
674 nv->firmware_feature.w = cpu_to_le16(nv->firmware_feature.w);
675 for(i = 0; i < MAX_BUSES; i++) {
676 nv->bus[i].selection_timeout = cpu_to_le16(nv->bus[i].selection_timeout);
677 nv->bus[i].max_queue_depth = cpu_to_le16(nv->bus[i].max_queue_depth);
678 }
679 dprintk(1, "qla1280_read_nvram: Completed Reading NVRAM\n");
680 LEAVE("qla1280_read_nvram");
681
682 return chksum;
683}
684
685
686
687
688
689static const char *
690qla1280_info(struct Scsi_Host *host)
691{
692 static char qla1280_scsi_name_buffer[125];
693 char *bp;
694 struct scsi_qla_host *ha;
695 struct qla_boards *bdp;
696
697 bp = &qla1280_scsi_name_buffer[0];
698 ha = (struct scsi_qla_host *)host->hostdata;
699 bdp = &ql1280_board_tbl[ha->devnum];
700 memset(bp, 0, sizeof(qla1280_scsi_name_buffer));
701
702 sprintf (bp,
703 "QLogic %s PCI to SCSI Host Adapter\n"
704 " Firmware version: %2d.%02d.%02d, Driver version %s",
705 &bdp->name[0], ha->fwver1, ha->fwver2, ha->fwver3,
706 QLA1280_VERSION);
707 return bp;
708}
709
710
711
712
713
714
715
716
717
718
719
720
721static int
722qla1280_queuecommand_lck(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
723{
724 struct Scsi_Host *host = cmd->device->host;
725 struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
726 struct srb *sp = (struct srb *)CMD_SP(cmd);
727 int status;
728
729 cmd->scsi_done = fn;
730 sp->cmd = cmd;
731 sp->flags = 0;
732 sp->wait = NULL;
733 CMD_HANDLE(cmd) = (unsigned char *)NULL;
734
735 qla1280_print_scsi_cmd(5, cmd);
736
737#ifdef QLA_64BIT_PTR
738
739
740
741
742
743
744 status = qla1280_64bit_start_scsi(ha, sp);
745#else
746 status = qla1280_32bit_start_scsi(ha, sp);
747#endif
748 return status;
749}
750
751static DEF_SCSI_QCMD(qla1280_queuecommand)
752
753enum action {
754 ABORT_COMMAND,
755 DEVICE_RESET,
756 BUS_RESET,
757 ADAPTER_RESET,
758};
759
760
761static void qla1280_mailbox_timeout(struct timer_list *t)
762{
763 struct scsi_qla_host *ha = from_timer(ha, t, mailbox_timer);
764 struct device_reg __iomem *reg;
765 reg = ha->iobase;
766
767 ha->mailbox_out[0] = RD_REG_WORD(®->mailbox0);
768 printk(KERN_ERR "scsi(%ld): mailbox timed out, mailbox0 %04x, "
769 "ictrl %04x, istatus %04x\n", ha->host_no, ha->mailbox_out[0],
770 RD_REG_WORD(®->ictrl), RD_REG_WORD(®->istatus));
771 complete(ha->mailbox_wait);
772}
773
774static int
775_qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp,
776 struct completion *wait)
777{
778 int status = FAILED;
779 struct scsi_cmnd *cmd = sp->cmd;
780
781 spin_unlock_irq(ha->host->host_lock);
782 wait_for_completion_timeout(wait, 4*HZ);
783 spin_lock_irq(ha->host->host_lock);
784 sp->wait = NULL;
785 if(CMD_HANDLE(cmd) == COMPLETED_HANDLE) {
786 status = SUCCESS;
787 (*cmd->scsi_done)(cmd);
788 }
789 return status;
790}
791
792static int
793qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp)
794{
795 DECLARE_COMPLETION_ONSTACK(wait);
796
797 sp->wait = &wait;
798 return _qla1280_wait_for_single_command(ha, sp, &wait);
799}
800
801static int
802qla1280_wait_for_pending_commands(struct scsi_qla_host *ha, int bus, int target)
803{
804 int cnt;
805 int status;
806 struct srb *sp;
807 struct scsi_cmnd *cmd;
808
809 status = SUCCESS;
810
811
812
813
814
815 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
816 sp = ha->outstanding_cmds[cnt];
817 if (sp) {
818 cmd = sp->cmd;
819
820 if (bus >= 0 && SCSI_BUS_32(cmd) != bus)
821 continue;
822 if (target >= 0 && SCSI_TCN_32(cmd) != target)
823 continue;
824
825 status = qla1280_wait_for_single_command(ha, sp);
826 if (status == FAILED)
827 break;
828 }
829 }
830 return status;
831}
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847static int
848qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
849{
850 struct scsi_qla_host *ha;
851 int bus, target, lun;
852 struct srb *sp;
853 int i, found;
854 int result=FAILED;
855 int wait_for_bus=-1;
856 int wait_for_target = -1;
857 DECLARE_COMPLETION_ONSTACK(wait);
858
859 ENTER("qla1280_error_action");
860
861 ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata);
862 sp = (struct srb *)CMD_SP(cmd);
863 bus = SCSI_BUS_32(cmd);
864 target = SCSI_TCN_32(cmd);
865 lun = SCSI_LUN_32(cmd);
866
867 dprintk(4, "error_action %i, istatus 0x%04x\n", action,
868 RD_REG_WORD(&ha->iobase->istatus));
869
870 dprintk(4, "host_cmd 0x%04x, ictrl 0x%04x, jiffies %li\n",
871 RD_REG_WORD(&ha->iobase->host_cmd),
872 RD_REG_WORD(&ha->iobase->ictrl), jiffies);
873
874 if (qla1280_verbose)
875 printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, "
876 "Handle=0x%p, action=0x%x\n",
877 ha->host_no, cmd, CMD_HANDLE(cmd), action);
878
879
880
881
882
883
884
885 found = -1;
886 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
887 if (sp == ha->outstanding_cmds[i]) {
888 found = i;
889 sp->wait = &wait;
890 break;
891 }
892 }
893
894 if (found < 0) {
895 result = SUCCESS;
896 if (qla1280_verbose) {
897 printk(KERN_INFO
898 "scsi(%ld:%d:%d:%d): specified command has "
899 "already completed.\n", ha->host_no, bus,
900 target, lun);
901 }
902 }
903
904 switch (action) {
905
906 case ABORT_COMMAND:
907 dprintk(1, "qla1280: RISC aborting command\n");
908
909
910
911
912
913 if (found >= 0)
914 qla1280_abort_command(ha, sp, found);
915 break;
916
917 case DEVICE_RESET:
918 if (qla1280_verbose)
919 printk(KERN_INFO
920 "scsi(%ld:%d:%d:%d): Queueing device reset "
921 "command.\n", ha->host_no, bus, target, lun);
922 if (qla1280_device_reset(ha, bus, target) == 0) {
923
924 wait_for_bus = bus;
925 wait_for_target = target;
926 }
927 break;
928
929 case BUS_RESET:
930 if (qla1280_verbose)
931 printk(KERN_INFO "qla1280(%ld:%d): Issued bus "
932 "reset.\n", ha->host_no, bus);
933 if (qla1280_bus_reset(ha, bus) == 0) {
934
935 wait_for_bus = bus;
936 }
937 break;
938
939 case ADAPTER_RESET:
940 default:
941 if (qla1280_verbose) {
942 printk(KERN_INFO
943 "scsi(%ld): Issued ADAPTER RESET\n",
944 ha->host_no);
945 printk(KERN_INFO "scsi(%ld): I/O processing will "
946 "continue automatically\n", ha->host_no);
947 }
948 ha->flags.reset_active = 1;
949
950 if (qla1280_abort_isp(ha) != 0) {
951 result = FAILED;
952 }
953
954 ha->flags.reset_active = 0;
955 }
956
957
958
959
960
961
962
963
964
965
966 if (found >= 0)
967 result = _qla1280_wait_for_single_command(ha, sp, &wait);
968
969 if (action == ABORT_COMMAND && result != SUCCESS) {
970 printk(KERN_WARNING
971 "scsi(%li:%i:%i:%i): "
972 "Unable to abort command!\n",
973 ha->host_no, bus, target, lun);
974 }
975
976
977
978
979
980
981
982
983
984
985
986 if (result == SUCCESS && wait_for_bus >= 0) {
987 result = qla1280_wait_for_pending_commands(ha,
988 wait_for_bus, wait_for_target);
989 }
990
991 dprintk(1, "RESET returning %d\n", result);
992
993 LEAVE("qla1280_error_action");
994 return result;
995}
996
997
998
999
1000
1001static int
1002qla1280_eh_abort(struct scsi_cmnd * cmd)
1003{
1004 int rc;
1005
1006 spin_lock_irq(cmd->device->host->host_lock);
1007 rc = qla1280_error_action(cmd, ABORT_COMMAND);
1008 spin_unlock_irq(cmd->device->host->host_lock);
1009
1010 return rc;
1011}
1012
1013
1014
1015
1016
1017static int
1018qla1280_eh_device_reset(struct scsi_cmnd *cmd)
1019{
1020 int rc;
1021
1022 spin_lock_irq(cmd->device->host->host_lock);
1023 rc = qla1280_error_action(cmd, DEVICE_RESET);
1024 spin_unlock_irq(cmd->device->host->host_lock);
1025
1026 return rc;
1027}
1028
1029
1030
1031
1032
1033static int
1034qla1280_eh_bus_reset(struct scsi_cmnd *cmd)
1035{
1036 int rc;
1037
1038 spin_lock_irq(cmd->device->host->host_lock);
1039 rc = qla1280_error_action(cmd, BUS_RESET);
1040 spin_unlock_irq(cmd->device->host->host_lock);
1041
1042 return rc;
1043}
1044
1045
1046
1047
1048
1049static int
1050qla1280_eh_adapter_reset(struct scsi_cmnd *cmd)
1051{
1052 int rc;
1053
1054 spin_lock_irq(cmd->device->host->host_lock);
1055 rc = qla1280_error_action(cmd, ADAPTER_RESET);
1056 spin_unlock_irq(cmd->device->host->host_lock);
1057
1058 return rc;
1059}
1060
1061static int
1062qla1280_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1063 sector_t capacity, int geom[])
1064{
1065 int heads, sectors, cylinders;
1066
1067 heads = 64;
1068 sectors = 32;
1069 cylinders = (unsigned long)capacity / (heads * sectors);
1070 if (cylinders > 1024) {
1071 heads = 255;
1072 sectors = 63;
1073 cylinders = (unsigned long)capacity / (heads * sectors);
1074
1075
1076 }
1077
1078 geom[0] = heads;
1079 geom[1] = sectors;
1080 geom[2] = cylinders;
1081
1082 return 0;
1083}
1084
1085
1086
1087static inline void
1088qla1280_disable_intrs(struct scsi_qla_host *ha)
1089{
1090 WRT_REG_WORD(&ha->iobase->ictrl, 0);
1091 RD_REG_WORD(&ha->iobase->ictrl);
1092}
1093
1094
1095static inline void
1096qla1280_enable_intrs(struct scsi_qla_host *ha)
1097{
1098 WRT_REG_WORD(&ha->iobase->ictrl, (ISP_EN_INT | ISP_EN_RISC));
1099 RD_REG_WORD(&ha->iobase->ictrl);
1100}
1101
1102
1103
1104
1105
1106static irqreturn_t
1107qla1280_intr_handler(int irq, void *dev_id)
1108{
1109 struct scsi_qla_host *ha;
1110 struct device_reg __iomem *reg;
1111 u16 data;
1112 int handled = 0;
1113
1114 ENTER_INTR ("qla1280_intr_handler");
1115 ha = (struct scsi_qla_host *)dev_id;
1116
1117 spin_lock(ha->host->host_lock);
1118
1119 ha->isr_count++;
1120 reg = ha->iobase;
1121
1122 qla1280_disable_intrs(ha);
1123
1124 data = qla1280_debounce_register(®->istatus);
1125
1126 if (data & RISC_INT) {
1127 qla1280_isr(ha, &ha->done_q);
1128 handled = 1;
1129 }
1130 if (!list_empty(&ha->done_q))
1131 qla1280_done(ha);
1132
1133 spin_unlock(ha->host->host_lock);
1134
1135 qla1280_enable_intrs(ha);
1136
1137 LEAVE_INTR("qla1280_intr_handler");
1138 return IRQ_RETVAL(handled);
1139}
1140
1141
1142static int
1143qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target)
1144{
1145 uint8_t mr;
1146 uint16_t mb[MAILBOX_REGISTER_COUNT];
1147 struct nvram *nv;
1148 int status, lun;
1149
1150 nv = &ha->nvram;
1151
1152 mr = BIT_3 | BIT_2 | BIT_1 | BIT_0;
1153
1154
1155 mb[0] = MBC_SET_TARGET_PARAMETERS;
1156 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
1157 mb[2] = nv->bus[bus].target[target].parameter.renegotiate_on_error << 8;
1158 mb[2] |= nv->bus[bus].target[target].parameter.stop_queue_on_check << 9;
1159 mb[2] |= nv->bus[bus].target[target].parameter.auto_request_sense << 10;
1160 mb[2] |= nv->bus[bus].target[target].parameter.tag_queuing << 11;
1161 mb[2] |= nv->bus[bus].target[target].parameter.enable_sync << 12;
1162 mb[2] |= nv->bus[bus].target[target].parameter.enable_wide << 13;
1163 mb[2] |= nv->bus[bus].target[target].parameter.parity_checking << 14;
1164 mb[2] |= nv->bus[bus].target[target].parameter.disconnect_allowed << 15;
1165
1166 if (IS_ISP1x160(ha)) {
1167 mb[2] |= nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr << 5;
1168 mb[3] = (nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8);
1169 mb[6] = (nv->bus[bus].target[target].ppr_1x160.flags.ppr_options << 8) |
1170 nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width;
1171 mr |= BIT_6;
1172 } else {
1173 mb[3] = (nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8);
1174 }
1175 mb[3] |= nv->bus[bus].target[target].sync_period;
1176
1177 status = qla1280_mailbox_command(ha, mr, mb);
1178
1179
1180 for (lun = 0; lun < MAX_LUNS; lun++) {
1181 mb[0] = MBC_SET_DEVICE_QUEUE;
1182 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
1183 mb[1] |= lun;
1184 mb[2] = nv->bus[bus].max_queue_depth;
1185 mb[3] = nv->bus[bus].target[target].execution_throttle;
1186 status |= qla1280_mailbox_command(ha, 0x0f, mb);
1187 }
1188
1189 if (status)
1190 printk(KERN_WARNING "scsi(%ld:%i:%i): "
1191 "qla1280_set_target_parameters() failed\n",
1192 ha->host_no, bus, target);
1193 return status;
1194}
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208static int
1209qla1280_slave_configure(struct scsi_device *device)
1210{
1211 struct scsi_qla_host *ha;
1212 int default_depth = 3;
1213 int bus = device->channel;
1214 int target = device->id;
1215 int status = 0;
1216 struct nvram *nv;
1217 unsigned long flags;
1218
1219 ha = (struct scsi_qla_host *)device->host->hostdata;
1220 nv = &ha->nvram;
1221
1222 if (qla1280_check_for_dead_scsi_bus(ha, bus))
1223 return 1;
1224
1225 if (device->tagged_supported &&
1226 (ha->bus_settings[bus].qtag_enables & (BIT_0 << target))) {
1227 scsi_change_queue_depth(device, ha->bus_settings[bus].hiwat);
1228 } else {
1229 scsi_change_queue_depth(device, default_depth);
1230 }
1231
1232 nv->bus[bus].target[target].parameter.enable_sync = device->sdtr;
1233 nv->bus[bus].target[target].parameter.enable_wide = device->wdtr;
1234 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = device->ppr;
1235
1236 if (driver_setup.no_sync ||
1237 (driver_setup.sync_mask &&
1238 (~driver_setup.sync_mask & (1 << target))))
1239 nv->bus[bus].target[target].parameter.enable_sync = 0;
1240 if (driver_setup.no_wide ||
1241 (driver_setup.wide_mask &&
1242 (~driver_setup.wide_mask & (1 << target))))
1243 nv->bus[bus].target[target].parameter.enable_wide = 0;
1244 if (IS_ISP1x160(ha)) {
1245 if (driver_setup.no_ppr ||
1246 (driver_setup.ppr_mask &&
1247 (~driver_setup.ppr_mask & (1 << target))))
1248 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 0;
1249 }
1250
1251 spin_lock_irqsave(ha->host->host_lock, flags);
1252 if (nv->bus[bus].target[target].parameter.enable_sync)
1253 status = qla1280_set_target_parameters(ha, bus, target);
1254 qla1280_get_target_parameters(ha, device);
1255 spin_unlock_irqrestore(ha->host->host_lock, flags);
1256 return status;
1257}
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267static void
1268qla1280_done(struct scsi_qla_host *ha)
1269{
1270 struct srb *sp;
1271 struct list_head *done_q;
1272 int bus, target, lun;
1273 struct scsi_cmnd *cmd;
1274
1275 ENTER("qla1280_done");
1276
1277 done_q = &ha->done_q;
1278
1279 while (!list_empty(done_q)) {
1280 sp = list_entry(done_q->next, struct srb, list);
1281
1282 list_del(&sp->list);
1283
1284 cmd = sp->cmd;
1285 bus = SCSI_BUS_32(cmd);
1286 target = SCSI_TCN_32(cmd);
1287 lun = SCSI_LUN_32(cmd);
1288
1289 switch ((CMD_RESULT(cmd) >> 16)) {
1290 case DID_RESET:
1291
1292 if (!ha->flags.abort_isp_active)
1293 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
1294 break;
1295 case DID_ABORT:
1296 sp->flags &= ~SRB_ABORT_PENDING;
1297 sp->flags |= SRB_ABORTED;
1298 break;
1299 default:
1300 break;
1301 }
1302
1303
1304 scsi_dma_unmap(cmd);
1305
1306
1307 ha->actthreads--;
1308
1309 if (sp->wait == NULL)
1310 (*(cmd)->scsi_done)(cmd);
1311 else
1312 complete(sp->wait);
1313 }
1314 LEAVE("qla1280_done");
1315}
1316
1317
1318
1319
1320static int
1321qla1280_return_status(struct response * sts, struct scsi_cmnd *cp)
1322{
1323 int host_status = DID_ERROR;
1324 uint16_t comp_status = le16_to_cpu(sts->comp_status);
1325 uint16_t state_flags = le16_to_cpu(sts->state_flags);
1326 uint32_t residual_length = le32_to_cpu(sts->residual_length);
1327 uint16_t scsi_status = le16_to_cpu(sts->scsi_status);
1328#if DEBUG_QLA1280_INTR
1329 static char *reason[] = {
1330 "DID_OK",
1331 "DID_NO_CONNECT",
1332 "DID_BUS_BUSY",
1333 "DID_TIME_OUT",
1334 "DID_BAD_TARGET",
1335 "DID_ABORT",
1336 "DID_PARITY",
1337 "DID_ERROR",
1338 "DID_RESET",
1339 "DID_BAD_INTR"
1340 };
1341#endif
1342
1343 ENTER("qla1280_return_status");
1344
1345#if DEBUG_QLA1280_INTR
1346
1347
1348
1349
1350#endif
1351
1352 switch (comp_status) {
1353 case CS_COMPLETE:
1354 host_status = DID_OK;
1355 break;
1356
1357 case CS_INCOMPLETE:
1358 if (!(state_flags & SF_GOT_BUS))
1359 host_status = DID_NO_CONNECT;
1360 else if (!(state_flags & SF_GOT_TARGET))
1361 host_status = DID_BAD_TARGET;
1362 else if (!(state_flags & SF_SENT_CDB))
1363 host_status = DID_ERROR;
1364 else if (!(state_flags & SF_TRANSFERRED_DATA))
1365 host_status = DID_ERROR;
1366 else if (!(state_flags & SF_GOT_STATUS))
1367 host_status = DID_ERROR;
1368 else if (!(state_flags & SF_GOT_SENSE))
1369 host_status = DID_ERROR;
1370 break;
1371
1372 case CS_RESET:
1373 host_status = DID_RESET;
1374 break;
1375
1376 case CS_ABORTED:
1377 host_status = DID_ABORT;
1378 break;
1379
1380 case CS_TIMEOUT:
1381 host_status = DID_TIME_OUT;
1382 break;
1383
1384 case CS_DATA_OVERRUN:
1385 dprintk(2, "Data overrun 0x%x\n", residual_length);
1386 dprintk(2, "qla1280_return_status: response packet data\n");
1387 qla1280_dump_buffer(2, (char *)sts, RESPONSE_ENTRY_SIZE);
1388 host_status = DID_ERROR;
1389 break;
1390
1391 case CS_DATA_UNDERRUN:
1392 if ((scsi_bufflen(cp) - residual_length) <
1393 cp->underflow) {
1394 printk(KERN_WARNING
1395 "scsi: Underflow detected - retrying "
1396 "command.\n");
1397 host_status = DID_ERROR;
1398 } else {
1399 scsi_set_resid(cp, residual_length);
1400 host_status = DID_OK;
1401 }
1402 break;
1403
1404 default:
1405 host_status = DID_ERROR;
1406 break;
1407 }
1408
1409#if DEBUG_QLA1280_INTR
1410 dprintk(1, "qla1280 ISP status: host status (%s) scsi status %x\n",
1411 reason[host_status], scsi_status);
1412#endif
1413
1414 LEAVE("qla1280_return_status");
1415
1416 return (scsi_status & 0xff) | (host_status << 16);
1417}
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433static int
1434qla1280_initialize_adapter(struct scsi_qla_host *ha)
1435{
1436 struct device_reg __iomem *reg;
1437 int status;
1438 int bus;
1439 unsigned long flags;
1440
1441 ENTER("qla1280_initialize_adapter");
1442
1443
1444 ha->flags.online = 0;
1445 ha->flags.disable_host_adapter = 0;
1446 ha->flags.reset_active = 0;
1447 ha->flags.abort_isp_active = 0;
1448
1449#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
1450 if (ia64_platform_is("sn2")) {
1451 printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
1452 "dual channel lockup workaround\n", ha->host_no);
1453 ha->flags.use_pci_vchannel = 1;
1454 driver_setup.no_nvram = 1;
1455 }
1456#endif
1457
1458
1459 if (IS_ISP1040(ha))
1460 driver_setup.no_nvram = 1;
1461
1462 dprintk(1, "Configure PCI space for adapter...\n");
1463
1464 reg = ha->iobase;
1465
1466
1467 WRT_REG_WORD(®->semaphore, 0);
1468 WRT_REG_WORD(®->host_cmd, HC_CLR_RISC_INT);
1469 WRT_REG_WORD(®->host_cmd, HC_CLR_HOST_INT);
1470 RD_REG_WORD(®->host_cmd);
1471
1472 if (qla1280_read_nvram(ha)) {
1473 dprintk(2, "qla1280_initialize_adapter: failed to read "
1474 "NVRAM\n");
1475 }
1476
1477
1478
1479
1480
1481
1482 spin_lock_irqsave(ha->host->host_lock, flags);
1483
1484 status = qla1280_load_firmware(ha);
1485 if (status) {
1486 printk(KERN_ERR "scsi(%li): initialize: pci probe failed!\n",
1487 ha->host_no);
1488 goto out;
1489 }
1490
1491
1492 dprintk(1, "scsi(%ld): Configure NVRAM parameters\n", ha->host_no);
1493 qla1280_nvram_config(ha);
1494
1495 if (ha->flags.disable_host_adapter) {
1496 status = 1;
1497 goto out;
1498 }
1499
1500 status = qla1280_init_rings(ha);
1501 if (status)
1502 goto out;
1503
1504
1505 for (bus = 0; bus < ha->ports; bus++) {
1506 if (!ha->bus_settings[bus].disable_scsi_reset &&
1507 qla1280_bus_reset(ha, bus) &&
1508 qla1280_bus_reset(ha, bus))
1509 ha->bus_settings[bus].scsi_bus_dead = 1;
1510 }
1511
1512 ha->flags.online = 1;
1513 out:
1514 spin_unlock_irqrestore(ha->host->host_lock, flags);
1515
1516 if (status)
1517 dprintk(2, "qla1280_initialize_adapter: **** FAILED ****\n");
1518
1519 LEAVE("qla1280_initialize_adapter");
1520 return status;
1521}
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535static const struct firmware *
1536qla1280_request_firmware(struct scsi_qla_host *ha)
1537{
1538 const struct firmware *fw;
1539 int err;
1540 int index;
1541 char *fwname;
1542
1543 spin_unlock_irq(ha->host->host_lock);
1544 mutex_lock(&qla1280_firmware_mutex);
1545
1546 index = ql1280_board_tbl[ha->devnum].fw_index;
1547 fw = qla1280_fw_tbl[index].fw;
1548 if (fw)
1549 goto out;
1550
1551 fwname = qla1280_fw_tbl[index].fwname;
1552 err = request_firmware(&fw, fwname, &ha->pdev->dev);
1553
1554 if (err) {
1555 printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
1556 fwname, err);
1557 fw = ERR_PTR(err);
1558 goto unlock;
1559 }
1560 if ((fw->size % 2) || (fw->size < 6)) {
1561 printk(KERN_ERR "Invalid firmware length %zu in image \"%s\"\n",
1562 fw->size, fwname);
1563 release_firmware(fw);
1564 fw = ERR_PTR(-EINVAL);
1565 goto unlock;
1566 }
1567
1568 qla1280_fw_tbl[index].fw = fw;
1569
1570 out:
1571 ha->fwver1 = fw->data[0];
1572 ha->fwver2 = fw->data[1];
1573 ha->fwver3 = fw->data[2];
1574 unlock:
1575 mutex_unlock(&qla1280_firmware_mutex);
1576 spin_lock_irq(ha->host->host_lock);
1577 return fw;
1578}
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590static int
1591qla1280_chip_diag(struct scsi_qla_host *ha)
1592{
1593 uint16_t mb[MAILBOX_REGISTER_COUNT];
1594 struct device_reg __iomem *reg = ha->iobase;
1595 int status = 0;
1596 int cnt;
1597 uint16_t data;
1598 dprintk(3, "qla1280_chip_diag: testing device at 0x%p \n", ®->id_l);
1599
1600 dprintk(1, "scsi(%ld): Verifying chip\n", ha->host_no);
1601
1602
1603 WRT_REG_WORD(®->ictrl, ISP_RESET);
1604
1605
1606
1607
1608
1609
1610
1611
1612 udelay(20);
1613 data = qla1280_debounce_register(®->ictrl);
1614
1615
1616
1617 for (cnt = 1000000; cnt && data & ISP_RESET; cnt--) {
1618 udelay(5);
1619 data = RD_REG_WORD(®->ictrl);
1620 }
1621
1622 if (!cnt)
1623 goto fail;
1624
1625
1626 dprintk(3, "qla1280_chip_diag: reset register cleared by chip reset\n");
1627
1628 WRT_REG_WORD(®->cfg_1, 0);
1629
1630
1631
1632 WRT_REG_WORD(®->host_cmd, HC_RESET_RISC |
1633 HC_RELEASE_RISC | HC_DISABLE_BIOS);
1634
1635 RD_REG_WORD(®->id_l);
1636 data = qla1280_debounce_register(®->mailbox0);
1637
1638
1639
1640
1641 for (cnt = 1000000; cnt && data == MBS_BUSY; cnt--) {
1642 udelay(5);
1643 data = RD_REG_WORD(®->mailbox0);
1644 }
1645
1646 if (!cnt)
1647 goto fail;
1648
1649
1650 dprintk(3, "qla1280_chip_diag: Checking product ID of chip\n");
1651
1652 if (RD_REG_WORD(®->mailbox1) != PROD_ID_1 ||
1653 (RD_REG_WORD(®->mailbox2) != PROD_ID_2 &&
1654 RD_REG_WORD(®->mailbox2) != PROD_ID_2a) ||
1655 RD_REG_WORD(®->mailbox3) != PROD_ID_3 ||
1656 RD_REG_WORD(®->mailbox4) != PROD_ID_4) {
1657 printk(KERN_INFO "qla1280: Wrong product ID = "
1658 "0x%x,0x%x,0x%x,0x%x\n",
1659 RD_REG_WORD(®->mailbox1),
1660 RD_REG_WORD(®->mailbox2),
1661 RD_REG_WORD(®->mailbox3),
1662 RD_REG_WORD(®->mailbox4));
1663 goto fail;
1664 }
1665
1666
1667
1668
1669 qla1280_enable_intrs(ha);
1670
1671 dprintk(1, "qla1280_chip_diag: Checking mailboxes of chip\n");
1672
1673 mb[0] = MBC_MAILBOX_REGISTER_TEST;
1674 mb[1] = 0xAAAA;
1675 mb[2] = 0x5555;
1676 mb[3] = 0xAA55;
1677 mb[4] = 0x55AA;
1678 mb[5] = 0xA5A5;
1679 mb[6] = 0x5A5A;
1680 mb[7] = 0x2525;
1681
1682 status = qla1280_mailbox_command(ha, 0xff, mb);
1683 if (status)
1684 goto fail;
1685
1686 if (mb[1] != 0xAAAA || mb[2] != 0x5555 || mb[3] != 0xAA55 ||
1687 mb[4] != 0x55AA || mb[5] != 0xA5A5 || mb[6] != 0x5A5A ||
1688 mb[7] != 0x2525) {
1689 printk(KERN_INFO "qla1280: Failed mbox check\n");
1690 goto fail;
1691 }
1692
1693 dprintk(3, "qla1280_chip_diag: exiting normally\n");
1694 return 0;
1695 fail:
1696 dprintk(2, "qla1280_chip_diag: **** FAILED ****\n");
1697 return status;
1698}
1699
1700static int
1701qla1280_load_firmware_pio(struct scsi_qla_host *ha)
1702{
1703
1704
1705 const struct firmware *fw;
1706 const __le16 *fw_data;
1707 uint16_t risc_address, risc_code_size;
1708 uint16_t mb[MAILBOX_REGISTER_COUNT], i;
1709 int err = 0;
1710
1711 fw = qla1280_request_firmware(ha);
1712 if (IS_ERR(fw))
1713 return PTR_ERR(fw);
1714
1715 fw_data = (const __le16 *)&fw->data[0];
1716 ha->fwstart = __le16_to_cpu(fw_data[2]);
1717
1718
1719 risc_address = ha->fwstart;
1720 fw_data = (const __le16 *)&fw->data[6];
1721 risc_code_size = (fw->size - 6) / 2;
1722
1723 for (i = 0; i < risc_code_size; i++) {
1724 mb[0] = MBC_WRITE_RAM_WORD;
1725 mb[1] = risc_address + i;
1726 mb[2] = __le16_to_cpu(fw_data[i]);
1727
1728 err = qla1280_mailbox_command(ha, BIT_0 | BIT_1 | BIT_2, mb);
1729 if (err) {
1730 printk(KERN_ERR "scsi(%li): Failed to load firmware\n",
1731 ha->host_no);
1732 break;
1733 }
1734 }
1735
1736 return err;
1737}
1738
1739#define DUMP_IT_BACK 0
1740static int
1741qla1280_load_firmware_dma(struct scsi_qla_host *ha)
1742{
1743
1744 const struct firmware *fw;
1745 const __le16 *fw_data;
1746 uint16_t risc_address, risc_code_size;
1747 uint16_t mb[MAILBOX_REGISTER_COUNT], cnt;
1748 int err = 0, num, i;
1749#if DUMP_IT_BACK
1750 uint8_t *sp, *tbuf;
1751 dma_addr_t p_tbuf;
1752
1753 tbuf = pci_alloc_consistent(ha->pdev, 8000, &p_tbuf);
1754 if (!tbuf)
1755 return -ENOMEM;
1756#endif
1757
1758 fw = qla1280_request_firmware(ha);
1759 if (IS_ERR(fw))
1760 return PTR_ERR(fw);
1761
1762 fw_data = (const __le16 *)&fw->data[0];
1763 ha->fwstart = __le16_to_cpu(fw_data[2]);
1764
1765
1766 risc_address = ha->fwstart;
1767 fw_data = (const __le16 *)&fw->data[6];
1768 risc_code_size = (fw->size - 6) / 2;
1769
1770 dprintk(1, "%s: DMA RISC code (%i) words\n",
1771 __func__, risc_code_size);
1772
1773 num = 0;
1774 while (risc_code_size > 0) {
1775 int warn __attribute__((unused)) = 0;
1776
1777 cnt = 2000 >> 1;
1778
1779 if (cnt > risc_code_size)
1780 cnt = risc_code_size;
1781
1782 dprintk(2, "qla1280_setup_chip: loading risc @ =(0x%p),"
1783 "%d,%d(0x%x)\n",
1784 fw_data, cnt, num, risc_address);
1785 for(i = 0; i < cnt; i++)
1786 ((__le16 *)ha->request_ring)[i] = fw_data[i];
1787
1788 mb[0] = MBC_LOAD_RAM;
1789 mb[1] = risc_address;
1790 mb[4] = cnt;
1791 mb[3] = ha->request_dma & 0xffff;
1792 mb[2] = (ha->request_dma >> 16) & 0xffff;
1793 mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
1794 mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
1795 dprintk(2, "%s: op=%d 0x%p = 0x%4x,0x%4x,0x%4x,0x%4x\n",
1796 __func__, mb[0],
1797 (void *)(long)ha->request_dma,
1798 mb[6], mb[7], mb[2], mb[3]);
1799 err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
1800 BIT_1 | BIT_0, mb);
1801 if (err) {
1802 printk(KERN_ERR "scsi(%li): Failed to load partial "
1803 "segment of f\n", ha->host_no);
1804 goto out;
1805 }
1806
1807#if DUMP_IT_BACK
1808 mb[0] = MBC_DUMP_RAM;
1809 mb[1] = risc_address;
1810 mb[4] = cnt;
1811 mb[3] = p_tbuf & 0xffff;
1812 mb[2] = (p_tbuf >> 16) & 0xffff;
1813 mb[7] = pci_dma_hi32(p_tbuf) & 0xffff;
1814 mb[6] = pci_dma_hi32(p_tbuf) >> 16;
1815
1816 err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
1817 BIT_1 | BIT_0, mb);
1818 if (err) {
1819 printk(KERN_ERR
1820 "Failed to dump partial segment of f/w\n");
1821 goto out;
1822 }
1823 sp = (uint8_t *)ha->request_ring;
1824 for (i = 0; i < (cnt << 1); i++) {
1825 if (tbuf[i] != sp[i] && warn++ < 10) {
1826 printk(KERN_ERR "%s: FW compare error @ "
1827 "byte(0x%x) loop#=%x\n",
1828 __func__, i, num);
1829 printk(KERN_ERR "%s: FWbyte=%x "
1830 "FWfromChip=%x\n",
1831 __func__, sp[i], tbuf[i]);
1832
1833 }
1834 }
1835#endif
1836 risc_address += cnt;
1837 risc_code_size = risc_code_size - cnt;
1838 fw_data = fw_data + cnt;
1839 num++;
1840 }
1841
1842 out:
1843#if DUMP_IT_BACK
1844 pci_free_consistent(ha->pdev, 8000, tbuf, p_tbuf);
1845#endif
1846 return err;
1847}
1848
1849static int
1850qla1280_start_firmware(struct scsi_qla_host *ha)
1851{
1852 uint16_t mb[MAILBOX_REGISTER_COUNT];
1853 int err;
1854
1855 dprintk(1, "%s: Verifying checksum of loaded RISC code.\n",
1856 __func__);
1857
1858
1859 mb[0] = MBC_VERIFY_CHECKSUM;
1860
1861 mb[1] = ha->fwstart;
1862 err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
1863 if (err) {
1864 printk(KERN_ERR "scsi(%li): RISC checksum failed.\n", ha->host_no);
1865 return err;
1866 }
1867
1868
1869 dprintk(1, "%s: start firmware running.\n", __func__);
1870 mb[0] = MBC_EXECUTE_FIRMWARE;
1871 mb[1] = ha->fwstart;
1872 err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
1873 if (err) {
1874 printk(KERN_ERR "scsi(%li): Failed to start firmware\n",
1875 ha->host_no);
1876 }
1877
1878 return err;
1879}
1880
1881static int
1882qla1280_load_firmware(struct scsi_qla_host *ha)
1883{
1884
1885 int err;
1886
1887 err = qla1280_chip_diag(ha);
1888 if (err)
1889 goto out;
1890 if (IS_ISP1040(ha))
1891 err = qla1280_load_firmware_pio(ha);
1892 else
1893 err = qla1280_load_firmware_dma(ha);
1894 if (err)
1895 goto out;
1896 err = qla1280_start_firmware(ha);
1897 out:
1898 return err;
1899}
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914static int
1915qla1280_init_rings(struct scsi_qla_host *ha)
1916{
1917 uint16_t mb[MAILBOX_REGISTER_COUNT];
1918 int status = 0;
1919
1920 ENTER("qla1280_init_rings");
1921
1922
1923 memset(ha->outstanding_cmds, 0,
1924 sizeof(struct srb *) * MAX_OUTSTANDING_COMMANDS);
1925
1926
1927 ha->request_ring_ptr = ha->request_ring;
1928 ha->req_ring_index = 0;
1929 ha->req_q_cnt = REQUEST_ENTRY_CNT;
1930
1931 mb[0] = MBC_INIT_REQUEST_QUEUE_A64;
1932 mb[1] = REQUEST_ENTRY_CNT;
1933 mb[3] = ha->request_dma & 0xffff;
1934 mb[2] = (ha->request_dma >> 16) & 0xffff;
1935 mb[4] = 0;
1936 mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
1937 mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
1938 if (!(status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_4 |
1939 BIT_3 | BIT_2 | BIT_1 | BIT_0,
1940 &mb[0]))) {
1941
1942 ha->response_ring_ptr = ha->response_ring;
1943 ha->rsp_ring_index = 0;
1944
1945 mb[0] = MBC_INIT_RESPONSE_QUEUE_A64;
1946 mb[1] = RESPONSE_ENTRY_CNT;
1947 mb[3] = ha->response_dma & 0xffff;
1948 mb[2] = (ha->response_dma >> 16) & 0xffff;
1949 mb[5] = 0;
1950 mb[7] = pci_dma_hi32(ha->response_dma) & 0xffff;
1951 mb[6] = pci_dma_hi32(ha->response_dma) >> 16;
1952 status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_5 |
1953 BIT_3 | BIT_2 | BIT_1 | BIT_0,
1954 &mb[0]);
1955 }
1956
1957 if (status)
1958 dprintk(2, "qla1280_init_rings: **** FAILED ****\n");
1959
1960 LEAVE("qla1280_init_rings");
1961 return status;
1962}
1963
1964static void
1965qla1280_print_settings(struct nvram *nv)
1966{
1967 dprintk(1, "qla1280 : initiator scsi id bus[0]=%d\n",
1968 nv->bus[0].config_1.initiator_id);
1969 dprintk(1, "qla1280 : initiator scsi id bus[1]=%d\n",
1970 nv->bus[1].config_1.initiator_id);
1971
1972 dprintk(1, "qla1280 : bus reset delay[0]=%d\n",
1973 nv->bus[0].bus_reset_delay);
1974 dprintk(1, "qla1280 : bus reset delay[1]=%d\n",
1975 nv->bus[1].bus_reset_delay);
1976
1977 dprintk(1, "qla1280 : retry count[0]=%d\n", nv->bus[0].retry_count);
1978 dprintk(1, "qla1280 : retry delay[0]=%d\n", nv->bus[0].retry_delay);
1979 dprintk(1, "qla1280 : retry count[1]=%d\n", nv->bus[1].retry_count);
1980 dprintk(1, "qla1280 : retry delay[1]=%d\n", nv->bus[1].retry_delay);
1981
1982 dprintk(1, "qla1280 : async data setup time[0]=%d\n",
1983 nv->bus[0].config_2.async_data_setup_time);
1984 dprintk(1, "qla1280 : async data setup time[1]=%d\n",
1985 nv->bus[1].config_2.async_data_setup_time);
1986
1987 dprintk(1, "qla1280 : req/ack active negation[0]=%d\n",
1988 nv->bus[0].config_2.req_ack_active_negation);
1989 dprintk(1, "qla1280 : req/ack active negation[1]=%d\n",
1990 nv->bus[1].config_2.req_ack_active_negation);
1991
1992 dprintk(1, "qla1280 : data line active negation[0]=%d\n",
1993 nv->bus[0].config_2.data_line_active_negation);
1994 dprintk(1, "qla1280 : data line active negation[1]=%d\n",
1995 nv->bus[1].config_2.data_line_active_negation);
1996
1997 dprintk(1, "qla1280 : disable loading risc code=%d\n",
1998 nv->cntr_flags_1.disable_loading_risc_code);
1999
2000 dprintk(1, "qla1280 : enable 64bit addressing=%d\n",
2001 nv->cntr_flags_1.enable_64bit_addressing);
2002
2003 dprintk(1, "qla1280 : selection timeout limit[0]=%d\n",
2004 nv->bus[0].selection_timeout);
2005 dprintk(1, "qla1280 : selection timeout limit[1]=%d\n",
2006 nv->bus[1].selection_timeout);
2007
2008 dprintk(1, "qla1280 : max queue depth[0]=%d\n",
2009 nv->bus[0].max_queue_depth);
2010 dprintk(1, "qla1280 : max queue depth[1]=%d\n",
2011 nv->bus[1].max_queue_depth);
2012}
2013
2014static void
2015qla1280_set_target_defaults(struct scsi_qla_host *ha, int bus, int target)
2016{
2017 struct nvram *nv = &ha->nvram;
2018
2019 nv->bus[bus].target[target].parameter.renegotiate_on_error = 1;
2020 nv->bus[bus].target[target].parameter.auto_request_sense = 1;
2021 nv->bus[bus].target[target].parameter.tag_queuing = 1;
2022 nv->bus[bus].target[target].parameter.enable_sync = 1;
2023#if 1
2024 nv->bus[bus].target[target].parameter.enable_wide = 1;
2025#endif
2026 nv->bus[bus].target[target].execution_throttle =
2027 nv->bus[bus].max_queue_depth - 1;
2028 nv->bus[bus].target[target].parameter.parity_checking = 1;
2029 nv->bus[bus].target[target].parameter.disconnect_allowed = 1;
2030
2031 if (IS_ISP1x160(ha)) {
2032 nv->bus[bus].target[target].flags.flags1x160.device_enable = 1;
2033 nv->bus[bus].target[target].flags.flags1x160.sync_offset = 0x0e;
2034 nv->bus[bus].target[target].sync_period = 9;
2035 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 1;
2036 nv->bus[bus].target[target].ppr_1x160.flags.ppr_options = 2;
2037 nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width = 1;
2038 } else {
2039 nv->bus[bus].target[target].flags.flags1x80.device_enable = 1;
2040 nv->bus[bus].target[target].flags.flags1x80.sync_offset = 12;
2041 nv->bus[bus].target[target].sync_period = 10;
2042 }
2043}
2044
2045static void
2046qla1280_set_defaults(struct scsi_qla_host *ha)
2047{
2048 struct nvram *nv = &ha->nvram;
2049 int bus, target;
2050
2051 dprintk(1, "Using defaults for NVRAM: \n");
2052 memset(nv, 0, sizeof(struct nvram));
2053
2054
2055 nv->firmware_feature.f.enable_fast_posting = 1;
2056 nv->firmware_feature.f.disable_synchronous_backoff = 1;
2057 nv->termination.scsi_bus_0_control = 3;
2058 nv->termination.scsi_bus_1_control = 3;
2059 nv->termination.auto_term_support = 1;
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069 nv->isp_config.burst_enable = 1;
2070 if (IS_ISP1040(ha))
2071 nv->isp_config.fifo_threshold |= 3;
2072 else
2073 nv->isp_config.fifo_threshold |= 4;
2074
2075 if (IS_ISP1x160(ha))
2076 nv->isp_parameter = 0x01;
2077
2078 for (bus = 0; bus < MAX_BUSES; bus++) {
2079 nv->bus[bus].config_1.initiator_id = 7;
2080 nv->bus[bus].config_2.req_ack_active_negation = 1;
2081 nv->bus[bus].config_2.data_line_active_negation = 1;
2082 nv->bus[bus].selection_timeout = 250;
2083 nv->bus[bus].max_queue_depth = 32;
2084
2085 if (IS_ISP1040(ha)) {
2086 nv->bus[bus].bus_reset_delay = 3;
2087 nv->bus[bus].config_2.async_data_setup_time = 6;
2088 nv->bus[bus].retry_delay = 1;
2089 } else {
2090 nv->bus[bus].bus_reset_delay = 5;
2091 nv->bus[bus].config_2.async_data_setup_time = 8;
2092 }
2093
2094 for (target = 0; target < MAX_TARGETS; target++)
2095 qla1280_set_target_defaults(ha, bus, target);
2096 }
2097}
2098
2099static int
2100qla1280_config_target(struct scsi_qla_host *ha, int bus, int target)
2101{
2102 struct nvram *nv = &ha->nvram;
2103 uint16_t mb[MAILBOX_REGISTER_COUNT];
2104 int status, lun;
2105 uint16_t flag;
2106
2107
2108 mb[0] = MBC_SET_TARGET_PARAMETERS;
2109 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
2110
2111
2112
2113
2114
2115
2116 mb[2] = (TP_RENEGOTIATE | TP_AUTO_REQUEST_SENSE | TP_TAGGED_QUEUE
2117 | TP_WIDE | TP_PARITY | TP_DISCONNECT);
2118
2119 if (IS_ISP1x160(ha))
2120 mb[3] = nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8;
2121 else
2122 mb[3] = nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8;
2123 mb[3] |= nv->bus[bus].target[target].sync_period;
2124 status = qla1280_mailbox_command(ha, 0x0f, mb);
2125
2126
2127 flag = (BIT_0 << target);
2128 if (nv->bus[bus].target[target].parameter.tag_queuing)
2129 ha->bus_settings[bus].qtag_enables |= flag;
2130
2131
2132 if (IS_ISP1x160(ha)) {
2133 if (nv->bus[bus].target[target].flags.flags1x160.device_enable)
2134 ha->bus_settings[bus].device_enables |= flag;
2135 ha->bus_settings[bus].lun_disables |= 0;
2136 } else {
2137 if (nv->bus[bus].target[target].flags.flags1x80.device_enable)
2138 ha->bus_settings[bus].device_enables |= flag;
2139
2140 if (nv->bus[bus].target[target].flags.flags1x80.lun_disable)
2141 ha->bus_settings[bus].lun_disables |= flag;
2142 }
2143
2144
2145 for (lun = 0; lun < MAX_LUNS; lun++) {
2146 mb[0] = MBC_SET_DEVICE_QUEUE;
2147 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
2148 mb[1] |= lun;
2149 mb[2] = nv->bus[bus].max_queue_depth;
2150 mb[3] = nv->bus[bus].target[target].execution_throttle;
2151 status |= qla1280_mailbox_command(ha, 0x0f, mb);
2152 }
2153
2154 return status;
2155}
2156
2157static int
2158qla1280_config_bus(struct scsi_qla_host *ha, int bus)
2159{
2160 struct nvram *nv = &ha->nvram;
2161 uint16_t mb[MAILBOX_REGISTER_COUNT];
2162 int target, status;
2163
2164
2165 ha->bus_settings[bus].disable_scsi_reset =
2166 nv->bus[bus].config_1.scsi_reset_disable;
2167
2168
2169 ha->bus_settings[bus].id = nv->bus[bus].config_1.initiator_id;
2170 mb[0] = MBC_SET_INITIATOR_ID;
2171 mb[1] = bus ? ha->bus_settings[bus].id | BIT_7 :
2172 ha->bus_settings[bus].id;
2173 status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
2174
2175
2176 ha->bus_settings[bus].bus_reset_delay =
2177 nv->bus[bus].bus_reset_delay;
2178
2179
2180 ha->bus_settings[bus].hiwat = nv->bus[bus].max_queue_depth - 1;
2181
2182
2183 for (target = 0; target < MAX_TARGETS; target++)
2184 status |= qla1280_config_target(ha, bus, target);
2185
2186 return status;
2187}
2188
2189static int
2190qla1280_nvram_config(struct scsi_qla_host *ha)
2191{
2192 struct device_reg __iomem *reg = ha->iobase;
2193 struct nvram *nv = &ha->nvram;
2194 int bus, target, status = 0;
2195 uint16_t mb[MAILBOX_REGISTER_COUNT];
2196
2197 ENTER("qla1280_nvram_config");
2198
2199 if (ha->nvram_valid) {
2200
2201 for (bus = 0; bus < MAX_BUSES; bus++)
2202 for (target = 0; target < MAX_TARGETS; target++) {
2203 nv->bus[bus].target[target].parameter.
2204 auto_request_sense = 1;
2205 }
2206 } else {
2207 qla1280_set_defaults(ha);
2208 }
2209
2210 qla1280_print_settings(nv);
2211
2212
2213 ha->flags.disable_risc_code_load =
2214 nv->cntr_flags_1.disable_loading_risc_code;
2215
2216 if (IS_ISP1040(ha)) {
2217 uint16_t hwrev, cfg1, cdma_conf, ddma_conf;
2218
2219 hwrev = RD_REG_WORD(®->cfg_0) & ISP_CFG0_HWMSK;
2220
2221 cfg1 = RD_REG_WORD(®->cfg_1) & ~(BIT_4 | BIT_5 | BIT_6);
2222 cdma_conf = RD_REG_WORD(®->cdma_cfg);
2223 ddma_conf = RD_REG_WORD(®->ddma_cfg);
2224
2225
2226 if (hwrev != ISP_CFG0_1040A)
2227 cfg1 |= nv->isp_config.fifo_threshold << 4;
2228
2229 cfg1 |= nv->isp_config.burst_enable << 2;
2230 WRT_REG_WORD(®->cfg_1, cfg1);
2231
2232 WRT_REG_WORD(®->cdma_cfg, cdma_conf | CDMA_CONF_BENAB);
2233 WRT_REG_WORD(®->ddma_cfg, cdma_conf | DDMA_CONF_BENAB);
2234 } else {
2235 uint16_t cfg1, term;
2236
2237
2238 cfg1 = nv->isp_config.fifo_threshold << 4;
2239 cfg1 |= nv->isp_config.burst_enable << 2;
2240
2241 if (ha->ports > 1)
2242 cfg1 |= BIT_13;
2243 WRT_REG_WORD(®->cfg_1, cfg1);
2244
2245
2246 WRT_REG_WORD(®->gpio_enable,
2247 BIT_7 | BIT_3 | BIT_2 | BIT_1 | BIT_0);
2248 term = nv->termination.scsi_bus_1_control;
2249 term |= nv->termination.scsi_bus_0_control << 2;
2250 term |= nv->termination.auto_term_support << 7;
2251 RD_REG_WORD(®->id_l);
2252 WRT_REG_WORD(®->gpio_data, term);
2253 }
2254 RD_REG_WORD(®->id_l);
2255
2256
2257 mb[0] = MBC_SET_SYSTEM_PARAMETER;
2258 mb[1] = nv->isp_parameter;
2259 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
2260
2261 if (IS_ISP1x40(ha)) {
2262
2263 mb[0] = MBC_SET_CLOCK_RATE;
2264 mb[1] = 40;
2265 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2266 }
2267
2268
2269 mb[0] = MBC_SET_FIRMWARE_FEATURES;
2270 mb[1] = nv->firmware_feature.f.enable_fast_posting;
2271 mb[1] |= nv->firmware_feature.f.report_lvd_bus_transition << 1;
2272 mb[1] |= nv->firmware_feature.f.disable_synchronous_backoff << 5;
2273#if defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_SGI_SN2)
2274 if (ia64_platform_is("sn2")) {
2275 printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
2276 "workaround\n", ha->host_no);
2277 mb[1] |= nv->firmware_feature.f.unused_9 << 9;
2278 }
2279#endif
2280 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2281
2282
2283 mb[0] = MBC_SET_RETRY_COUNT;
2284 mb[1] = nv->bus[0].retry_count;
2285 mb[2] = nv->bus[0].retry_delay;
2286 mb[6] = nv->bus[1].retry_count;
2287 mb[7] = nv->bus[1].retry_delay;
2288 status |= qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_2 |
2289 BIT_1 | BIT_0, &mb[0]);
2290
2291
2292 mb[0] = MBC_SET_ASYNC_DATA_SETUP;
2293 mb[1] = nv->bus[0].config_2.async_data_setup_time;
2294 mb[2] = nv->bus[1].config_2.async_data_setup_time;
2295 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2296
2297
2298 mb[0] = MBC_SET_ACTIVE_NEGATION;
2299 mb[1] = 0;
2300 if (nv->bus[0].config_2.req_ack_active_negation)
2301 mb[1] |= BIT_5;
2302 if (nv->bus[0].config_2.data_line_active_negation)
2303 mb[1] |= BIT_4;
2304 mb[2] = 0;
2305 if (nv->bus[1].config_2.req_ack_active_negation)
2306 mb[2] |= BIT_5;
2307 if (nv->bus[1].config_2.data_line_active_negation)
2308 mb[2] |= BIT_4;
2309 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2310
2311 mb[0] = MBC_SET_DATA_OVERRUN_RECOVERY;
2312 mb[1] = 2;
2313 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2314
2315
2316 mb[0] = MBC_SET_PCI_CONTROL;
2317 mb[1] = BIT_1;
2318 mb[2] = BIT_1;
2319 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2320
2321 mb[0] = MBC_SET_TAG_AGE_LIMIT;
2322 mb[1] = 8;
2323 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2324
2325
2326 mb[0] = MBC_SET_SELECTION_TIMEOUT;
2327 mb[1] = nv->bus[0].selection_timeout;
2328 mb[2] = nv->bus[1].selection_timeout;
2329 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2330
2331 for (bus = 0; bus < ha->ports; bus++)
2332 status |= qla1280_config_bus(ha, bus);
2333
2334 if (status)
2335 dprintk(2, "qla1280_nvram_config: **** FAILED ****\n");
2336
2337 LEAVE("qla1280_nvram_config");
2338 return status;
2339}
2340
2341
2342
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353static uint16_t
2354qla1280_get_nvram_word(struct scsi_qla_host *ha, uint32_t address)
2355{
2356 uint32_t nv_cmd;
2357 uint16_t data;
2358
2359 nv_cmd = address << 16;
2360 nv_cmd |= NV_READ_OP;
2361
2362 data = le16_to_cpu(qla1280_nvram_request(ha, nv_cmd));
2363
2364 dprintk(8, "qla1280_get_nvram_word: exiting normally NVRAM data = "
2365 "0x%x", data);
2366
2367 return data;
2368}
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378
2379
2380
2381
2382
2383
2384static uint16_t
2385qla1280_nvram_request(struct scsi_qla_host *ha, uint32_t nv_cmd)
2386{
2387 struct device_reg __iomem *reg = ha->iobase;
2388 int cnt;
2389 uint16_t data = 0;
2390 uint16_t reg_data;
2391
2392
2393
2394 nv_cmd <<= 5;
2395 for (cnt = 0; cnt < 11; cnt++) {
2396 if (nv_cmd & BIT_31)
2397 qla1280_nv_write(ha, NV_DATA_OUT);
2398 else
2399 qla1280_nv_write(ha, 0);
2400 nv_cmd <<= 1;
2401 }
2402
2403
2404
2405 for (cnt = 0; cnt < 16; cnt++) {
2406 WRT_REG_WORD(®->nvram, (NV_SELECT | NV_CLOCK));
2407 RD_REG_WORD(®->id_l);
2408 NVRAM_DELAY();
2409 data <<= 1;
2410 reg_data = RD_REG_WORD(®->nvram);
2411 if (reg_data & NV_DATA_IN)
2412 data |= BIT_0;
2413 WRT_REG_WORD(®->nvram, NV_SELECT);
2414 RD_REG_WORD(®->id_l);
2415 NVRAM_DELAY();
2416 }
2417
2418
2419
2420 WRT_REG_WORD(®->nvram, NV_DESELECT);
2421 RD_REG_WORD(®->id_l);
2422 NVRAM_DELAY();
2423
2424 return data;
2425}
2426
2427static void
2428qla1280_nv_write(struct scsi_qla_host *ha, uint16_t data)
2429{
2430 struct device_reg __iomem *reg = ha->iobase;
2431
2432 WRT_REG_WORD(®->nvram, data | NV_SELECT);
2433 RD_REG_WORD(®->id_l);
2434 NVRAM_DELAY();
2435 WRT_REG_WORD(®->nvram, data | NV_SELECT | NV_CLOCK);
2436 RD_REG_WORD(®->id_l);
2437 NVRAM_DELAY();
2438 WRT_REG_WORD(®->nvram, data | NV_SELECT);
2439 RD_REG_WORD(®->id_l);
2440 NVRAM_DELAY();
2441}
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458static int
2459qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
2460{
2461 struct device_reg __iomem *reg = ha->iobase;
2462 int status = 0;
2463 int cnt;
2464 uint16_t *optr, *iptr;
2465 uint16_t __iomem *mptr;
2466 uint16_t data;
2467 DECLARE_COMPLETION_ONSTACK(wait);
2468
2469 ENTER("qla1280_mailbox_command");
2470
2471 if (ha->mailbox_wait) {
2472 printk(KERN_ERR "Warning mailbox wait already in use!\n");
2473 }
2474 ha->mailbox_wait = &wait;
2475
2476
2477
2478
2479
2480
2481 mptr = (uint16_t __iomem *) ®->mailbox0;
2482 iptr = mb;
2483 for (cnt = 0; cnt < MAILBOX_REGISTER_COUNT; cnt++) {
2484 if (mr & BIT_0) {
2485 WRT_REG_WORD(mptr, (*iptr));
2486 }
2487
2488 mr >>= 1;
2489 mptr++;
2490 iptr++;
2491 }
2492
2493
2494
2495
2496 timer_setup(&ha->mailbox_timer, qla1280_mailbox_timeout, 0);
2497 mod_timer(&ha->mailbox_timer, jiffies + 20 * HZ);
2498
2499 spin_unlock_irq(ha->host->host_lock);
2500 WRT_REG_WORD(®->host_cmd, HC_SET_HOST_INT);
2501 data = qla1280_debounce_register(®->istatus);
2502
2503 wait_for_completion(&wait);
2504 del_timer_sync(&ha->mailbox_timer);
2505
2506 spin_lock_irq(ha->host->host_lock);
2507
2508 ha->mailbox_wait = NULL;
2509
2510
2511 if (ha->mailbox_out[0] != MBS_CMD_CMP) {
2512 printk(KERN_WARNING "qla1280_mailbox_command: Command failed, "
2513 "mailbox0 = 0x%04x, mailbox_out0 = 0x%04x, istatus = "
2514 "0x%04x\n",
2515 mb[0], ha->mailbox_out[0], RD_REG_WORD(®->istatus));
2516 printk(KERN_WARNING "m0 %04x, m1 %04x, m2 %04x, m3 %04x\n",
2517 RD_REG_WORD(®->mailbox0), RD_REG_WORD(®->mailbox1),
2518 RD_REG_WORD(®->mailbox2), RD_REG_WORD(®->mailbox3));
2519 printk(KERN_WARNING "m4 %04x, m5 %04x, m6 %04x, m7 %04x\n",
2520 RD_REG_WORD(®->mailbox4), RD_REG_WORD(®->mailbox5),
2521 RD_REG_WORD(®->mailbox6), RD_REG_WORD(®->mailbox7));
2522 status = 1;
2523 }
2524
2525
2526 optr = mb;
2527 iptr = (uint16_t *) &ha->mailbox_out[0];
2528 mr = MAILBOX_REGISTER_COUNT;
2529 memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t));
2530
2531 if (ha->flags.reset_marker)
2532 qla1280_rst_aen(ha);
2533
2534 if (status)
2535 dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = "
2536 "0x%x ****\n", mb[0]);
2537
2538 LEAVE("qla1280_mailbox_command");
2539 return status;
2540}
2541
2542
2543
2544
2545
2546
2547
2548
static void
qla1280_poll(struct scsi_qla_host *ha)
{
	struct device_reg __iomem *reg = ha->iobase;
	uint16_t data;
	LIST_HEAD(done_q);

	data = RD_REG_WORD(&reg->istatus);
	if (data & RISC_INT)
		qla1280_isr(ha, &done_q);

	if (!ha->mailbox_wait) {
		if (ha->flags.reset_marker)
			qla1280_rst_aen(ha);
	}

	if (!list_empty(&done_q))
		qla1280_done(ha);
}
2573
2574
2575
2576
2577
2578
2579
2580
2581
2582
2583
2584
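/*
 * qla1280_bus_reset
 *      Issue a SCSI bus reset (MBC_BUS_RESET mailbox command).  On success
 *      the routine sleeps for the configured bus reset delay, clears the
 *      dead-bus bookkeeping and queues a MK_SYNC_ALL marker; on failure the
 *      failed-reset count is bumped and the bus may be marked dead.
 *
 * Returns:
 *      0 = success
 */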
2585static int
2586qla1280_bus_reset(struct scsi_qla_host *ha, int bus)
2587{
2588 uint16_t mb[MAILBOX_REGISTER_COUNT];
2589 uint16_t reset_delay;
2590 int status;
2591
2592 dprintk(3, "qla1280_bus_reset: entered\n");
2593
2594 if (qla1280_verbose)
2595 printk(KERN_INFO "scsi(%li:%i): Resetting SCSI BUS\n",
2596 ha->host_no, bus);
2597
2598 reset_delay = ha->bus_settings[bus].bus_reset_delay;
2599 mb[0] = MBC_BUS_RESET;
2600 mb[1] = reset_delay;
2601 mb[2] = (uint16_t) bus;
2602 status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2603
2604 if (status) {
2605 if (ha->bus_settings[bus].failed_reset_count > 2)
2606 ha->bus_settings[bus].scsi_bus_dead = 1;
2607 ha->bus_settings[bus].failed_reset_count++;
2608 } else {
2609 spin_unlock_irq(ha->host->host_lock);
2610 ssleep(reset_delay);
2611 spin_lock_irq(ha->host->host_lock);
2612
2613 ha->bus_settings[bus].scsi_bus_dead = 0;
2614 ha->bus_settings[bus].failed_reset_count = 0;
2615 ha->bus_settings[bus].reset_marker = 0;
2616
2617 qla1280_marker(ha, bus, 0, 0, MK_SYNC_ALL);
2618 }
2619
2620
2621
2622
2623
2624
2625 if (status)
2626 dprintk(2, "qla1280_bus_reset: **** FAILED ****\n");
2627 else
2628 dprintk(3, "qla1280_bus_reset: exiting normally\n");
2629
2630 return status;
2631}
2632
2633
2634
2635
2636
2637
2638
2639
2640
2641
2642
2643
2644
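/*
 * qla1280_device_reset
 *      Issue a BUS DEVICE RESET to a target (MBC_ABORT_TARGET mailbox
 *      command) and queue a MK_SYNC_ID marker so the firmware
 *      resynchronizes with that target.
 *
 * Returns:
 *      0 = success
 */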
2645static int
2646qla1280_device_reset(struct scsi_qla_host *ha, int bus, int target)
2647{
2648 uint16_t mb[MAILBOX_REGISTER_COUNT];
2649 int status;
2650
2651 ENTER("qla1280_device_reset");
2652
2653 mb[0] = MBC_ABORT_TARGET;
2654 mb[1] = (bus ? (target | BIT_7) : target) << 8;
2655 mb[2] = 1;
2656 status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2657
2658
2659 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
2660
2661 if (status)
2662 dprintk(2, "qla1280_device_reset: **** FAILED ****\n");
2663
2664 LEAVE("qla1280_device_reset");
2665 return status;
2666}
2667
2668
2669
2670
2671
2672
2673
2674
2675
2676
2677
2678
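/*
 * qla1280_abort_command
 *      Abort a single outstanding command via MBC_ABORT_COMMAND.  The
 *      command's handle is passed in mailboxes 2 and 3; SRB_ABORT_PENDING
 *      is set while the abort is in flight and cleared again on failure.
 *
 * Returns:
 *      0 = success
 */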
2679static int
2680qla1280_abort_command(struct scsi_qla_host *ha, struct srb * sp, int handle)
2681{
2682 uint16_t mb[MAILBOX_REGISTER_COUNT];
2683 unsigned int bus, target, lun;
2684 int status;
2685
2686 ENTER("qla1280_abort_command");
2687
2688 bus = SCSI_BUS_32(sp->cmd);
2689 target = SCSI_TCN_32(sp->cmd);
2690 lun = SCSI_LUN_32(sp->cmd);
2691
2692 sp->flags |= SRB_ABORT_PENDING;
2693
2694 mb[0] = MBC_ABORT_COMMAND;
2695 mb[1] = (bus ? target | BIT_7 : target) << 8 | lun;
2696 mb[2] = handle >> 16;
2697 mb[3] = handle & 0xffff;
2698 status = qla1280_mailbox_command(ha, 0x0f, &mb[0]);
2699
2700 if (status) {
2701 dprintk(2, "qla1280_abort_command: **** FAILED ****\n");
2702 sp->flags &= ~SRB_ABORT_PENDING;
2703 }
2704
2705
2706 LEAVE("qla1280_abort_command");
2707 return status;
2708}
2709
2710
2711
2712
2713
2714
2715
2716
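/*
 * qla1280_reset_adapter
 *      Take the adapter offline and reset the ISP/RISC, leaving the
 *      on-board BIOS disabled.
 */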
static void
qla1280_reset_adapter(struct scsi_qla_host *ha)
{
	struct device_reg __iomem *reg = ha->iobase;

	ENTER("qla1280_reset_adapter");

	ha->flags.online = 0;
	WRT_REG_WORD(&reg->ictrl, ISP_RESET);
	WRT_REG_WORD(&reg->host_cmd,
		     HC_RESET_RISC | HC_RELEASE_RISC | HC_DISABLE_BIOS);
	RD_REG_WORD(&reg->id_l);

	LEAVE("qla1280_reset_adapter");
}
2733
2734
2735
2736
2737
2738
2739
2740
2741
2742
2743
2744
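/*
 * qla1280_marker
 *      Queue a marker IOCB for the given bus/target/lun so the firmware
 *      resynchronizes its state after a reset ('type' selects the scope,
 *      e.g. MK_SYNC_ID or MK_SYNC_ALL).
 */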
2745static void
2746qla1280_marker(struct scsi_qla_host *ha, int bus, int id, int lun, u8 type)
2747{
2748 struct mrk_entry *pkt;
2749
2750 ENTER("qla1280_marker");
2751
2752
2753 if ((pkt = (struct mrk_entry *) qla1280_req_pkt(ha))) {
2754 pkt->entry_type = MARKER_TYPE;
2755 pkt->lun = (uint8_t) lun;
2756 pkt->target = (uint8_t) (bus ? (id | BIT_7) : id);
2757 pkt->modifier = type;
2758 pkt->entry_status = 0;
2759
2760
2761 qla1280_isp_cmd(ha);
2762 }
2763
2764 LEAVE("qla1280_marker");
2765}
2766
2767
2768
2769
2770
2771
2772
2773
2774
2775
2776
2777
2778
2779
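/*
 * qla1280_64bit_start_scsi
 *      Build a 64-bit address command IOCB (plus continuation IOCBs for
 *      large scatter/gather lists) on the request ring and advance the ISP
 *      request queue in-pointer.  The first IOCB holds up to 2 SG entries;
 *      each continuation IOCB holds up to 5 more.
 *
 * Input:
 *      ha = adapter block pointer.
 *      sp = SCSI request block.
 *
 * Returns:
 *      0 = success; non-zero (e.g. SCSI_MLQUEUE_HOST_BUSY) on failure.
 */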
2780#ifdef QLA_64BIT_PTR
2781static int
2782qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2783{
2784 struct device_reg __iomem *reg = ha->iobase;
2785 struct scsi_cmnd *cmd = sp->cmd;
2786 cmd_a64_entry_t *pkt;
2787 __le32 *dword_ptr;
2788 dma_addr_t dma_handle;
2789 int status = 0;
2790 int cnt;
2791 int req_cnt;
2792 int seg_cnt;
2793 u8 dir;
2794
2795 ENTER("qla1280_64bit_start_scsi:");
2796
2797
2798 req_cnt = 1;
2799 seg_cnt = scsi_dma_map(cmd);
2800 if (seg_cnt > 0) {
2801 if (seg_cnt > 2) {
2802 req_cnt += (seg_cnt - 2) / 5;
2803 if ((seg_cnt - 2) % 5)
2804 req_cnt++;
2805 }
2806 } else if (seg_cnt < 0) {
2807 status = 1;
2808 goto out;
2809 }
2810
	if ((req_cnt + 2) >= ha->req_q_cnt) {
		/* Calculate number of free request entries. */
		cnt = RD_REG_WORD(&reg->mailbox4);
		if (ha->req_ring_index < cnt)
			ha->req_q_cnt = cnt - ha->req_ring_index;
		else
			ha->req_q_cnt =
				REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
	}
2820
2821 dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
2822 ha->req_q_cnt, seg_cnt);
2823
2824
2825 if ((req_cnt + 2) >= ha->req_q_cnt) {
2826 status = SCSI_MLQUEUE_HOST_BUSY;
2827 dprintk(2, "qla1280_start_scsi: in-ptr=0x%x req_q_cnt="
2828 "0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt,
2829 req_cnt);
2830 goto out;
2831 }
2832
2833
2834 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
2835 ha->outstanding_cmds[cnt] != NULL; cnt++);
2836
2837 if (cnt >= MAX_OUTSTANDING_COMMANDS) {
2838 status = SCSI_MLQUEUE_HOST_BUSY;
2839 dprintk(2, "qla1280_start_scsi: NO ROOM IN "
2840 "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt);
2841 goto out;
2842 }
2843
2844 ha->outstanding_cmds[cnt] = sp;
2845 ha->req_q_cnt -= req_cnt;
2846 CMD_HANDLE(sp->cmd) = (unsigned char *)(unsigned long)(cnt + 1);
2847
2848 dprintk(2, "start: cmd=%p sp=%p CDB=%xm, handle %lx\n", cmd, sp,
2849 cmd->cmnd[0], (long)CMD_HANDLE(sp->cmd));
2850 dprintk(2, " bus %i, target %i, lun %i\n",
2851 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
2852 qla1280_dump_buffer(2, cmd->cmnd, MAX_COMMAND_SIZE);
2853
2854
2855
2856
2857 pkt = (cmd_a64_entry_t *) ha->request_ring_ptr;
2858
2859 pkt->entry_type = COMMAND_A64_TYPE;
2860 pkt->entry_count = (uint8_t) req_cnt;
2861 pkt->sys_define = (uint8_t) ha->req_ring_index;
2862 pkt->entry_status = 0;
2863 pkt->handle = cpu_to_le32(cnt);
2864
2865
2866 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
2867
2868
2869 pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
2870
2871
2872 pkt->lun = SCSI_LUN_32(cmd);
2873 pkt->target = SCSI_BUS_32(cmd) ?
2874 (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
2875
2876
2877 if (cmd->device->simple_tags)
2878 pkt->control_flags |= cpu_to_le16(BIT_3);
2879
2880
2881 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
2882 memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
2883
2884
2885
2886 dir = qla1280_data_direction(cmd);
2887 pkt->control_flags |= cpu_to_le16(dir);
2888
2889
2890 pkt->dseg_count = cpu_to_le16(seg_cnt);
2891
2892
2893
2894
2895 if (seg_cnt) {
2896 struct scatterlist *sg, *s;
2897 int remseg = seg_cnt;
2898
2899 sg = scsi_sglist(cmd);
2900
2901
2902 dword_ptr = (u32 *)&pkt->dseg_0_address;
2903
2904
2905 for_each_sg(sg, s, seg_cnt, cnt) {
2906 if (cnt == 2)
2907 break;
2908
2909 dma_handle = sg_dma_address(s);
2910#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
2911 if (ha->flags.use_pci_vchannel)
2912 sn_pci_set_vchan(ha->pdev,
2913 (unsigned long *)&dma_handle,
2914 SCSI_BUS_32(cmd));
2915#endif
			*dword_ptr++ =
				cpu_to_le32(pci_dma_lo32(dma_handle));
			*dword_ptr++ =
				cpu_to_le32(pci_dma_hi32(dma_handle));
			*dword_ptr++ = cpu_to_le32(sg_dma_len(s));
			dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n",
				cpu_to_le32(pci_dma_hi32(dma_handle)),
				cpu_to_le32(pci_dma_lo32(dma_handle)),
				cpu_to_le32(sg_dma_len(s)));
2925 remseg--;
2926 }
2927 dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather "
2928 "command packet data - b %i, t %i, l %i \n",
2929 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
2930 SCSI_LUN_32(cmd));
2931 qla1280_dump_buffer(5, (char *)pkt,
2932 REQUEST_ENTRY_SIZE);
2933
2934
2935
2936
2937 dprintk(3, "S/G Building Continuation...seg_cnt=0x%x "
2938 "remains\n", seg_cnt);
2939
2940 while (remseg > 0) {
2941
2942 sg = s;
2943
2944 ha->req_ring_index++;
2945 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
2946 ha->req_ring_index = 0;
2947 ha->request_ring_ptr =
2948 ha->request_ring;
2949 } else
2950 ha->request_ring_ptr++;
2951
2952 pkt = (cmd_a64_entry_t *)ha->request_ring_ptr;
2953
2954
2955 memset(pkt, 0, REQUEST_ENTRY_SIZE);
2956
2957
2958 ((struct cont_a64_entry *) pkt)->entry_type =
2959 CONTINUE_A64_TYPE;
2960 ((struct cont_a64_entry *) pkt)->entry_count = 1;
2961 ((struct cont_a64_entry *) pkt)->sys_define =
2962 (uint8_t)ha->req_ring_index;
2963
2964 dword_ptr =
2965 (u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address;
2966
2967
2968 for_each_sg(sg, s, remseg, cnt) {
2969 if (cnt == 5)
2970 break;
2971 dma_handle = sg_dma_address(s);
2972#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
2973 if (ha->flags.use_pci_vchannel)
2974 sn_pci_set_vchan(ha->pdev,
2975 (unsigned long *)&dma_handle,
2976 SCSI_BUS_32(cmd));
2977#endif
2978 *dword_ptr++ =
2979 cpu_to_le32(pci_dma_lo32(dma_handle));
2980 *dword_ptr++ =
2981 cpu_to_le32(pci_dma_hi32(dma_handle));
2982 *dword_ptr++ =
2983 cpu_to_le32(sg_dma_len(s));
2984 dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n",
2985 cpu_to_le32(pci_dma_hi32(dma_handle)),
2986 cpu_to_le32(pci_dma_lo32(dma_handle)),
2987 cpu_to_le32(sg_dma_len(s)));
2988 }
2989 remseg -= cnt;
2990 dprintk(5, "qla1280_64bit_start_scsi: "
2991 "continuation packet data - b %i, t "
2992 "%i, l %i \n", SCSI_BUS_32(cmd),
2993 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
2994 qla1280_dump_buffer(5, (char *)pkt,
2995 REQUEST_ENTRY_SIZE);
2996 }
2997 } else {
2998 dprintk(5, "qla1280_64bit_start_scsi: No data, command "
2999 "packet data - b %i, t %i, l %i \n",
3000 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
3001 qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
3002 }
3003
3004 ha->req_ring_index++;
3005 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3006 ha->req_ring_index = 0;
3007 ha->request_ring_ptr = ha->request_ring;
3008 } else
3009 ha->request_ring_ptr++;
3010
3011
	dprintk(2,
		"qla1280_64bit_start_scsi: Wakeup RISC for pending command\n");
	sp->flags |= SRB_SENT;
	ha->actthreads++;
	WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
3017
3018 out:
3019 if (status)
3020 dprintk(2, "qla1280_64bit_start_scsi: **** FAILED ****\n");
3021 else
3022 dprintk(3, "qla1280_64bit_start_scsi: exiting normally\n");
3023
3024 return status;
3025}
3026#else
3027
3028
3029
3030
3031
3032
3033
3034
3035
3036
3037
3038
3039
3040
3041
3042
3043
3044
3045
3046
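/*
 * qla1280_32bit_start_scsi
 *      32-bit address variant of the start-SCSI routine: builds a command
 *      IOCB (up to 4 SG entries) plus continuation IOCBs (up to 7 SG
 *      entries each) on the request ring and advances the ISP request
 *      queue in-pointer.
 *
 * Returns:
 *      0 = success; non-zero (e.g. SCSI_MLQUEUE_HOST_BUSY) on failure.
 */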
3047static int
3048qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3049{
3050 struct device_reg __iomem *reg = ha->iobase;
3051 struct scsi_cmnd *cmd = sp->cmd;
3052 struct cmd_entry *pkt;
3053 __le32 *dword_ptr;
3054 int status = 0;
3055 int cnt;
3056 int req_cnt;
3057 int seg_cnt;
3058 u8 dir;
3059
3060 ENTER("qla1280_32bit_start_scsi");
3061
3062 dprintk(1, "32bit_start: cmd=%p sp=%p CDB=%x\n", cmd, sp,
3063 cmd->cmnd[0]);
3064
3065
3066 req_cnt = 1;
3067 seg_cnt = scsi_dma_map(cmd);
	if (seg_cnt > 0) {
		/*
		 * If greater than four SG entries then we need to allocate
		 * continuation entries to hold the remainder.
		 */
		if (seg_cnt > 4) {
3074 req_cnt += (seg_cnt - 4) / 7;
3075 if ((seg_cnt - 4) % 7)
3076 req_cnt++;
3077 }
3078 dprintk(3, "S/G Transfer cmd=%p seg_cnt=0x%x, req_cnt=%x\n",
3079 cmd, seg_cnt, req_cnt);
3080 } else if (seg_cnt < 0) {
3081 status = 1;
3082 goto out;
3083 }
3084
	if ((req_cnt + 2) >= ha->req_q_cnt) {
		/* Calculate number of free request entries. */
		cnt = RD_REG_WORD(&reg->mailbox4);
		if (ha->req_ring_index < cnt)
			ha->req_q_cnt = cnt - ha->req_ring_index;
		else
			ha->req_q_cnt =
				REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
	}
3094
3095 dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
3096 ha->req_q_cnt, seg_cnt);
3097
3098 if ((req_cnt + 2) >= ha->req_q_cnt) {
3099 status = SCSI_MLQUEUE_HOST_BUSY;
3100 dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, "
3101 "req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index,
3102 ha->req_q_cnt, req_cnt);
3103 goto out;
3104 }
3105
3106
	for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
		     ha->outstanding_cmds[cnt] != NULL; cnt++);
3109
3110 if (cnt >= MAX_OUTSTANDING_COMMANDS) {
3111 status = SCSI_MLQUEUE_HOST_BUSY;
3112 dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING "
3113 "ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt);
3114 goto out;
3115 }
3116
3117 CMD_HANDLE(sp->cmd) = (unsigned char *) (unsigned long)(cnt + 1);
3118 ha->outstanding_cmds[cnt] = sp;
3119 ha->req_q_cnt -= req_cnt;
3120
3121
3122
3123
3124 pkt = (struct cmd_entry *) ha->request_ring_ptr;
3125
3126 pkt->entry_type = COMMAND_TYPE;
3127 pkt->entry_count = (uint8_t) req_cnt;
3128 pkt->sys_define = (uint8_t) ha->req_ring_index;
3129 pkt->entry_status = 0;
3130 pkt->handle = cpu_to_le32(cnt);
3131
3132
3133 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
3134
3135
3136 pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
3137
3138
3139 pkt->lun = SCSI_LUN_32(cmd);
3140 pkt->target = SCSI_BUS_32(cmd) ?
3141 (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
3142
3143
3144 if (cmd->device->simple_tags)
3145 pkt->control_flags |= cpu_to_le16(BIT_3);
3146
3147
3148 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
3149 memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
3150
3151
3152
3153 dir = qla1280_data_direction(cmd);
3154 pkt->control_flags |= cpu_to_le16(dir);
3155
3156
3157 pkt->dseg_count = cpu_to_le16(seg_cnt);
3158
3159
3160
3161
3162 if (seg_cnt) {
3163 struct scatterlist *sg, *s;
3164 int remseg = seg_cnt;
3165
3166 sg = scsi_sglist(cmd);
3167
3168
3169 dword_ptr = &pkt->dseg_0_address;
3170
3171 dprintk(3, "Building S/G data segments..\n");
3172 qla1280_dump_buffer(1, (char *)sg, 4 * 16);
3173
3174
3175 for_each_sg(sg, s, seg_cnt, cnt) {
3176 if (cnt == 4)
3177 break;
3178 *dword_ptr++ =
3179 cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
3180 *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
3181 dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n",
3182 (pci_dma_lo32(sg_dma_address(s))),
3183 (sg_dma_len(s)));
3184 remseg--;
3185 }
3186
3187
3188
3189 dprintk(3, "S/G Building Continuation"
3190 "...seg_cnt=0x%x remains\n", seg_cnt);
3191 while (remseg > 0) {
3192
3193 sg = s;
3194
3195 ha->req_ring_index++;
3196 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3197 ha->req_ring_index = 0;
3198 ha->request_ring_ptr =
3199 ha->request_ring;
3200 } else
3201 ha->request_ring_ptr++;
3202
3203 pkt = (struct cmd_entry *)ha->request_ring_ptr;
3204
3205
3206 memset(pkt, 0, REQUEST_ENTRY_SIZE);
3207
3208
3209 ((struct cont_entry *) pkt)->
3210 entry_type = CONTINUE_TYPE;
3211 ((struct cont_entry *) pkt)->entry_count = 1;
3212
3213 ((struct cont_entry *) pkt)->sys_define =
3214 (uint8_t) ha->req_ring_index;
3215
3216
3217 dword_ptr =
3218 &((struct cont_entry *) pkt)->dseg_0_address;
3219
3220
3221 for_each_sg(sg, s, remseg, cnt) {
3222 if (cnt == 7)
3223 break;
3224 *dword_ptr++ =
3225 cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
3226 *dword_ptr++ =
3227 cpu_to_le32(sg_dma_len(s));
3228 dprintk(1,
3229 "S/G Segment Cont. phys_addr=0x%x, "
3230 "len=0x%x\n",
3231 cpu_to_le32(pci_dma_lo32(sg_dma_address(s))),
3232 cpu_to_le32(sg_dma_len(s)));
3233 }
3234 remseg -= cnt;
3235 dprintk(5, "qla1280_32bit_start_scsi: "
3236 "continuation packet data - "
3237 "scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd),
3238 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
3239 qla1280_dump_buffer(5, (char *)pkt,
3240 REQUEST_ENTRY_SIZE);
3241 }
3242 } else {
3243 dprintk(5, "qla1280_32bit_start_scsi: No data, command "
3244 "packet data - \n");
3245 qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
3246 }
3247 dprintk(5, "qla1280_32bit_start_scsi: First IOCB block:\n");
3248 qla1280_dump_buffer(5, (char *)ha->request_ring_ptr,
3249 REQUEST_ENTRY_SIZE);
3250
3251
3252 ha->req_ring_index++;
3253 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3254 ha->req_ring_index = 0;
3255 ha->request_ring_ptr = ha->request_ring;
3256 } else
3257 ha->request_ring_ptr++;
3258
3259
3260 dprintk(2, "qla1280_32bit_start_scsi: Wakeup RISC "
3261 "for pending command\n");
3262 sp->flags |= SRB_SENT;
3263 ha->actthreads++;
3264 WRT_REG_WORD(®->mailbox4, ha->req_ring_index);
3265
3266out:
3267 if (status)
3268 dprintk(2, "qla1280_32bit_start_scsi: **** FAILED ****\n");
3269
3270 LEAVE("qla1280_32bit_start_scsi");
3271
3272 return status;
3273}
3274#endif
3275
3276
3277
3278
3279
3280
3281
3282
3283
3284
3285
3286
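/*
 * qla1280_req_pkt
 *      Wait (up to roughly 30 seconds) for a free request ring slot and
 *      return a zeroed request packet, polling the ISP while waiting.
 *
 * Returns:
 *      pointer to the packet, or NULL if no slot became available.
 */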
3287static request_t *
3288qla1280_req_pkt(struct scsi_qla_host *ha)
3289{
3290 struct device_reg __iomem *reg = ha->iobase;
3291 request_t *pkt = NULL;
3292 int cnt;
3293 uint32_t timer;
3294
3295 ENTER("qla1280_req_pkt");
3296
3297
3298
3299
3300
3301 for (timer = 15000000; timer; timer--) {
3302 if (ha->req_q_cnt > 0) {
3303
			cnt = RD_REG_WORD(&reg->mailbox4);
			if (ha->req_ring_index < cnt)
				ha->req_q_cnt = cnt - ha->req_ring_index;
			else
				ha->req_q_cnt =
					REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
3310 }
3311
3312
3313 if (ha->req_q_cnt > 0) {
3314 ha->req_q_cnt--;
3315 pkt = ha->request_ring_ptr;
3316
3317
3318 memset(pkt, 0, REQUEST_ENTRY_SIZE);
3319
3320
3321
3322
3323
3324
3325 pkt->sys_define = (uint8_t) ha->req_ring_index;
3326
3327
3328 pkt->entry_count = 1;
3329
3330 break;
3331 }
3332
3333 udelay(2);
3334
3335
3336 qla1280_poll(ha);
3337 }
3338
3339 if (!pkt)
3340 dprintk(2, "qla1280_req_pkt: **** FAILED ****\n");
3341 else
3342 dprintk(3, "qla1280_req_pkt: exiting normally\n");
3343
3344 return pkt;
3345}
3346
3347
3348
3349
3350
3351
3352
3353
3354
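/*
 * qla1280_isp_cmd
 *      Advance the request ring pointer past the packet just built and
 *      write the new in-pointer to mailbox 4 to notify the ISP.
 */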
3355static void
3356qla1280_isp_cmd(struct scsi_qla_host *ha)
3357{
3358 struct device_reg __iomem *reg = ha->iobase;
3359
3360 ENTER("qla1280_isp_cmd");
3361
3362 dprintk(5, "qla1280_isp_cmd: IOCB data:\n");
3363 qla1280_dump_buffer(5, (char *)ha->request_ring_ptr,
3364 REQUEST_ENTRY_SIZE);
3365
3366
3367 ha->req_ring_index++;
3368 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3369 ha->req_ring_index = 0;
3370 ha->request_ring_ptr = ha->request_ring;
3371 } else
3372 ha->request_ring_ptr++;
3373
	/* Tell the ISP the new request queue in-pointer (mailbox 4). */
	WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
3378
3379 LEAVE("qla1280_isp_cmd");
3380}
3381
3382
3383
3384
3385
3386
3387
3388
3389
3390
3391
3392
3393
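/*
 * qla1280_isr
 *      Interrupt service routine body: handles mailbox/asynchronous events
 *      and walks the response ring, moving completed commands onto done_q.
 *
 * Input:
 *      ha     = adapter block pointer.
 *      done_q = list head to collect completed SRBs on.
 */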
3394static void
3395qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q)
3396{
3397 struct device_reg __iomem *reg = ha->iobase;
3398 struct response *pkt;
3399 struct srb *sp = NULL;
3400 uint16_t mailbox[MAILBOX_REGISTER_COUNT];
3401 uint16_t *wptr;
3402 uint32_t index;
3403 u16 istatus;
3404
3405 ENTER("qla1280_isr");
3406
	istatus = RD_REG_WORD(&reg->istatus);
	if (!(istatus & (RISC_INT | PCI_INT)))
		return;

	/* Save mailbox register 5 (response queue in-pointer). */
	mailbox[5] = RD_REG_WORD(&reg->mailbox5);

	/* Semaphore bit 0 set means a mailbox/asynchronous event is posted. */
	mailbox[0] = RD_REG_WORD_dmasync(&reg->semaphore);

	if (mailbox[0] & BIT_0) {
		/*
		 * Read mailboxes 0-2; for anything other than a fast SCSI
		 * completion also pick up 3, 4, 6 and 7 (5 was saved above).
		 */
		wptr = &mailbox[0];
		*wptr++ = RD_REG_WORD(&reg->mailbox0);
		*wptr++ = RD_REG_WORD(&reg->mailbox1);
		*wptr = RD_REG_WORD(&reg->mailbox2);
		if (mailbox[0] != MBA_SCSI_COMPLETION) {
			wptr++;
			*wptr++ = RD_REG_WORD(&reg->mailbox3);
			*wptr++ = RD_REG_WORD(&reg->mailbox4);
			wptr++;
			*wptr++ = RD_REG_WORD(&reg->mailbox6);
			*wptr = RD_REG_WORD(&reg->mailbox7);
		}

		/* Release the mailbox registers and clear the RISC interrupt. */
		WRT_REG_WORD(&reg->semaphore, 0);
		WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
3439
3440 dprintk(5, "qla1280_isr: mailbox interrupt mailbox[0] = 0x%x",
3441 mailbox[0]);
3442
3443
3444 switch (mailbox[0]) {
3445 case MBA_SCSI_COMPLETION:
3446 dprintk(5, "qla1280_isr: mailbox SCSI response "
3447 "completion\n");
3448
3449 if (ha->flags.online) {
3450
3451 index = mailbox[2] << 16 | mailbox[1];
3452
3453
3454 if (index < MAX_OUTSTANDING_COMMANDS)
3455 sp = ha->outstanding_cmds[index];
3456 else
3457 sp = NULL;
3458
3459 if (sp) {
3460
3461 ha->outstanding_cmds[index] = NULL;
3462
3463
3464 CMD_RESULT(sp->cmd) = 0;
3465 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3466
3467
3468 list_add_tail(&sp->list, done_q);
3469 } else {
3470
3471
3472
3473 printk(KERN_WARNING
3474 "qla1280: ISP invalid handle\n");
3475 }
3476 }
3477 break;
3478
3479 case MBA_BUS_RESET:
3480 ha->flags.reset_marker = 1;
3481 index = mailbox[6] & BIT_0;
3482 ha->bus_settings[index].reset_marker = 1;
3483
3484 printk(KERN_DEBUG "qla1280_isr(): index %i "
3485 "asynchronous BUS_RESET\n", index);
3486 break;
3487
3488 case MBA_SYSTEM_ERR:
3489 printk(KERN_WARNING
3490 "qla1280: ISP System Error - mbx1=%xh, mbx2="
3491 "%xh, mbx3=%xh\n", mailbox[1], mailbox[2],
3492 mailbox[3]);
3493 break;
3494
3495 case MBA_REQ_TRANSFER_ERR:
3496 printk(KERN_WARNING
3497 "qla1280: ISP Request Transfer Error\n");
3498 break;
3499
3500 case MBA_RSP_TRANSFER_ERR:
3501 printk(KERN_WARNING
3502 "qla1280: ISP Response Transfer Error\n");
3503 break;
3504
3505 case MBA_WAKEUP_THRES:
3506 dprintk(2, "qla1280_isr: asynchronous WAKEUP_THRES\n");
3507 break;
3508
3509 case MBA_TIMEOUT_RESET:
3510 dprintk(2,
3511 "qla1280_isr: asynchronous TIMEOUT_RESET\n");
3512 break;
3513
3514 case MBA_DEVICE_RESET:
3515 printk(KERN_INFO "qla1280_isr(): asynchronous "
3516 "BUS_DEVICE_RESET\n");
3517
3518 ha->flags.reset_marker = 1;
3519 index = mailbox[6] & BIT_0;
3520 ha->bus_settings[index].reset_marker = 1;
3521 break;
3522
3523 case MBA_BUS_MODE_CHANGE:
3524 dprintk(2,
3525 "qla1280_isr: asynchronous BUS_MODE_CHANGE\n");
3526 break;
3527
3528 default:
3529
3530 if (mailbox[0] < MBA_ASYNC_EVENT) {
3531 wptr = &mailbox[0];
3532 memcpy((uint16_t *) ha->mailbox_out, wptr,
3533 MAILBOX_REGISTER_COUNT *
3534 sizeof(uint16_t));
3535
3536 if(ha->mailbox_wait != NULL)
3537 complete(ha->mailbox_wait);
3538 }
3539 break;
3540 }
3541 } else {
		WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
3543 }
3544
3545
3546
3547
3548
3549 if (!(ha->flags.online && !ha->mailbox_wait)) {
3550 dprintk(2, "qla1280_isr: Response pointer Error\n");
3551 goto out;
3552 }
3553
3554 if (mailbox[5] >= RESPONSE_ENTRY_CNT)
3555 goto out;
3556
3557 while (ha->rsp_ring_index != mailbox[5]) {
3558 pkt = ha->response_ring_ptr;
3559
3560 dprintk(5, "qla1280_isr: ha->rsp_ring_index = 0x%x, mailbox[5]"
3561 " = 0x%x\n", ha->rsp_ring_index, mailbox[5]);
3562 dprintk(5,"qla1280_isr: response packet data\n");
3563 qla1280_dump_buffer(5, (char *)pkt, RESPONSE_ENTRY_SIZE);
3564
3565 if (pkt->entry_type == STATUS_TYPE) {
3566 if ((le16_to_cpu(pkt->scsi_status) & 0xff)
3567 || pkt->comp_status || pkt->entry_status) {
3568 dprintk(2, "qla1280_isr: ha->rsp_ring_index = "
3569 "0x%x mailbox[5] = 0x%x, comp_status "
3570 "= 0x%x, scsi_status = 0x%x\n",
3571 ha->rsp_ring_index, mailbox[5],
3572 le16_to_cpu(pkt->comp_status),
3573 le16_to_cpu(pkt->scsi_status));
3574 }
3575 } else {
3576 dprintk(2, "qla1280_isr: ha->rsp_ring_index = "
3577 "0x%x, mailbox[5] = 0x%x\n",
3578 ha->rsp_ring_index, mailbox[5]);
3579 dprintk(2, "qla1280_isr: response packet data\n");
3580 qla1280_dump_buffer(2, (char *)pkt,
3581 RESPONSE_ENTRY_SIZE);
3582 }
3583
3584 if (pkt->entry_type == STATUS_TYPE || pkt->entry_status) {
3585 dprintk(2, "status: Cmd %p, handle %i\n",
3586 ha->outstanding_cmds[pkt->handle]->cmd,
3587 pkt->handle);
3588 if (pkt->entry_type == STATUS_TYPE)
3589 qla1280_status_entry(ha, pkt, done_q);
3590 else
3591 qla1280_error_entry(ha, pkt, done_q);
3592
3593 ha->rsp_ring_index++;
3594 if (ha->rsp_ring_index == RESPONSE_ENTRY_CNT) {
3595 ha->rsp_ring_index = 0;
3596 ha->response_ring_ptr = ha->response_ring;
3597 } else
3598 ha->response_ring_ptr++;
			WRT_REG_WORD(&reg->mailbox5, ha->rsp_ring_index);
3600 }
3601 }
3602
3603 out:
3604 LEAVE("qla1280_isr");
3605}
3606
3607
3608
3609
3610
3611
3612
3613
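/*
 * qla1280_rst_aen
 *      Process pending bus reset asynchronous events: issue a MK_SYNC_ALL
 *      marker for every bus that reported a reset before new commands are
 *      started.
 */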
3614static void
3615qla1280_rst_aen(struct scsi_qla_host *ha)
3616{
3617 uint8_t bus;
3618
3619 ENTER("qla1280_rst_aen");
3620
3621 if (ha->flags.online && !ha->flags.reset_active &&
3622 !ha->flags.abort_isp_active) {
3623 ha->flags.reset_active = 1;
3624 while (ha->flags.reset_marker) {
3625
3626 ha->flags.reset_marker = 0;
3627 for (bus = 0; bus < ha->ports &&
3628 !ha->flags.reset_marker; bus++) {
3629 if (ha->bus_settings[bus].reset_marker) {
3630 ha->bus_settings[bus].reset_marker = 0;
3631 qla1280_marker(ha, bus, 0, 0,
3632 MK_SYNC_ALL);
3633 }
3634 }
3635 }
3636 }
3637
3638 LEAVE("qla1280_rst_aen");
3639}
3640
3641
3642
3643
3644
3645
3646
3647
3648
3649
3650
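/*
 * qla1280_status_entry
 *      Process a STATUS_TYPE response entry: look up the owning SRB by
 *      handle, translate completion/SCSI status, copy any request sense
 *      data and queue the command on done_q.
 */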
3651static void
3652qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
3653 struct list_head *done_q)
3654{
3655 unsigned int bus, target, lun;
3656 int sense_sz;
3657 struct srb *sp;
3658 struct scsi_cmnd *cmd;
3659 uint32_t handle = le32_to_cpu(pkt->handle);
3660 uint16_t scsi_status = le16_to_cpu(pkt->scsi_status);
3661 uint16_t comp_status = le16_to_cpu(pkt->comp_status);
3662
3663 ENTER("qla1280_status_entry");
3664
3665
3666 if (handle < MAX_OUTSTANDING_COMMANDS)
3667 sp = ha->outstanding_cmds[handle];
3668 else
3669 sp = NULL;
3670
3671 if (!sp) {
3672 printk(KERN_WARNING "qla1280: Status Entry invalid handle\n");
3673 goto out;
3674 }
3675
3676
3677 ha->outstanding_cmds[handle] = NULL;
3678
3679 cmd = sp->cmd;
3680
3681
3682 bus = SCSI_BUS_32(cmd);
3683 target = SCSI_TCN_32(cmd);
3684 lun = SCSI_LUN_32(cmd);
3685
3686 if (comp_status || scsi_status) {
3687 dprintk(3, "scsi: comp_status = 0x%x, scsi_status = "
3688 "0x%x, handle = 0x%x\n", comp_status,
3689 scsi_status, handle);
3690 }
3691
3692
3693 if ((scsi_status & 0xFF) == SAM_STAT_TASK_SET_FULL ||
3694 (scsi_status & 0xFF) == SAM_STAT_BUSY) {
3695 CMD_RESULT(cmd) = scsi_status & 0xff;
3696 } else {
3697
3698
3699 CMD_RESULT(cmd) = qla1280_return_status(pkt, cmd);
3700
3701 if (scsi_status & SAM_STAT_CHECK_CONDITION) {
3702 if (comp_status != CS_ARS_FAILED) {
3703 uint16_t req_sense_length =
3704 le16_to_cpu(pkt->req_sense_length);
3705 if (req_sense_length < CMD_SNSLEN(cmd))
3706 sense_sz = req_sense_length;
3707 else
3708
3709
3710
3711
3712
3713 sense_sz = CMD_SNSLEN(cmd) - 1;
3714
3715 memcpy(cmd->sense_buffer,
3716 &pkt->req_sense_data, sense_sz);
3717 } else
3718 sense_sz = 0;
3719 memset(cmd->sense_buffer + sense_sz, 0,
3720 SCSI_SENSE_BUFFERSIZE - sense_sz);
3721
3722 dprintk(2, "qla1280_status_entry: Check "
3723 "condition Sense data, b %i, t %i, "
3724 "l %i\n", bus, target, lun);
3725 if (sense_sz)
3726 qla1280_dump_buffer(2,
3727 (char *)cmd->sense_buffer,
3728 sense_sz);
3729 }
3730 }
3731
3732 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3733
3734
3735 list_add_tail(&sp->list, done_q);
3736 out:
3737 LEAVE("qla1280_status_entry");
3738}
3739
3740
3741
3742
3743
3744
3745
3746
3747
3748
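/*
 * qla1280_error_entry
 *      Process an error entry from the response ring: map the entry_status
 *      flags to a DID_* result for the owning command and queue it on
 *      done_q.
 */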
3749static void
3750qla1280_error_entry(struct scsi_qla_host *ha, struct response *pkt,
3751 struct list_head *done_q)
3752{
3753 struct srb *sp;
3754 uint32_t handle = le32_to_cpu(pkt->handle);
3755
3756 ENTER("qla1280_error_entry");
3757
3758 if (pkt->entry_status & BIT_3)
3759 dprintk(2, "qla1280_error_entry: BAD PAYLOAD flag error\n");
3760 else if (pkt->entry_status & BIT_2)
3761 dprintk(2, "qla1280_error_entry: BAD HEADER flag error\n");
3762 else if (pkt->entry_status & BIT_1)
3763 dprintk(2, "qla1280_error_entry: FULL flag error\n");
3764 else
3765 dprintk(2, "qla1280_error_entry: UNKNOWN flag error\n");
3766
3767
3768 if (handle < MAX_OUTSTANDING_COMMANDS)
3769 sp = ha->outstanding_cmds[handle];
3770 else
3771 sp = NULL;
3772
3773 if (sp) {
3774
3775 ha->outstanding_cmds[handle] = NULL;
3776
3777
		if (pkt->entry_status & (BIT_3 | BIT_2)) {
3779
3780
3781 CMD_RESULT(sp->cmd) = DID_ERROR << 16;
3782 } else if (pkt->entry_status & BIT_1) {
3783 CMD_RESULT(sp->cmd) = DID_BUS_BUSY << 16;
3784 } else {
3785
3786 CMD_RESULT(sp->cmd) = DID_ERROR << 16;
3787 }
3788
3789 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3790
3791
3792 list_add_tail(&sp->list, done_q);
3793 }
3794#ifdef QLA_64BIT_PTR
3795 else if (pkt->entry_type == COMMAND_A64_TYPE) {
3796 printk(KERN_WARNING "!qla1280: Error Entry invalid handle");
3797 }
3798#endif
3799
3800 LEAVE("qla1280_error_entry");
3801}
3802
3803
3804
3805
3806
3807
3808
3809
3810
3811
3812
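/*
 * qla1280_abort_isp
 *      Recover from a fatal firmware error: fail all outstanding commands
 *      with DID_RESET, reload the firmware, reinitialize the rings and
 *      reset every SCSI bus.  On failure the adapter is disabled.
 *
 * Returns:
 *      0 = success
 */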
3813static int
3814qla1280_abort_isp(struct scsi_qla_host *ha)
3815{
3816 struct device_reg __iomem *reg = ha->iobase;
3817 struct srb *sp;
3818 int status = 0;
3819 int cnt;
3820 int bus;
3821
3822 ENTER("qla1280_abort_isp");
3823
3824 if (ha->flags.abort_isp_active || !ha->flags.online)
3825 goto out;
3826
3827 ha->flags.abort_isp_active = 1;
3828
3829
	qla1280_disable_intrs(ha);
	WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
	RD_REG_WORD(&reg->id_l);
3833
3834 printk(KERN_INFO "scsi(%li): dequeuing outstanding commands\n",
3835 ha->host_no);
3836
3837 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
3838 struct scsi_cmnd *cmd;
3839 sp = ha->outstanding_cmds[cnt];
3840 if (sp) {
3841 cmd = sp->cmd;
3842 CMD_RESULT(cmd) = DID_RESET << 16;
3843 CMD_HANDLE(cmd) = COMPLETED_HANDLE;
3844 ha->outstanding_cmds[cnt] = NULL;
3845 list_add_tail(&sp->list, &ha->done_q);
3846 }
3847 }
3848
3849 qla1280_done(ha);
3850
3851 status = qla1280_load_firmware(ha);
3852 if (status)
3853 goto out;
3854
3855
3856 qla1280_nvram_config (ha);
3857
3858 status = qla1280_init_rings(ha);
3859 if (status)
3860 goto out;
3861
3862
3863 for (bus = 0; bus < ha->ports; bus++)
3864 qla1280_bus_reset(ha, bus);
3865
3866 ha->flags.abort_isp_active = 0;
3867 out:
3868 if (status) {
		printk(KERN_WARNING
		       "qla1280: ISP error recovery failed, board disabled\n");
3871 qla1280_reset_adapter(ha);
3872 dprintk(2, "qla1280_abort_isp: **** FAILED ****\n");
3873 }
3874
3875 LEAVE("qla1280_abort_isp");
3876 return status;
3877}
3878
3879
3880
3881
3882
3883
3884
3885
3886
3887
3888
3889
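/*
 * qla1280_debounce_register
 *      Read a 16-bit register repeatedly until two consecutive reads return
 *      the same value, filtering out transient/in-flight values.
 */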
3890static u16
3891qla1280_debounce_register(volatile u16 __iomem * addr)
3892{
3893 volatile u16 ret;
3894 volatile u16 ret2;
3895
3896 ret = RD_REG_WORD(addr);
3897 ret2 = RD_REG_WORD(addr);
3898
3899 if (ret == ret2)
3900 return ret;
3901
3902 do {
3903 cpu_relax();
3904 ret = RD_REG_WORD(addr);
3905 ret2 = RD_REG_WORD(addr);
3906 } while (ret != ret2);
3907
3908 return ret;
3909}
3910
3911
3912
3913
3914
3915
3916
3917#define SET_SXP_BANK 0x0100
3918#define SCSI_PHASE_INVALID 0x87FF
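/*
 * qla1280_check_for_dead_scsi_bus
 *      For a bus previously marked dead, peek at the SCSI control pins
 *      through the SXP register bank to see whether the bus is still stuck
 *      in an invalid phase.
 *
 * Returns:
 *      1 = bus is dead, 0 = bus is (now) alive.
 */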
3919static int
3920qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *ha, unsigned int bus)
3921{
3922 uint16_t config_reg, scsi_control;
3923 struct device_reg __iomem *reg = ha->iobase;
3924
3925 if (ha->bus_settings[bus].scsi_bus_dead) {
		WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
		config_reg = RD_REG_WORD(&reg->cfg_1);
		WRT_REG_WORD(&reg->cfg_1, SET_SXP_BANK);
		scsi_control = RD_REG_WORD(&reg->scsiControlPins);
		WRT_REG_WORD(&reg->cfg_1, config_reg);
		WRT_REG_WORD(&reg->host_cmd, HC_RELEASE_RISC);
3932
3933 if (scsi_control == SCSI_PHASE_INVALID) {
3934 ha->bus_settings[bus].scsi_bus_dead = 1;
3935 return 1;
3936 } else {
3937 ha->bus_settings[bus].scsi_bus_dead = 0;
3938 ha->bus_settings[bus].failed_reset_count = 0;
3939 }
3940 }
3941 return 0;
3942}
3943
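/*
 * qla1280_get_target_parameters
 *      Query the firmware (MBC_GET_TARGET_PARAMETERS) for the negotiated
 *      sync period/offset, wide and DT settings of a device and log them.
 */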
3944static void
3945qla1280_get_target_parameters(struct scsi_qla_host *ha,
3946 struct scsi_device *device)
3947{
3948 uint16_t mb[MAILBOX_REGISTER_COUNT];
3949 int bus, target, lun;
3950
3951 bus = device->channel;
3952 target = device->id;
3953 lun = device->lun;
3954
3955
3956 mb[0] = MBC_GET_TARGET_PARAMETERS;
3957 mb[1] = (uint16_t) (bus ? target | BIT_7 : target);
3958 mb[1] <<= 8;
3959 qla1280_mailbox_command(ha, BIT_6 | BIT_3 | BIT_2 | BIT_1 | BIT_0,
3960 &mb[0]);
3961
3962 printk(KERN_INFO "scsi(%li:%d:%d:%d):", ha->host_no, bus, target, lun);
3963
3964 if (mb[3] != 0) {
3965 printk(" Sync: period %d, offset %d",
3966 (mb[3] & 0xff), (mb[3] >> 8));
3967 if (mb[2] & BIT_13)
3968 printk(", Wide");
3969 if ((mb[2] & BIT_5) && ((mb[6] >> 8) & 0xff) >= 2)
3970 printk(", DT");
3971 } else
3972 printk(" Async");
3973
3974 if (device->simple_tags)
3975 printk(", Tagged queuing: depth %d", device->queue_depth);
3976 printk("\n");
3977}
3978
3979
3980#if DEBUG_QLA1280
3981static void
3982__qla1280_dump_buffer(char *b, int size)
3983{
3984 int cnt;
3985 u8 c;
3986
3987 printk(KERN_DEBUG " 0 1 2 3 4 5 6 7 8 9 Ah "
3988 "Bh Ch Dh Eh Fh\n");
3989 printk(KERN_DEBUG "---------------------------------------------"
3990 "------------------\n");
3991
3992 for (cnt = 0; cnt < size;) {
3993 c = *b++;
3994
3995 printk("0x%02x", c);
3996 cnt++;
3997 if (!(cnt % 16))
3998 printk("\n");
3999 else
4000 printk(" ");
4001 }
4002 if (cnt % 16)
4003 printk("\n");
4004}
4005
4006
4007
4008
4009
4010static void
4011__qla1280_print_scsi_cmd(struct scsi_cmnd *cmd)
4012{
4013 struct scsi_qla_host *ha;
4014 struct Scsi_Host *host = CMD_HOST(cmd);
4015 struct srb *sp;
4016
4017
4018 int i;
4019 ha = (struct scsi_qla_host *)host->hostdata;
4020
4021 sp = (struct srb *)CMD_SP(cmd);
4022 printk("SCSI Command @= 0x%p, Handle=0x%p\n", cmd, CMD_HANDLE(cmd));
4023 printk(" chan=%d, target = 0x%02x, lun = 0x%02x, cmd_len = 0x%02x\n",
4024 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd),
4025 CMD_CDBLEN(cmd));
4026 printk(" CDB = ");
4027 for (i = 0; i < cmd->cmd_len; i++) {
4028 printk("0x%02x ", cmd->cmnd[i]);
4029 }
4030 printk(" seg_cnt =%d\n", scsi_sg_count(cmd));
4031 printk(" request buffer=0x%p, request buffer len=0x%x\n",
4032 scsi_sglist(cmd), scsi_bufflen(cmd));
4033
4034
4035
4036
4037
4038
4039 printk(" tag=%d, transfersize=0x%x \n",
4040 cmd->tag, cmd->transfersize);
4041 printk(" SP=0x%p\n", CMD_SP(cmd));
4042 printk(" underflow size = 0x%x, direction=0x%x\n",
4043 cmd->underflow, cmd->sc_data_direction);
4044}
4045
4046
4047
4048
4049
4050static void
4051ql1280_dump_device(struct scsi_qla_host *ha)
4052{
4053
4054 struct scsi_cmnd *cp;
4055 struct srb *sp;
4056 int i;
4057
4058 printk(KERN_DEBUG "Outstanding Commands on controller:\n");
4059
4060 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
4061 if ((sp = ha->outstanding_cmds[i]) == NULL)
4062 continue;
4063 if ((cp = sp->cmd) == NULL)
4064 continue;
4065 qla1280_print_scsi_cmd(1, cp);
4066 }
4067}
4068#endif
4069
4070
4071enum tokens {
4072 TOKEN_NVRAM,
4073 TOKEN_SYNC,
4074 TOKEN_WIDE,
4075 TOKEN_PPR,
4076 TOKEN_VERBOSE,
4077 TOKEN_DEBUG,
4078};
4079
4080struct setup_tokens {
4081 char *token;
4082 int val;
4083};
4084
4085static struct setup_tokens setup_token[] __initdata =
4086{
4087 { "nvram", TOKEN_NVRAM },
4088 { "sync", TOKEN_SYNC },
4089 { "wide", TOKEN_WIDE },
4090 { "ppr", TOKEN_PPR },
4091 { "verbose", TOKEN_VERBOSE },
4092 { "debug", TOKEN_DEBUG },
4093};
4094
4095
4096
4097
4098
4099
4100
4101
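/*
 * qla1280_setup
 *      Parse the "qla1280=" boot/module option string.  Options are
 *      "name:value" pairs separated by ';', e.g. "nvram:no;verbose:yes",
 *      where value is "yes", "no" or a numeric mask.
 */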
4102static int __init
4103qla1280_setup(char *s)
4104{
4105 char *cp, *ptr;
4106 unsigned long val;
4107 int toke;
4108
4109 cp = s;
4110
4111 while (cp && (ptr = strchr(cp, ':'))) {
4112 ptr++;
4113 if (!strcmp(ptr, "yes")) {
4114 val = 0x10000;
4115 ptr += 3;
4116 } else if (!strcmp(ptr, "no")) {
4117 val = 0;
4118 ptr += 2;
4119 } else
4120 val = simple_strtoul(ptr, &ptr, 0);
4121
4122 switch ((toke = qla1280_get_token(cp))) {
4123 case TOKEN_NVRAM:
4124 if (!val)
4125 driver_setup.no_nvram = 1;
4126 break;
4127 case TOKEN_SYNC:
4128 if (!val)
4129 driver_setup.no_sync = 1;
4130 else if (val != 0x10000)
4131 driver_setup.sync_mask = val;
4132 break;
4133 case TOKEN_WIDE:
4134 if (!val)
4135 driver_setup.no_wide = 1;
4136 else if (val != 0x10000)
4137 driver_setup.wide_mask = val;
4138 break;
4139 case TOKEN_PPR:
4140 if (!val)
4141 driver_setup.no_ppr = 1;
4142 else if (val != 0x10000)
4143 driver_setup.ppr_mask = val;
4144 break;
4145 case TOKEN_VERBOSE:
4146 qla1280_verbose = val;
4147 break;
4148 default:
4149 printk(KERN_INFO "qla1280: unknown boot option %s\n",
4150 cp);
4151 }
4152
4153 cp = strchr(ptr, ';');
4154 if (cp)
4155 cp++;
4156 else {
4157 break;
4158 }
4159 }
4160 return 1;
4161}
4162
4163
4164static int __init
4165qla1280_get_token(char *str)
4166{
4167 char *sep;
4168 long ret = -1;
4169 int i;
4170
4171 sep = strchr(str, ':');
4172
4173 if (sep) {
4174 for (i = 0; i < ARRAY_SIZE(setup_token); i++) {
4175 if (!strncmp(setup_token[i].token, str, (sep - str))) {
4176 ret = setup_token[i].val;
4177 break;
4178 }
4179 }
4180 }
4181
4182 return ret;
4183}
4184
4185
4186static struct scsi_host_template qla1280_driver_template = {
4187 .module = THIS_MODULE,
4188 .proc_name = "qla1280",
4189 .name = "Qlogic ISP 1280/12160",
4190 .info = qla1280_info,
4191 .slave_configure = qla1280_slave_configure,
4192 .queuecommand = qla1280_queuecommand,
4193 .eh_abort_handler = qla1280_eh_abort,
4194 .eh_device_reset_handler= qla1280_eh_device_reset,
4195 .eh_bus_reset_handler = qla1280_eh_bus_reset,
4196 .eh_host_reset_handler = qla1280_eh_adapter_reset,
4197 .bios_param = qla1280_biosparam,
4198 .can_queue = MAX_OUTSTANDING_COMMANDS,
4199 .this_id = -1,
4200 .sg_tablesize = SG_ALL,
4201 .use_clustering = ENABLE_CLUSTERING,
4202};
4203
4204
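/*
 * qla1280_probe_one
 *      PCI probe entry point: enable the device, allocate the Scsi_Host and
 *      request/response rings, map the register space, hook the interrupt,
 *      initialize the adapter and register with the SCSI midlayer.
 */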
4205static int
4206qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4207{
4208 int devnum = id->driver_data;
4209 struct qla_boards *bdp = &ql1280_board_tbl[devnum];
4210 struct Scsi_Host *host;
4211 struct scsi_qla_host *ha;
4212 int error = -ENODEV;
4213
4214
4215 if (pdev->subsystem_vendor == PCI_VENDOR_ID_AMI) {
4216 printk(KERN_INFO
4217 "qla1280: Skipping AMI SubSys Vendor ID Chip\n");
4218 goto error;
4219 }
4220
4221 printk(KERN_INFO "qla1280: %s found on PCI bus %i, dev %i\n",
4222 bdp->name, pdev->bus->number, PCI_SLOT(pdev->devfn));
4223
4224 if (pci_enable_device(pdev)) {
		printk(KERN_WARNING
		       "qla1280: Failed to enable PCI device, aborting.\n");
4227 goto error;
4228 }
4229
4230 pci_set_master(pdev);
4231
4232 error = -ENOMEM;
4233 host = scsi_host_alloc(&qla1280_driver_template, sizeof(*ha));
4234 if (!host) {
4235 printk(KERN_WARNING
4236 "qla1280: Failed to register host, aborting.\n");
4237 goto error_disable_device;
4238 }
4239
4240 ha = (struct scsi_qla_host *)host->hostdata;
4241 memset(ha, 0, sizeof(struct scsi_qla_host));
4242
4243 ha->pdev = pdev;
4244 ha->devnum = devnum;
4245
4246#ifdef QLA_64BIT_PTR
4247 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
4248 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
4249 printk(KERN_WARNING "scsi(%li): Unable to set a "
4250 "suitable DMA mask - aborting\n", ha->host_no);
4251 error = -ENODEV;
4252 goto error_put_host;
4253 }
4254 } else
4255 dprintk(2, "scsi(%li): 64 Bit PCI Addressing Enabled\n",
4256 ha->host_no);
4257#else
4258 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
4259 printk(KERN_WARNING "scsi(%li): Unable to set a "
4260 "suitable DMA mask - aborting\n", ha->host_no);
4261 error = -ENODEV;
4262 goto error_put_host;
4263 }
4264#endif
4265
4266 ha->request_ring = pci_alloc_consistent(ha->pdev,
4267 ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
4268 &ha->request_dma);
4269 if (!ha->request_ring) {
4270 printk(KERN_INFO "qla1280: Failed to get request memory\n");
4271 goto error_put_host;
4272 }
4273
4274 ha->response_ring = pci_alloc_consistent(ha->pdev,
4275 ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
4276 &ha->response_dma);
4277 if (!ha->response_ring) {
4278 printk(KERN_INFO "qla1280: Failed to get response memory\n");
4279 goto error_free_request_ring;
4280 }
4281
4282 ha->ports = bdp->numPorts;
4283
4284 ha->host = host;
4285 ha->host_no = host->host_no;
4286
4287 host->irq = pdev->irq;
4288 host->max_channel = bdp->numPorts - 1;
4289 host->max_lun = MAX_LUNS - 1;
4290 host->max_id = MAX_TARGETS;
4291 host->max_sectors = 1024;
4292 host->unique_id = host->host_no;
4293
4294 error = -ENODEV;
4295
4296#if MEMORY_MAPPED_IO
4297 ha->mmpbase = pci_ioremap_bar(ha->pdev, 1);
4298 if (!ha->mmpbase) {
4299 printk(KERN_INFO "qla1280: Unable to map I/O memory\n");
4300 goto error_free_response_ring;
4301 }
4302
4303 host->base = (unsigned long)ha->mmpbase;
4304 ha->iobase = (struct device_reg __iomem *)ha->mmpbase;
4305#else
4306 host->io_port = pci_resource_start(ha->pdev, 0);
4307 if (!request_region(host->io_port, 0xff, "qla1280")) {
4308 printk(KERN_INFO "qla1280: Failed to reserve i/o region "
4309 "0x%04lx-0x%04lx - already in use\n",
4310 host->io_port, host->io_port + 0xff);
4311 goto error_free_response_ring;
4312 }
4313
4314 ha->iobase = (struct device_reg *)host->io_port;
4315#endif
4316
4317 INIT_LIST_HEAD(&ha->done_q);
4318
4319
4320 qla1280_disable_intrs(ha);
4321
4322 if (request_irq(pdev->irq, qla1280_intr_handler, IRQF_SHARED,
4323 "qla1280", ha)) {
4324 printk("qla1280 : Failed to reserve interrupt %d already "
4325 "in use\n", pdev->irq);
4326 goto error_release_region;
4327 }
4328
4329
4330 if (qla1280_initialize_adapter(ha)) {
4331 printk(KERN_INFO "qla1x160: Failed to initialize adapter\n");
4332 goto error_free_irq;
4333 }
4334
4335
4336 host->this_id = ha->bus_settings[0].id;
4337
4338 pci_set_drvdata(pdev, host);
4339
4340 error = scsi_add_host(host, &pdev->dev);
4341 if (error)
4342 goto error_disable_adapter;
4343 scsi_scan_host(host);
4344
4345 return 0;
4346
4347 error_disable_adapter:
4348 qla1280_disable_intrs(ha);
4349 error_free_irq:
4350 free_irq(pdev->irq, ha);
4351 error_release_region:
4352#if MEMORY_MAPPED_IO
4353 iounmap(ha->mmpbase);
4354#else
4355 release_region(host->io_port, 0xff);
4356#endif
4357 error_free_response_ring:
4358 pci_free_consistent(ha->pdev,
4359 ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
4360 ha->response_ring, ha->response_dma);
4361 error_free_request_ring:
4362 pci_free_consistent(ha->pdev,
4363 ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
4364 ha->request_ring, ha->request_dma);
4365 error_put_host:
4366 scsi_host_put(host);
4367 error_disable_device:
4368 pci_disable_device(pdev);
4369 error:
4370 return error;
4371}
4372
4373
4374static void
4375qla1280_remove_one(struct pci_dev *pdev)
4376{
4377 struct Scsi_Host *host = pci_get_drvdata(pdev);
4378 struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
4379
4380 scsi_remove_host(host);
4381
4382 qla1280_disable_intrs(ha);
4383
4384 free_irq(pdev->irq, ha);
4385
4386#if MEMORY_MAPPED_IO
4387 iounmap(ha->mmpbase);
4388#else
4389 release_region(host->io_port, 0xff);
4390#endif
4391
4392 pci_free_consistent(ha->pdev,
4393 ((REQUEST_ENTRY_CNT + 1) * (sizeof(request_t))),
4394 ha->request_ring, ha->request_dma);
4395 pci_free_consistent(ha->pdev,
4396 ((RESPONSE_ENTRY_CNT + 1) * (sizeof(struct response))),
4397 ha->response_ring, ha->response_dma);
4398
4399 pci_disable_device(pdev);
4400
4401 scsi_host_put(host);
4402}
4403
4404static struct pci_driver qla1280_pci_driver = {
4405 .name = "qla1280",
4406 .id_table = qla1280_pci_tbl,
4407 .probe = qla1280_probe_one,
4408 .remove = qla1280_remove_one,
4409};
4410
4411static int __init
4412qla1280_init(void)
4413{
4414 if (sizeof(struct srb) > sizeof(struct scsi_pointer)) {
4415 printk(KERN_WARNING
4416 "qla1280: struct srb too big, aborting\n");
4417 return -EINVAL;
4418 }
4419
4420#ifdef MODULE
4421
4422
4423
4424
4425
4426
4427
4428
4429
4430
4431
4432
4433 if (qla1280)
4434 qla1280_setup(qla1280);
4435#endif
4436
4437 return pci_register_driver(&qla1280_pci_driver);
4438}
4439
4440static void __exit
4441qla1280_exit(void)
4442{
4443 int i;
4444
4445 pci_unregister_driver(&qla1280_pci_driver);
4446
4447 for (i = 0; i < QL_NUM_FW_IMAGES; i++) {
4448 release_firmware(qla1280_fw_tbl[i].fw);
4449 qla1280_fw_tbl[i].fw = NULL;
4450 }
4451}
4452
4453module_init(qla1280_init);
4454module_exit(qla1280_exit);
4455
4456MODULE_AUTHOR("Qlogic & Jes Sorensen");
4457MODULE_DESCRIPTION("Qlogic ISP SCSI (qla1x80/qla1x160) driver");
4458MODULE_LICENSE("GPL");
4459MODULE_FIRMWARE("qlogic/1040.bin");
4460MODULE_FIRMWARE("qlogic/1280.bin");
4461MODULE_FIRMWARE("qlogic/12160.bin");
4462MODULE_VERSION(QLA1280_VERSION);