/*
 * ips.c - driver for the IBM PCI ServeRAID family of RAID adapters
 */

164#include <asm/io.h>
165#include <asm/byteorder.h>
166#include <asm/page.h>
167#include <linux/stddef.h>
168#include <linux/string.h>
169#include <linux/errno.h>
170#include <linux/kernel.h>
171#include <linux/ioport.h>
172#include <linux/slab.h>
173#include <linux/delay.h>
174#include <linux/pci.h>
175#include <linux/proc_fs.h>
176#include <linux/reboot.h>
177#include <linux/interrupt.h>
178
179#include <linux/blkdev.h>
180#include <linux/types.h>
181#include <linux/dma-mapping.h>
182
183#include <scsi/sg.h>
184#include "scsi.h"
185#include <scsi/scsi_host.h>
186
187#include "ips.h"
188
189#include <linux/module.h>
190
191#include <linux/stat.h>
192
193#include <linux/spinlock.h>
194#include <linux/init.h>
195
196#include <linux/smp.h>
197
198#ifdef MODULE
199static char *ips = NULL;
200module_param(ips, charp, 0);
201#endif
202
203
204
205
206#define IPS_VERSION_HIGH IPS_VER_MAJOR_STRING "." IPS_VER_MINOR_STRING
207#define IPS_VERSION_LOW "." IPS_VER_BUILD_STRING " "
208
209#define IPS_DMA_DIR(scb) ((!scb->scsi_cmd || ips_is_passthru(scb->scsi_cmd) || \
210 DMA_NONE == scb->scsi_cmd->sc_data_direction) ? \
211 DMA_BIDIRECTIONAL : \
212 scb->scsi_cmd->sc_data_direction)
213
214#ifdef IPS_DEBUG
215#define METHOD_TRACE(s, i) if (ips_debug >= (i+10)) printk(KERN_NOTICE s "\n");
216#define DEBUG(i, s) if (ips_debug >= i) printk(KERN_NOTICE s "\n");
217#define DEBUG_VAR(i, s, v...) if (ips_debug >= i) printk(KERN_NOTICE s "\n", v);
218#else
219#define METHOD_TRACE(s, i)
220#define DEBUG(i, s)
221#define DEBUG_VAR(i, s, v...)
222#endif
223
224
225
226
227static int ips_eh_abort(struct scsi_cmnd *);
228static int ips_eh_reset(struct scsi_cmnd *);
229static int ips_queue(struct Scsi_Host *, struct scsi_cmnd *);
230static const char *ips_info(struct Scsi_Host *);
231static irqreturn_t do_ipsintr(int, void *);
232static int ips_hainit(ips_ha_t *);
233static int ips_map_status(ips_ha_t *, ips_scb_t *, ips_stat_t *);
234static int ips_send_wait(ips_ha_t *, ips_scb_t *, int, int);
235static int ips_send_cmd(ips_ha_t *, ips_scb_t *);
236static int ips_online(ips_ha_t *, ips_scb_t *);
237static int ips_inquiry(ips_ha_t *, ips_scb_t *);
238static int ips_rdcap(ips_ha_t *, ips_scb_t *);
239static int ips_msense(ips_ha_t *, ips_scb_t *);
240static int ips_reqsen(ips_ha_t *, ips_scb_t *);
241static int ips_deallocatescbs(ips_ha_t *, int);
242static int ips_allocatescbs(ips_ha_t *);
243static int ips_reset_copperhead(ips_ha_t *);
244static int ips_reset_copperhead_memio(ips_ha_t *);
245static int ips_reset_morpheus(ips_ha_t *);
246static int ips_issue_copperhead(ips_ha_t *, ips_scb_t *);
247static int ips_issue_copperhead_memio(ips_ha_t *, ips_scb_t *);
248static int ips_issue_i2o(ips_ha_t *, ips_scb_t *);
249static int ips_issue_i2o_memio(ips_ha_t *, ips_scb_t *);
250static int ips_isintr_copperhead(ips_ha_t *);
251static int ips_isintr_copperhead_memio(ips_ha_t *);
252static int ips_isintr_morpheus(ips_ha_t *);
253static int ips_wait(ips_ha_t *, int, int);
254static int ips_write_driver_status(ips_ha_t *, int);
255static int ips_read_adapter_status(ips_ha_t *, int);
256static int ips_read_subsystem_parameters(ips_ha_t *, int);
257static int ips_read_config(ips_ha_t *, int);
258static int ips_clear_adapter(ips_ha_t *, int);
259static int ips_readwrite_page5(ips_ha_t *, int, int);
260static int ips_init_copperhead(ips_ha_t *);
261static int ips_init_copperhead_memio(ips_ha_t *);
262static int ips_init_morpheus(ips_ha_t *);
263static int ips_isinit_copperhead(ips_ha_t *);
264static int ips_isinit_copperhead_memio(ips_ha_t *);
265static int ips_isinit_morpheus(ips_ha_t *);
266static int ips_erase_bios(ips_ha_t *);
267static int ips_program_bios(ips_ha_t *, char *, uint32_t, uint32_t);
268static int ips_verify_bios(ips_ha_t *, char *, uint32_t, uint32_t);
269static int ips_erase_bios_memio(ips_ha_t *);
270static int ips_program_bios_memio(ips_ha_t *, char *, uint32_t, uint32_t);
271static int ips_verify_bios_memio(ips_ha_t *, char *, uint32_t, uint32_t);
272static int ips_flash_copperhead(ips_ha_t *, ips_passthru_t *, ips_scb_t *);
273static int ips_flash_bios(ips_ha_t *, ips_passthru_t *, ips_scb_t *);
274static int ips_flash_firmware(ips_ha_t *, ips_passthru_t *, ips_scb_t *);
275static void ips_free_flash_copperhead(ips_ha_t * ha);
276static void ips_get_bios_version(ips_ha_t *, int);
277static void ips_identify_controller(ips_ha_t *);
278static void ips_chkstatus(ips_ha_t *, IPS_STATUS *);
279static void ips_enable_int_copperhead(ips_ha_t *);
280static void ips_enable_int_copperhead_memio(ips_ha_t *);
281static void ips_enable_int_morpheus(ips_ha_t *);
282static int ips_intr_copperhead(ips_ha_t *);
283static int ips_intr_morpheus(ips_ha_t *);
284static void ips_next(ips_ha_t *, int);
285static void ipsintr_blocking(ips_ha_t *, struct ips_scb *);
286static void ipsintr_done(ips_ha_t *, struct ips_scb *);
287static void ips_done(ips_ha_t *, ips_scb_t *);
288static void ips_free(ips_ha_t *);
289static void ips_init_scb(ips_ha_t *, ips_scb_t *);
290static void ips_freescb(ips_ha_t *, ips_scb_t *);
291static void ips_setup_funclist(ips_ha_t *);
292static void ips_statinit(ips_ha_t *);
293static void ips_statinit_memio(ips_ha_t *);
294static void ips_fix_ffdc_time(ips_ha_t *, ips_scb_t *, time64_t);
295static void ips_ffdc_reset(ips_ha_t *, int);
296static void ips_ffdc_time(ips_ha_t *);
297static uint32_t ips_statupd_copperhead(ips_ha_t *);
298static uint32_t ips_statupd_copperhead_memio(ips_ha_t *);
299static uint32_t ips_statupd_morpheus(ips_ha_t *);
300static ips_scb_t *ips_getscb(ips_ha_t *);
301static void ips_putq_scb_head(ips_scb_queue_t *, ips_scb_t *);
302static void ips_putq_wait_tail(ips_wait_queue_entry_t *, struct scsi_cmnd *);
303static void ips_putq_copp_tail(ips_copp_queue_t *,
304 ips_copp_wait_item_t *);
305static ips_scb_t *ips_removeq_scb_head(ips_scb_queue_t *);
306static ips_scb_t *ips_removeq_scb(ips_scb_queue_t *, ips_scb_t *);
307static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_entry_t *);
308static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_entry_t *,
309 struct scsi_cmnd *);
310static ips_copp_wait_item_t *ips_removeq_copp(ips_copp_queue_t *,
311 ips_copp_wait_item_t *);
312static ips_copp_wait_item_t *ips_removeq_copp_head(ips_copp_queue_t *);
313
314static int ips_is_passthru(struct scsi_cmnd *);
315static int ips_make_passthru(ips_ha_t *, struct scsi_cmnd *, ips_scb_t *, int);
316static int ips_usrcmd(ips_ha_t *, ips_passthru_t *, ips_scb_t *);
317static void ips_cleanup_passthru(ips_ha_t *, ips_scb_t *);
318static void ips_scmd_buf_write(struct scsi_cmnd * scmd, void *data,
319 unsigned int count);
320static void ips_scmd_buf_read(struct scsi_cmnd * scmd, void *data,
321 unsigned int count);
322
323static int ips_write_info(struct Scsi_Host *, char *, int);
324static int ips_show_info(struct seq_file *, struct Scsi_Host *);
325static int ips_host_info(ips_ha_t *, struct seq_file *);
326static int ips_abort_init(ips_ha_t * ha, int index);
327static int ips_init_phase2(int index);
328
329static int ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr);
330static int ips_register_scsi(int index);
331
332static int ips_poll_for_flush_complete(ips_ha_t * ha);
333static void ips_flush_and_reset(ips_ha_t *ha);
334
335
336
337
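/* Module-wide state: per-adapter host and ha tables, controller counts,
 * tunable timeouts, and the shared firmware-flash staging buffer. */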
338static const char ips_name[] = "ips";
339static struct Scsi_Host *ips_sh[IPS_MAX_ADAPTERS];
340static ips_ha_t *ips_ha[IPS_MAX_ADAPTERS];
341static unsigned int ips_next_controller;
342static unsigned int ips_num_controllers;
343static unsigned int ips_released_controllers;
344static int ips_hotplug;
345static int ips_cmd_timeout = 60;
346static int ips_reset_timeout = 60 * 5;
347static int ips_force_memio = 1;
348static int ips_force_i2o = 1;
349static int ips_ioctlsize = IPS_IOCTL_SIZE;
350static int ips_cd_boot;
351static char *ips_FlashData = NULL;
352static dma_addr_t ips_flashbusaddr;
353static long ips_FlashDataInUse;
354static uint32_t MaxLiteCmds = 32;
355static struct scsi_host_template ips_driver_template = {
356 .info = ips_info,
357 .queuecommand = ips_queue,
358 .eh_abort_handler = ips_eh_abort,
359 .eh_host_reset_handler = ips_eh_reset,
360 .proc_name = "ips",
361 .show_info = ips_show_info,
362 .write_info = ips_write_info,
363 .slave_configure = ips_slave_configure,
364 .bios_param = ips_biosparam,
365 .this_id = -1,
366 .sg_tablesize = IPS_MAX_SG,
367 .cmd_per_lun = 3,
368 .no_write_same = 1,
369};
370
371
372
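/* PCI IDs claimed by this driver; exported for module autoloading via
 * MODULE_DEVICE_TABLE below. */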
373static struct pci_device_id ips_pci_table[] = {
374 { 0x1014, 0x002E, PCI_ANY_ID, PCI_ANY_ID, 0, 0 },
375 { 0x1014, 0x01BD, PCI_ANY_ID, PCI_ANY_ID, 0, 0 },
376 { 0x9005, 0x0250, PCI_ANY_ID, PCI_ANY_ID, 0, 0 },
377 { 0, }
378};
379
380MODULE_DEVICE_TABLE( pci, ips_pci_table );
381
382static char ips_hot_plug_name[] = "ips";
383
384static int ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent);
385static void ips_remove_device(struct pci_dev *pci_dev);
386
387static struct pci_driver ips_pci_driver = {
388 .name = ips_hot_plug_name,
389 .id_table = ips_pci_table,
390 .probe = ips_insert_device,
391 .remove = ips_remove_device,
392};
393
394
395
396
397
398static int ips_halt(struct notifier_block *nb, ulong event, void *buf);
399
400#define MAX_ADAPTER_NAME 15
401
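/* Human-readable adapter names, indexed by (ha->ad_type - 1). */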
402static char ips_adapter_name[][30] = {
403 "ServeRAID",
404 "ServeRAID II",
405 "ServeRAID on motherboard",
406 "ServeRAID on motherboard",
407 "ServeRAID 3H",
408 "ServeRAID 3L",
409 "ServeRAID 4H",
410 "ServeRAID 4M",
411 "ServeRAID 4L",
412 "ServeRAID 4Mx",
413 "ServeRAID 4Lx",
414 "ServeRAID 5i",
415 "ServeRAID 5i",
416 "ServeRAID 6M",
417 "ServeRAID 6i",
418 "ServeRAID 7t",
419 "ServeRAID 7k",
420 "ServeRAID 7M"
421};
422
423static struct notifier_block ips_notifier = {
424 ips_halt, NULL, 0
425};
426
427
428
429
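/* Per-opcode data-transfer direction table, indexed by the SCSI CDB opcode. */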
430static char ips_command_direction[] = {
431 IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT,
432 IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK,
433 IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
434 IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_OUT,
435 IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_OUT,
436 IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_OUT,
437 IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_IN,
438 IPS_DATA_UNK, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK,
439 IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_UNK,
440 IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT,
441 IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_NONE,
442 IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT,
443 IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT,
444 IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_NONE,
445 IPS_DATA_UNK, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_UNK,
446 IPS_DATA_NONE, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK,
447 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
448 IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
449 IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
450 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
451 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
452 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
453 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
454 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
455 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
456 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
457 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
458 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
459 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
460 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
461 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
462 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
463 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
464 IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_NONE,
465 IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_OUT,
466 IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_NONE,
467 IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_IN,
468 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
469 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
470 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
471 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
472 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
473 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
474 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
475 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
476 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
477 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_OUT,
478 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
479 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
480 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
481 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK
482};
483
484
485
486
487
488
489
490
491
492
493
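/* Parse the "ips=" boot/module option string: a comma-separated list of
 * key:value pairs matched against the option table below. */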
494static int
495ips_setup(char *ips_str)
496{
497
498 int i;
499 char *key;
500 char *value;
501 static const IPS_OPTION options[] = {
502 {"noi2o", &ips_force_i2o, 0},
503 {"nommap", &ips_force_memio, 0},
504 {"ioctlsize", &ips_ioctlsize, IPS_IOCTL_SIZE},
505 {"cdboot", &ips_cd_boot, 0},
506 {"maxcmds", &MaxLiteCmds, 32},
507 };
508
509
510
511 while ((key = strsep(&ips_str, ",."))) {
512 if (!*key)
513 continue;
514 value = strchr(key, ':');
515 if (value)
516 *value++ = '\0';
517
518
519
520
521 for (i = 0; i < ARRAY_SIZE(options); i++) {
522 if (strncasecmp
523 (key, options[i].option_name,
524 strlen(options[i].option_name)) == 0) {
525 if (value)
526 *options[i].option_flag =
527 simple_strtoul(value, NULL, 0);
528 else
529 *options[i].option_flag =
530 options[i].option_value;
531 break;
532 }
533 }
534 }
535
536 return (1);
537}
538
539__setup("ips=", ips_setup);
540
541
542
543
544
545
546
547
548
549
550
551
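/* Legacy detect entry point: apply any "ips=" module options and register
 * a SCSI host for each controller already found by PCI probing. */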
552static int
553ips_detect(struct scsi_host_template * SHT)
554{
555 int i;
556
557 METHOD_TRACE("ips_detect", 1);
558
559#ifdef MODULE
560 if (ips)
561 ips_setup(ips);
562#endif
563
564 for (i = 0; i < ips_num_controllers; i++) {
565 if (ips_register_scsi(i))
566 ips_free(ips_ha[i]);
567 ips_released_controllers++;
568 }
569 ips_hotplug = 1;
570 return (ips_num_controllers);
571}
572
573
574
575
576
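/* Fill in the adapter-specific function pointers (interrupt, init, issue,
 * status, reset, BIOS flash) based on controller family and I/O mode. */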
577static void
578ips_setup_funclist(ips_ha_t * ha)
579{
580
581
582
583
584 if (IPS_IS_MORPHEUS(ha) || IPS_IS_MARCO(ha)) {
585
586 ha->func.isintr = ips_isintr_morpheus;
587 ha->func.isinit = ips_isinit_morpheus;
588 ha->func.issue = ips_issue_i2o_memio;
589 ha->func.init = ips_init_morpheus;
590 ha->func.statupd = ips_statupd_morpheus;
591 ha->func.reset = ips_reset_morpheus;
592 ha->func.intr = ips_intr_morpheus;
593 ha->func.enableint = ips_enable_int_morpheus;
594 } else if (IPS_USE_MEMIO(ha)) {
595
596 ha->func.isintr = ips_isintr_copperhead_memio;
597 ha->func.isinit = ips_isinit_copperhead_memio;
598 ha->func.init = ips_init_copperhead_memio;
599 ha->func.statupd = ips_statupd_copperhead_memio;
600 ha->func.statinit = ips_statinit_memio;
601 ha->func.reset = ips_reset_copperhead_memio;
602 ha->func.intr = ips_intr_copperhead;
603 ha->func.erasebios = ips_erase_bios_memio;
604 ha->func.programbios = ips_program_bios_memio;
605 ha->func.verifybios = ips_verify_bios_memio;
606 ha->func.enableint = ips_enable_int_copperhead_memio;
607 if (IPS_USE_I2O_DELIVER(ha))
608 ha->func.issue = ips_issue_i2o_memio;
609 else
610 ha->func.issue = ips_issue_copperhead_memio;
611 } else {
612
613 ha->func.isintr = ips_isintr_copperhead;
614 ha->func.isinit = ips_isinit_copperhead;
615 ha->func.init = ips_init_copperhead;
616 ha->func.statupd = ips_statupd_copperhead;
617 ha->func.statinit = ips_statinit;
618 ha->func.reset = ips_reset_copperhead;
619 ha->func.intr = ips_intr_copperhead;
620 ha->func.erasebios = ips_erase_bios;
621 ha->func.programbios = ips_program_bios;
622 ha->func.verifybios = ips_verify_bios;
623 ha->func.enableint = ips_enable_int_copperhead;
624
625 if (IPS_USE_I2O_DELIVER(ha))
626 ha->func.issue = ips_issue_i2o;
627 else
628 ha->func.issue = ips_issue_copperhead;
629 }
630}
631
632
633
634
635
636
637
638
639
640
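/* Remove a host: flush the adapter cache with a final FLUSH command, then
 * free driver resources and release the IRQ. */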
641static int
642ips_release(struct Scsi_Host *sh)
643{
644 ips_scb_t *scb;
645 ips_ha_t *ha;
646 int i;
647
648 METHOD_TRACE("ips_release", 1);
649
650 scsi_remove_host(sh);
651
652 for (i = 0; i < IPS_MAX_ADAPTERS && ips_sh[i] != sh; i++) ;
653
654 if (i == IPS_MAX_ADAPTERS) {
655 printk(KERN_WARNING
656 "(%s) release, invalid Scsi_Host pointer.\n", ips_name);
657 BUG();
658 return (FALSE);
659 }
660
661 ha = IPS_HA(sh);
662
663 if (!ha)
664 return (FALSE);
665
666
667 scb = &ha->scbs[ha->max_cmds - 1];
668
669 ips_init_scb(ha, scb);
670
671 scb->timeout = ips_cmd_timeout;
672 scb->cdb[0] = IPS_CMD_FLUSH;
673
674 scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH;
675 scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb);
676 scb->cmd.flush_cache.state = IPS_NORM_STATE;
677 scb->cmd.flush_cache.reserved = 0;
678 scb->cmd.flush_cache.reserved2 = 0;
679 scb->cmd.flush_cache.reserved3 = 0;
680 scb->cmd.flush_cache.reserved4 = 0;
681
682 IPS_PRINTK(KERN_WARNING, ha->pcidev, "Flushing Cache.\n");
683
684
685 if (ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_ON) == IPS_FAILURE)
686 IPS_PRINTK(KERN_WARNING, ha->pcidev, "Incomplete Flush.\n");
687
688 IPS_PRINTK(KERN_WARNING, ha->pcidev, "Flushing Complete.\n");
689
690 ips_sh[i] = NULL;
691 ips_ha[i] = NULL;
692
693
694 ips_free(ha);
695
696
697 free_irq(ha->pcidev->irq, ha);
698
699 scsi_host_put(sh);
700
701 ips_released_controllers++;
702
703 return (FALSE);
704}
705
706
707
708
709
710
711
712
713
714
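/* Reboot notifier: on halt/restart/power-off, flush the cache on every
 * active controller so no dirty data is lost. */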
715static int
716ips_halt(struct notifier_block *nb, ulong event, void *buf)
717{
718 ips_scb_t *scb;
719 ips_ha_t *ha;
720 int i;
721
722 if ((event != SYS_RESTART) && (event != SYS_HALT) &&
723 (event != SYS_POWER_OFF))
724 return (NOTIFY_DONE);
725
726 for (i = 0; i < ips_next_controller; i++) {
727 ha = (ips_ha_t *) ips_ha[i];
728
729 if (!ha)
730 continue;
731
732 if (!ha->active)
733 continue;
734
735
736 scb = &ha->scbs[ha->max_cmds - 1];
737
738 ips_init_scb(ha, scb);
739
740 scb->timeout = ips_cmd_timeout;
741 scb->cdb[0] = IPS_CMD_FLUSH;
742
743 scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH;
744 scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb);
745 scb->cmd.flush_cache.state = IPS_NORM_STATE;
746 scb->cmd.flush_cache.reserved = 0;
747 scb->cmd.flush_cache.reserved2 = 0;
748 scb->cmd.flush_cache.reserved3 = 0;
749 scb->cmd.flush_cache.reserved4 = 0;
750
751 IPS_PRINTK(KERN_WARNING, ha->pcidev, "Flushing Cache.\n");
752
753
754 if (ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_ON) ==
755 IPS_FAILURE)
756 IPS_PRINTK(KERN_WARNING, ha->pcidev,
757 "Incomplete Flush.\n");
758 else
759 IPS_PRINTK(KERN_WARNING, ha->pcidev,
760 "Flushing Complete.\n");
761 }
762
763 return (NOTIFY_OK);
764}
765
766
767
768
769
770
771
772
773
774
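/* Error-handler abort: succeeds only if the command is still sitting on one
 * of the driver wait queues; commands already active cannot be aborted. */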
775int ips_eh_abort(struct scsi_cmnd *SC)
776{
777 ips_ha_t *ha;
778 ips_copp_wait_item_t *item;
779 int ret;
780 struct Scsi_Host *host;
781
782 METHOD_TRACE("ips_eh_abort", 1);
783
784 if (!SC)
785 return (FAILED);
786
787 host = SC->device->host;
788 ha = (ips_ha_t *) SC->device->host->hostdata;
789
790 if (!ha)
791 return (FAILED);
792
793 if (!ha->active)
794 return (FAILED);
795
796 spin_lock(host->host_lock);
797
798
799 item = ha->copp_waitlist.head;
800 while ((item) && (item->scsi_cmd != SC))
801 item = item->next;
802
803 if (item) {
804
805 ips_removeq_copp(&ha->copp_waitlist, item);
806 ret = (SUCCESS);
807
808
809 } else if (ips_removeq_wait(&ha->scb_waitlist, SC)) {
810
811 ret = (SUCCESS);
812 } else {
813
814 ret = (FAILED);
815 }
816
817 spin_unlock(host->host_lock);
818 return ret;
819}
820
821
822
823
824
825
826
827
828
829
830
831
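/* Error-handler reset, called with the host lock held.  First try to
 * complete the command from the wait queues, then attempt a cache flush,
 * and only reset the controller itself as a last resort. */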
832static int __ips_eh_reset(struct scsi_cmnd *SC)
833{
834 int ret;
835 int i;
836 ips_ha_t *ha;
837 ips_scb_t *scb;
838 ips_copp_wait_item_t *item;
839
840 METHOD_TRACE("ips_eh_reset", 1);
841
842#ifdef NO_IPS_RESET
843 return (FAILED);
844#else
845
846 if (!SC) {
847 DEBUG(1, "Reset called with NULL scsi command");
848
849 return (FAILED);
850 }
851
852 ha = (ips_ha_t *) SC->device->host->hostdata;
853
854 if (!ha) {
855 DEBUG(1, "Reset called with NULL ha struct");
856
857 return (FAILED);
858 }
859
860 if (!ha->active)
861 return (FAILED);
862
863
864 item = ha->copp_waitlist.head;
865 while ((item) && (item->scsi_cmd != SC))
866 item = item->next;
867
868 if (item) {
869
870 ips_removeq_copp(&ha->copp_waitlist, item);
871 return (SUCCESS);
872 }
873
874
875 if (ips_removeq_wait(&ha->scb_waitlist, SC)) {
876
877 return (SUCCESS);
878 }
879
880
881
882
883
884
885
886
887
888
889
890 if (ha->ioctl_reset == 0) {
891 scb = &ha->scbs[ha->max_cmds - 1];
892
893 ips_init_scb(ha, scb);
894
895 scb->timeout = ips_cmd_timeout;
896 scb->cdb[0] = IPS_CMD_FLUSH;
897
898 scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH;
899 scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb);
900 scb->cmd.flush_cache.state = IPS_NORM_STATE;
901 scb->cmd.flush_cache.reserved = 0;
902 scb->cmd.flush_cache.reserved2 = 0;
903 scb->cmd.flush_cache.reserved3 = 0;
904 scb->cmd.flush_cache.reserved4 = 0;
905
906
907 ret = ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_IORL);
908 if (ret == IPS_SUCCESS) {
909 IPS_PRINTK(KERN_NOTICE, ha->pcidev,
910 "Reset Request - Flushed Cache\n");
911 return (SUCCESS);
912 }
913 }
914
915
916
917
918 ha->ioctl_reset = 0;
919
920
921
922
923
924 IPS_PRINTK(KERN_NOTICE, ha->pcidev, "Resetting controller.\n");
925 ret = (*ha->func.reset) (ha);
926
927 if (!ret) {
928 struct scsi_cmnd *scsi_cmd;
929
930 IPS_PRINTK(KERN_NOTICE, ha->pcidev,
931 "Controller reset failed - controller now offline.\n");
932
933
934 DEBUG_VAR(1, "(%s%d) Failing active commands",
935 ips_name, ha->host_num);
936
937 while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) {
938 scb->scsi_cmd->result = DID_ERROR << 16;
939 scb->scsi_cmd->scsi_done(scb->scsi_cmd);
940 ips_freescb(ha, scb);
941 }
942
943
944 DEBUG_VAR(1, "(%s%d) Failing pending commands",
945 ips_name, ha->host_num);
946
947 while ((scsi_cmd = ips_removeq_wait_head(&ha->scb_waitlist))) {
948 scsi_cmd->result = DID_ERROR << 16;
949 scsi_cmd->scsi_done(scsi_cmd);
950 }
951
952 ha->active = FALSE;
953 return (FAILED);
954 }
955
956 if (!ips_clear_adapter(ha, IPS_INTR_IORL)) {
957 struct scsi_cmnd *scsi_cmd;
958
959 IPS_PRINTK(KERN_NOTICE, ha->pcidev,
960 "Controller reset failed - controller now offline.\n");
961
962
963 DEBUG_VAR(1, "(%s%d) Failing active commands",
964 ips_name, ha->host_num);
965
966 while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) {
967 scb->scsi_cmd->result = DID_ERROR << 16;
968 scb->scsi_cmd->scsi_done(scb->scsi_cmd);
969 ips_freescb(ha, scb);
970 }
971
972
973 DEBUG_VAR(1, "(%s%d) Failing pending commands",
974 ips_name, ha->host_num);
975
976 while ((scsi_cmd = ips_removeq_wait_head(&ha->scb_waitlist))) {
977 scsi_cmd->result = DID_ERROR << 16;
978 scsi_cmd->scsi_done(scsi_cmd);
979 }
980
981 ha->active = FALSE;
982 return (FAILED);
983 }
984
985
986 if (le32_to_cpu(ha->subsys->param[3]) & 0x300000) {
987 ha->last_ffdc = ktime_get_real_seconds();
988 ha->reset_count++;
989 ips_ffdc_reset(ha, IPS_INTR_IORL);
990 }
991
992
993 DEBUG_VAR(1, "(%s%d) Failing active commands", ips_name, ha->host_num);
994
995 while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) {
996 scb->scsi_cmd->result = DID_RESET << 16;
997 scb->scsi_cmd->scsi_done(scb->scsi_cmd);
998 ips_freescb(ha, scb);
999 }
1000
1001
1002 for (i = 1; i < ha->nbus; i++)
1003 ha->dcdb_active[i - 1] = 0;
1004
1005
1006 ha->num_ioctl = 0;
1007
1008 ips_next(ha, IPS_INTR_IORL);
1009
1010 return (SUCCESS);
1011#endif
1012
1013}
1014
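/* Lock-taking wrapper around __ips_eh_reset(). */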
1015static int ips_eh_reset(struct scsi_cmnd *SC)
1016{
1017 int rc;
1018
1019 spin_lock_irq(SC->device->host->host_lock);
1020 rc = __ips_eh_reset(SC);
1021 spin_unlock_irq(SC->device->host->host_lock);
1022
1023 return rc;
1024}
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
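/* queuecommand handler: route passthrough (ioctl) commands to the copp
 * wait queue and normal SCSI commands to the scb wait queue, then kick
 * the state machine via ips_next(). */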
1038static int ips_queue_lck(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *))
1039{
1040 ips_ha_t *ha;
1041 ips_passthru_t *pt;
1042
1043 METHOD_TRACE("ips_queue", 1);
1044
1045 ha = (ips_ha_t *) SC->device->host->hostdata;
1046
1047 if (!ha)
1048 return (1);
1049
1050 if (!ha->active)
1051 return (DID_ERROR);
1052
1053 if (ips_is_passthru(SC)) {
1054 if (ha->copp_waitlist.count == IPS_MAX_IOCTL_QUEUE) {
1055 SC->result = DID_BUS_BUSY << 16;
1056 done(SC);
1057
1058 return (0);
1059 }
1060 } else if (ha->scb_waitlist.count == IPS_MAX_QUEUE) {
1061 SC->result = DID_BUS_BUSY << 16;
1062 done(SC);
1063
1064 return (0);
1065 }
1066
1067 SC->scsi_done = done;
1068
1069 DEBUG_VAR(2, "(%s%d): ips_queue: cmd 0x%X (%d %d %d)",
1070 ips_name,
1071 ha->host_num,
1072 SC->cmnd[0],
1073 SC->device->channel, SC->device->id, SC->device->lun);
1074
1075
1076 if ((scmd_channel(SC) > 0)
1077 && (scmd_id(SC) == ha->ha_id[scmd_channel(SC)])) {
1078 SC->result = DID_NO_CONNECT << 16;
1079 done(SC);
1080
1081 return (0);
1082 }
1083
1084 if (ips_is_passthru(SC)) {
1085
1086 ips_copp_wait_item_t *scratch;
1087
1088
1089
1090
1091 pt = (ips_passthru_t *) scsi_sglist(SC);
1092 if ((pt->CoppCP.cmd.reset.op_code == IPS_CMD_RESET_CHANNEL) &&
1093 (pt->CoppCP.cmd.reset.adapter_flag == 1)) {
1094 if (ha->scb_activelist.count != 0) {
1095 SC->result = DID_BUS_BUSY << 16;
1096 done(SC);
1097 return (0);
1098 }
1099 ha->ioctl_reset = 1;
1100 __ips_eh_reset(SC);
1101 SC->result = DID_OK << 16;
1102 SC->scsi_done(SC);
1103 return (0);
1104 }
1105
1106
1107 scratch = kmalloc(sizeof (ips_copp_wait_item_t), GFP_ATOMIC);
1108
1109 if (!scratch) {
1110 SC->result = DID_ERROR << 16;
1111 done(SC);
1112
1113 return (0);
1114 }
1115
1116 scratch->scsi_cmd = SC;
1117 scratch->next = NULL;
1118
1119 ips_putq_copp_tail(&ha->copp_waitlist, scratch);
1120 } else {
1121 ips_putq_wait_tail(&ha->scb_waitlist, SC);
1122 }
1123
1124 ips_next(ha, IPS_INTR_IORL);
1125
1126 return (0);
1127}
1128
1129static DEF_SCSI_QCMD(ips_queue)
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
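/* BIOS-parameter callback: report a drive geometry (heads/sectors/cylinders)
 * chosen from the capacity and the adapter's enquiry flags. */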
1140static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1141 sector_t capacity, int geom[])
1142{
1143 ips_ha_t *ha = (ips_ha_t *) sdev->host->hostdata;
1144 int heads;
1145 int sectors;
1146 int cylinders;
1147
1148 METHOD_TRACE("ips_biosparam", 1);
1149
1150 if (!ha)
1151
1152 return (0);
1153
1154 if (!ha->active)
1155 return (0);
1156
1157 if (!ips_read_adapter_status(ha, IPS_INTR_ON))
1158
1159 return (0);
1160
1161 if ((capacity > 0x400000) && ((ha->enq->ucMiscFlag & 0x8) == 0)) {
1162 heads = IPS_NORM_HEADS;
1163 sectors = IPS_NORM_SECTORS;
1164 } else {
1165 heads = IPS_COMP_HEADS;
1166 sectors = IPS_COMP_SECTORS;
1167 }
1168
1169 cylinders = (unsigned long) capacity / (heads * sectors);
1170
1171 DEBUG_VAR(2, "Geometry: heads: %d, sectors: %d, cylinders: %d",
1172 heads, sectors, cylinders);
1173
1174 geom[0] = heads;
1175 geom[1] = sectors;
1176 geom[2] = cylinders;
1177
1178 return (0);
1179}
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
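/* Per-device setup: enable a deeper queue on tagged disk devices and skip
 * MODE SENSE pages 8 and 0x3f. */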
1190static int
1191ips_slave_configure(struct scsi_device * SDptr)
1192{
1193 ips_ha_t *ha;
1194 int min;
1195
1196 ha = IPS_HA(SDptr->host);
1197 if (SDptr->tagged_supported && SDptr->type == TYPE_DISK) {
1198 min = ha->max_cmds / 2;
1199 if (ha->enq->ucLogDriveCount <= 2)
1200 min = ha->max_cmds - 1;
1201 scsi_change_queue_depth(SDptr, min);
1202 }
1203
1204 SDptr->skip_ms_page_8 = 1;
1205 SDptr->skip_ms_page_3f = 1;
1206 return 0;
1207}
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
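/* Top-level interrupt handler: dispatch to the adapter-specific interrupt
 * routine under the host lock, then schedule more work with ips_next(). */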
1218static irqreturn_t
1219do_ipsintr(int irq, void *dev_id)
1220{
1221 ips_ha_t *ha;
1222 struct Scsi_Host *host;
1223 int irqstatus;
1224
1225 METHOD_TRACE("do_ipsintr", 2);
1226
1227 ha = (ips_ha_t *) dev_id;
1228 if (!ha)
1229 return IRQ_NONE;
1230 host = ips_sh[ha->host_num];
1231
1232 if (!host) {
1233 (*ha->func.intr) (ha);
1234 return IRQ_HANDLED;
1235 }
1236
1237 spin_lock(host->host_lock);
1238
1239 if (!ha->active) {
1240 spin_unlock(host->host_lock);
1241 return IRQ_HANDLED;
1242 }
1243
1244 irqstatus = (*ha->func.intr) (ha);
1245
1246 spin_unlock(host->host_lock);
1247
1248
1249 ips_next(ha, IPS_INTR_ON);
1250 return IRQ_RETVAL(irqstatus);
1251}
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
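/* Service interrupts on copperhead-family controllers: drain completed
 * command statuses and invoke each finished SCB's completion callback. */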
1264int
1265ips_intr_copperhead(ips_ha_t * ha)
1266{
1267 ips_stat_t *sp;
1268 ips_scb_t *scb;
1269 IPS_STATUS cstatus;
1270 int intrstatus;
1271
1272 METHOD_TRACE("ips_intr", 2);
1273
1274 if (!ha)
1275 return 0;
1276
1277 if (!ha->active)
1278 return 0;
1279
1280 intrstatus = (*ha->func.isintr) (ha);
1281
1282 if (!intrstatus) {
1283
1284
1285
1286
1287 return 0;
1288 }
1289
1290 while (TRUE) {
1291 sp = &ha->sp;
1292
1293 intrstatus = (*ha->func.isintr) (ha);
1294
1295 if (!intrstatus)
1296 break;
1297 else
1298 cstatus.value = (*ha->func.statupd) (ha);
1299
1300 if (cstatus.fields.command_id > (IPS_MAX_CMDS - 1)) {
1301
1302 continue;
1303 }
1304
1305 ips_chkstatus(ha, &cstatus);
1306 scb = (ips_scb_t *) sp->scb_addr;
1307
1308
1309
1310
1311
1312 (*scb->callback) (ha, scb);
1313 }
1314 return 1;
1315}
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
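/* Service interrupts on morpheus-family controllers; same flow as the
 * copperhead path, but a 0xffffffff status read means the queue is empty
 * and spurious command IDs are reported. */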
1328int
1329ips_intr_morpheus(ips_ha_t * ha)
1330{
1331 ips_stat_t *sp;
1332 ips_scb_t *scb;
1333 IPS_STATUS cstatus;
1334 int intrstatus;
1335
1336 METHOD_TRACE("ips_intr_morpheus", 2);
1337
1338 if (!ha)
1339 return 0;
1340
1341 if (!ha->active)
1342 return 0;
1343
1344 intrstatus = (*ha->func.isintr) (ha);
1345
1346 if (!intrstatus) {
1347
1348
1349
1350
1351 return 0;
1352 }
1353
1354 while (TRUE) {
1355 sp = &ha->sp;
1356
1357 intrstatus = (*ha->func.isintr) (ha);
1358
1359 if (!intrstatus)
1360 break;
1361 else
1362 cstatus.value = (*ha->func.statupd) (ha);
1363
1364 if (cstatus.value == 0xffffffff)
1365
1366 break;
1367
1368 if (cstatus.fields.command_id > (IPS_MAX_CMDS - 1)) {
1369 IPS_PRINTK(KERN_WARNING, ha->pcidev,
1370 "Spurious interrupt; no ccb.\n");
1371
1372 continue;
1373 }
1374
1375 ips_chkstatus(ha, &cstatus);
1376 scb = (ips_scb_t *) sp->scb_addr;
1377
1378
1379
1380
1381
1382 (*scb->callback) (ha, scb);
1383 }
1384 return 1;
1385}
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
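/* Return the driver banner shown by the SCSI midlayer: driver version,
 * build number, and the adapter's marketing name when known. */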
1396static const char *
1397ips_info(struct Scsi_Host *SH)
1398{
1399 static char buffer[256];
1400 char *bp;
1401 ips_ha_t *ha;
1402
1403 METHOD_TRACE("ips_info", 1);
1404
1405 ha = IPS_HA(SH);
1406
1407 if (!ha)
1408 return (NULL);
1409
1410 bp = &buffer[0];
1411 memset(bp, 0, sizeof (buffer));
1412
1413 sprintf(bp, "%s%s%s Build %d", "IBM PCI ServeRAID ",
1414 IPS_VERSION_HIGH, IPS_VERSION_LOW, IPS_BUILD_IDENT);
1415
1416 if (ha->ad_type > 0 && ha->ad_type <= MAX_ADAPTER_NAME) {
1417 strcat(bp, " <");
1418 strcat(bp, ips_adapter_name[ha->ad_type - 1]);
1419 strcat(bp, ">");
1420 }
1421
1422 return (bp);
1423}
1424
1425static int
1426ips_write_info(struct Scsi_Host *host, char *buffer, int length)
1427{
1428 int i;
1429 ips_ha_t *ha = NULL;
1430
1431
1432 for (i = 0; i < ips_next_controller; i++) {
1433 if (ips_sh[i]) {
1434 if (ips_sh[i] == host) {
1435 ha = (ips_ha_t *) ips_sh[i]->hostdata;
1436 break;
1437 }
1438 }
1439 }
1440
1441 if (!ha)
1442 return (-EINVAL);
1443
1444 return 0;
1445}
1446
1447static int
1448ips_show_info(struct seq_file *m, struct Scsi_Host *host)
1449{
1450 int i;
1451 ips_ha_t *ha = NULL;
1452
1453
1454 for (i = 0; i < ips_next_controller; i++) {
1455 if (ips_sh[i]) {
1456 if (ips_sh[i] == host) {
1457 ha = (ips_ha_t *) ips_sh[i]->hostdata;
1458 break;
1459 }
1460 }
1461 }
1462
1463 if (!ha)
1464 return (-EINVAL);
1465
1466 return ips_host_info(ha, m);
1467}
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
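/* A command is a driver passthrough (ioctl) if it uses the private opcode,
 * targets the adapter ID on channel 0 / LUN 0, and its buffer begins with
 * the "COPP" signature. */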
1482static int ips_is_passthru(struct scsi_cmnd *SC)
1483{
1484 unsigned long flags;
1485
1486 METHOD_TRACE("ips_is_passthru", 1);
1487
1488 if (!SC)
1489 return (0);
1490
1491 if ((SC->cmnd[0] == IPS_IOCTL_COMMAND) &&
1492 (SC->device->channel == 0) &&
1493 (SC->device->id == IPS_ADAPTER_ID) &&
1494 (SC->device->lun == 0) && scsi_sglist(SC)) {
1495 struct scatterlist *sg = scsi_sglist(SC);
1496 char *buffer;
1497
1498
1499
1500 local_irq_save(flags);
1501 buffer = kmap_atomic(sg_page(sg)) + sg->offset;
1502 if (buffer && buffer[0] == 'C' && buffer[1] == 'O' &&
1503 buffer[2] == 'P' && buffer[3] == 'P') {
1504 kunmap_atomic(buffer - sg->offset);
1505 local_irq_restore(flags);
1506 return 1;
1507 }
1508 kunmap_atomic(buffer - sg->offset);
1509 local_irq_restore(flags);
1510 }
1511 return 0;
1512}
1513
1514
1515
1516
1517
1518
1519
1520
1521
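/* Ensure the coherent ioctl buffer is at least 'length' bytes, growing
 * (and remapping) it if necessary. */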
1522static int
1523ips_alloc_passthru_buffer(ips_ha_t * ha, int length)
1524{
1525 void *bigger_buf;
1526 dma_addr_t dma_busaddr;
1527
1528 if (ha->ioctl_data && length <= ha->ioctl_len)
1529 return 0;
1530
1531 bigger_buf = dma_alloc_coherent(&ha->pcidev->dev, length, &dma_busaddr,
1532 GFP_KERNEL);
1533 if (bigger_buf) {
1534
1535 dma_free_coherent(&ha->pcidev->dev, ha->ioctl_len,
1536 ha->ioctl_data, ha->ioctl_busaddr);
1537
1538 ha->ioctl_data = (char *) bigger_buf;
1539 ha->ioctl_len = length;
1540 ha->ioctl_busaddr = dma_busaddr;
1541 } else {
1542 return -1;
1543 }
1544 return 0;
1545}
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
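/* Copy a passthrough request out of the SCSI buffer, validate its size, and
 * either answer it immediately (e.g. controller count) or build the SCB for
 * a user command or firmware/BIOS flash. */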
1556static int
1557ips_make_passthru(ips_ha_t *ha, struct scsi_cmnd *SC, ips_scb_t *scb, int intr)
1558{
1559 ips_passthru_t *pt;
1560 int length = 0;
1561 int i, ret;
1562 struct scatterlist *sg = scsi_sglist(SC);
1563
1564 METHOD_TRACE("ips_make_passthru", 1);
1565
1566 scsi_for_each_sg(SC, sg, scsi_sg_count(SC), i)
1567 length += sg->length;
1568
1569 if (length < sizeof (ips_passthru_t)) {
1570
1571 DEBUG_VAR(1, "(%s%d) Passthru structure wrong size",
1572 ips_name, ha->host_num);
1573 return (IPS_FAILURE);
1574 }
1575 if (ips_alloc_passthru_buffer(ha, length)) {
1576
1577
1578 if (ha->ioctl_data) {
1579 pt = (ips_passthru_t *) ha->ioctl_data;
1580 ips_scmd_buf_read(SC, pt, sizeof (ips_passthru_t));
1581 pt->BasicStatus = 0x0B;
1582 pt->ExtendedStatus = 0x00;
1583 ips_scmd_buf_write(SC, pt, sizeof (ips_passthru_t));
1584 }
1585 return IPS_FAILURE;
1586 }
1587 ha->ioctl_datasize = length;
1588
1589 ips_scmd_buf_read(SC, ha->ioctl_data, ha->ioctl_datasize);
1590 pt = (ips_passthru_t *) ha->ioctl_data;
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602 switch (pt->CoppCmd) {
1603 case IPS_NUMCTRLS:
1604 memcpy(ha->ioctl_data + sizeof (ips_passthru_t),
1605 &ips_num_controllers, sizeof (int));
1606 ips_scmd_buf_write(SC, ha->ioctl_data,
1607 sizeof (ips_passthru_t) + sizeof (int));
1608 SC->result = DID_OK << 16;
1609
1610 return (IPS_SUCCESS_IMM);
1611
1612 case IPS_COPPUSRCMD:
1613 case IPS_COPPIOCCMD:
1614 if (SC->cmnd[0] == IPS_IOCTL_COMMAND) {
1615 if (length < (sizeof (ips_passthru_t) + pt->CmdBSize)) {
1616
1617 DEBUG_VAR(1,
1618 "(%s%d) Passthru structure wrong size",
1619 ips_name, ha->host_num);
1620
1621 return (IPS_FAILURE);
1622 }
1623
1624 if (ha->pcidev->device == IPS_DEVICEID_COPPERHEAD &&
1625 pt->CoppCP.cmd.flashfw.op_code ==
1626 IPS_CMD_RW_BIOSFW) {
1627 ret = ips_flash_copperhead(ha, pt, scb);
1628 ips_scmd_buf_write(SC, ha->ioctl_data,
1629 sizeof (ips_passthru_t));
1630 return ret;
1631 }
1632 if (ips_usrcmd(ha, pt, scb))
1633 return (IPS_SUCCESS);
1634 else
1635 return (IPS_FAILURE);
1636 }
1637
1638 break;
1639
1640 }
1641
1642 return (IPS_FAILURE);
1643}
1644
1645
1646
1647
1648
1649
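/* Stage firmware/BIOS flash data arriving in multiple passthrough packets
 * into a single buffer, then hand the completed image to ips_flash_bios()
 * or ips_flash_firmware(). */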
1650static int
1651ips_flash_copperhead(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
1652{
1653 int datasize;
1654
1655
1656
1657 if (IPS_IS_TROMBONE(ha) && pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE) {
1658 if (ips_usrcmd(ha, pt, scb))
1659 return IPS_SUCCESS;
1660 else
1661 return IPS_FAILURE;
1662 }
1663 pt->BasicStatus = 0x0B;
1664 pt->ExtendedStatus = 0;
1665 scb->scsi_cmd->result = DID_OK << 16;
1666
1667
1668 if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE &&
1669 pt->CoppCP.cmd.flashfw.direction == IPS_ERASE_BIOS) {
1670 pt->BasicStatus = 0;
1671 return ips_flash_bios(ha, pt, scb);
1672 } else if (pt->CoppCP.cmd.flashfw.packet_num == 0) {
1673 if (ips_FlashData && !test_and_set_bit(0, &ips_FlashDataInUse)){
1674 ha->flash_data = ips_FlashData;
1675 ha->flash_busaddr = ips_flashbusaddr;
1676 ha->flash_len = PAGE_SIZE << 7;
1677 ha->flash_datasize = 0;
1678 } else if (!ha->flash_data) {
1679 datasize = pt->CoppCP.cmd.flashfw.total_packets *
1680 pt->CoppCP.cmd.flashfw.count;
1681 ha->flash_data = dma_alloc_coherent(&ha->pcidev->dev,
1682 datasize, &ha->flash_busaddr, GFP_KERNEL);
1683 if (!ha->flash_data){
1684 printk(KERN_WARNING "Unable to allocate a flash buffer\n");
1685 return IPS_FAILURE;
1686 }
1687 ha->flash_datasize = 0;
1688 ha->flash_len = datasize;
1689 } else
1690 return IPS_FAILURE;
1691 } else {
1692 if (pt->CoppCP.cmd.flashfw.count + ha->flash_datasize >
1693 ha->flash_len) {
1694 ips_free_flash_copperhead(ha);
1695 IPS_PRINTK(KERN_WARNING, ha->pcidev,
1696 "failed size sanity check\n");
1697 return IPS_FAILURE;
1698 }
1699 }
1700 if (!ha->flash_data)
1701 return IPS_FAILURE;
1702 pt->BasicStatus = 0;
1703 memcpy(&ha->flash_data[ha->flash_datasize], pt + 1,
1704 pt->CoppCP.cmd.flashfw.count);
1705 ha->flash_datasize += pt->CoppCP.cmd.flashfw.count;
1706 if (pt->CoppCP.cmd.flashfw.packet_num ==
1707 pt->CoppCP.cmd.flashfw.total_packets - 1) {
1708 if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE)
1709 return ips_flash_bios(ha, pt, scb);
1710 else if (pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE)
1711 return ips_flash_firmware(ha, pt, scb);
1712 }
1713 return IPS_SUCCESS_IMM;
1714}
1715
1716
1717
1718
1719
1720
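/* Erase, program, and verify the adapter BIOS image through the adapter's
 * function-pointer hooks; any failure reports BasicStatus 0x0B back to the
 * caller. */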
1721static int
1722ips_flash_bios(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
1723{
1724
1725 if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE &&
1726 pt->CoppCP.cmd.flashfw.direction == IPS_WRITE_BIOS) {
1727 if ((!ha->func.programbios) || (!ha->func.erasebios) ||
1728 (!ha->func.verifybios))
1729 goto error;
1730 if ((*ha->func.erasebios) (ha)) {
1731 DEBUG_VAR(1,
1732 "(%s%d) flash bios failed - unable to erase flash",
1733 ips_name, ha->host_num);
1734 goto error;
1735 } else
1736 if ((*ha->func.programbios) (ha,
1737 ha->flash_data +
1738 IPS_BIOS_HEADER,
1739 ha->flash_datasize -
1740 IPS_BIOS_HEADER, 0)) {
1741 DEBUG_VAR(1,
1742 "(%s%d) flash bios failed - unable to flash",
1743 ips_name, ha->host_num);
1744 goto error;
1745 } else
1746 if ((*ha->func.verifybios) (ha,
1747 ha->flash_data +
1748 IPS_BIOS_HEADER,
1749 ha->flash_datasize -
1750 IPS_BIOS_HEADER, 0)) {
1751 DEBUG_VAR(1,
1752 "(%s%d) flash bios failed - unable to verify flash",
1753 ips_name, ha->host_num);
1754 goto error;
1755 }
1756 ips_free_flash_copperhead(ha);
1757 return IPS_SUCCESS_IMM;
1758 } else if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE &&
1759 pt->CoppCP.cmd.flashfw.direction == IPS_ERASE_BIOS) {
1760 if (!ha->func.erasebios)
1761 goto error;
1762 if ((*ha->func.erasebios) (ha)) {
1763 DEBUG_VAR(1,
1764 "(%s%d) flash bios failed - unable to erase flash",
1765 ips_name, ha->host_num);
1766 goto error;
1767 }
1768 return IPS_SUCCESS_IMM;
1769 }
1770 error:
1771 pt->BasicStatus = 0x0B;
1772 pt->ExtendedStatus = 0x00;
1773 ips_free_flash_copperhead(ha);
1774 return IPS_FAILURE;
1775}
1776
1777
1778
1779
1780
1781
1782
1783
1784
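/* Add one DMA segment to the SCB scatter-gather list, clamping at the
 * adapter's maximum transfer size and flagging a breakup when exceeded. */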
1785static int
1786ips_fill_scb_sg_single(ips_ha_t * ha, dma_addr_t busaddr,
1787 ips_scb_t * scb, int indx, unsigned int e_len)
1788{
1789
1790 int ret_val = 0;
1791
1792 if ((scb->data_len + e_len) > ha->max_xfer) {
1793 e_len = ha->max_xfer - scb->data_len;
1794 scb->breakup = indx;
1795 ++scb->sg_break;
1796 ret_val = -1;
1797 } else {
1798 scb->breakup = 0;
1799 scb->sg_break = 0;
1800 }
1801 if (IPS_USE_ENH_SGLIST(ha)) {
1802 scb->sg_list.enh_list[indx].address_lo =
1803 cpu_to_le32(lower_32_bits(busaddr));
1804 scb->sg_list.enh_list[indx].address_hi =
1805 cpu_to_le32(upper_32_bits(busaddr));
1806 scb->sg_list.enh_list[indx].length = cpu_to_le32(e_len);
1807 } else {
1808 scb->sg_list.std_list[indx].address =
1809 cpu_to_le32(lower_32_bits(busaddr));
1810 scb->sg_list.std_list[indx].length = cpu_to_le32(e_len);
1811 }
1812
1813 ++scb->sg_len;
1814 scb->data_len += e_len;
1815 return ret_val;
1816}
1817
1818
1819
1820
1821
1822
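/* Turn a staged firmware image into a DOWNLOAD command, mapping the staging
 * buffer for DMA and attaching it to the SCB. */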
1823static int
1824ips_flash_firmware(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
1825{
1826 IPS_SG_LIST sg_list;
1827 uint32_t cmd_busaddr;
1828
1829 if (pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE &&
1830 pt->CoppCP.cmd.flashfw.direction == IPS_WRITE_FW) {
1831 memset(&pt->CoppCP.cmd, 0, sizeof (IPS_HOST_COMMAND));
1832 pt->CoppCP.cmd.flashfw.op_code = IPS_CMD_DOWNLOAD;
1833 pt->CoppCP.cmd.flashfw.count = cpu_to_le32(ha->flash_datasize);
1834 } else {
1835 pt->BasicStatus = 0x0B;
1836 pt->ExtendedStatus = 0x00;
1837 ips_free_flash_copperhead(ha);
1838 return IPS_FAILURE;
1839 }
1840
1841 sg_list.list = scb->sg_list.list;
1842 cmd_busaddr = scb->scb_busaddr;
1843
1844 memcpy(&scb->cmd, &pt->CoppCP.cmd, sizeof (IPS_IOCTL_CMD));
1845
1846 scb->sg_list.list = sg_list.list;
1847 scb->scb_busaddr = cmd_busaddr;
1848 scb->bus = scb->scsi_cmd->device->channel;
1849 scb->target_id = scb->scsi_cmd->device->id;
1850 scb->lun = scb->scsi_cmd->device->lun;
1851 scb->sg_len = 0;
1852 scb->data_len = 0;
1853 scb->flags = 0;
1854 scb->op_code = 0;
1855 scb->callback = ipsintr_done;
1856 scb->timeout = ips_cmd_timeout;
1857
1858 scb->data_len = ha->flash_datasize;
1859 scb->data_busaddr =
1860 dma_map_single(&ha->pcidev->dev, ha->flash_data, scb->data_len,
1861 IPS_DMA_DIR(scb));
1862 scb->flags |= IPS_SCB_MAP_SINGLE;
1863 scb->cmd.flashfw.command_id = IPS_COMMAND_ID(ha, scb);
1864 scb->cmd.flashfw.buffer_addr = cpu_to_le32(scb->data_busaddr);
1865 if (pt->TimeOut)
1866 scb->timeout = pt->TimeOut;
1867 scb->scsi_cmd->result = DID_OK << 16;
1868 return IPS_SUCCESS;
1869}
1870
1871
1872
1873
1874
1875
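/* Release the flash staging buffer: either drop the claim on the shared
 * preallocated buffer or free the per-adapter coherent allocation. */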
1876static void
1877ips_free_flash_copperhead(ips_ha_t * ha)
1878{
1879 if (ha->flash_data == ips_FlashData)
1880 test_and_clear_bit(0, &ips_FlashDataInUse);
1881 else if (ha->flash_data)
1882 dma_free_coherent(&ha->pcidev->dev, ha->flash_len,
1883 ha->flash_data, ha->flash_busaddr);
1884 ha->flash_data = NULL;
1885}
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
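/* Build an SCB for a user-supplied (COPPUSRCMD/COPPIOCCMD) passthrough
 * command, wiring up the data buffer, DCDB pointer, and timeout. */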
1896static int
1897ips_usrcmd(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
1898{
1899 IPS_SG_LIST sg_list;
1900 uint32_t cmd_busaddr;
1901
1902 METHOD_TRACE("ips_usrcmd", 1);
1903
1904 if ((!scb) || (!pt) || (!ha))
1905 return (0);
1906
1907
1908 sg_list.list = scb->sg_list.list;
1909 cmd_busaddr = scb->scb_busaddr;
1910
1911 memcpy(&scb->cmd, &pt->CoppCP.cmd, sizeof (IPS_IOCTL_CMD));
1912 memcpy(&scb->dcdb, &pt->CoppCP.dcdb, sizeof (IPS_DCDB_TABLE));
1913
1914
1915 scb->sg_list.list = sg_list.list;
1916 scb->scb_busaddr = cmd_busaddr;
1917 scb->bus = scb->scsi_cmd->device->channel;
1918 scb->target_id = scb->scsi_cmd->device->id;
1919 scb->lun = scb->scsi_cmd->device->lun;
1920 scb->sg_len = 0;
1921 scb->data_len = 0;
1922 scb->flags = 0;
1923 scb->op_code = 0;
1924 scb->callback = ipsintr_done;
1925 scb->timeout = ips_cmd_timeout;
1926 scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
1927
1928
1929 if ((scb->cmd.basic_io.op_code == IPS_CMD_READ_SG) ||
1930 (scb->cmd.basic_io.op_code == IPS_CMD_WRITE_SG) ||
1931 (scb->cmd.basic_io.op_code == IPS_CMD_DCDB_SG))
1932 return (0);
1933
1934 if (pt->CmdBSize) {
1935 scb->data_len = pt->CmdBSize;
1936 scb->data_busaddr = ha->ioctl_busaddr + sizeof (ips_passthru_t);
1937 } else {
1938 scb->data_busaddr = 0L;
1939 }
1940
1941 if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB)
1942 scb->cmd.dcdb.dcdb_address = cpu_to_le32(scb->scb_busaddr +
1943 (unsigned long) &scb->
1944 dcdb -
1945 (unsigned long) scb);
1946
1947 if (pt->CmdBSize) {
1948 if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB)
1949 scb->dcdb.buffer_pointer =
1950 cpu_to_le32(scb->data_busaddr);
1951 else
1952 scb->cmd.basic_io.sg_addr =
1953 cpu_to_le32(scb->data_busaddr);
1954 }
1955
1956
1957 if (pt->TimeOut) {
1958 scb->timeout = pt->TimeOut;
1959
1960 if (pt->TimeOut <= 10)
1961 scb->dcdb.cmd_attribute |= IPS_TIMEOUT10;
1962 else if (pt->TimeOut <= 60)
1963 scb->dcdb.cmd_attribute |= IPS_TIMEOUT60;
1964 else
1965 scb->dcdb.cmd_attribute |= IPS_TIMEOUT20M;
1966 }
1967
1968
1969 scb->scsi_cmd->result = DID_OK << 16;
1970
1971
1972 return (1);
1973}
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
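/* Completion path for passthrough commands: copy status (and DCDB results)
 * back into the ioctl buffer and return it to the user's SCSI buffer. */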
1984static void
1985ips_cleanup_passthru(ips_ha_t * ha, ips_scb_t * scb)
1986{
1987 ips_passthru_t *pt;
1988
1989 METHOD_TRACE("ips_cleanup_passthru", 1);
1990
1991 if ((!scb) || (!scb->scsi_cmd) || (!scsi_sglist(scb->scsi_cmd))) {
1992 DEBUG_VAR(1, "(%s%d) couldn't cleanup after passthru",
1993 ips_name, ha->host_num);
1994
1995 return;
1996 }
1997 pt = (ips_passthru_t *) ha->ioctl_data;
1998
1999
2000 if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB)
2001 memcpy(&pt->CoppCP.dcdb, &scb->dcdb, sizeof (IPS_DCDB_TABLE));
2002
2003 pt->BasicStatus = scb->basic_status;
2004 pt->ExtendedStatus = scb->extended_status;
2005 pt->AdapterType = ha->ad_type;
2006
2007 if (ha->pcidev->device == IPS_DEVICEID_COPPERHEAD &&
2008 (scb->cmd.flashfw.op_code == IPS_CMD_DOWNLOAD ||
2009 scb->cmd.flashfw.op_code == IPS_CMD_RW_BIOSFW))
2010 ips_free_flash_copperhead(ha);
2011
2012 ips_scmd_buf_write(scb->scsi_cmd, ha->ioctl_data, ha->ioctl_datasize);
2013}
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
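/* Emit the show_info (/proc) summary: controller type, I/O resources,
 * BIOS/firmware/boot-block versions, and current command counts. */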
2024static int
2025ips_host_info(ips_ha_t *ha, struct seq_file *m)
2026{
2027 METHOD_TRACE("ips_host_info", 1);
2028
2029 seq_puts(m, "\nIBM ServeRAID General Information:\n\n");
2030
2031 if ((le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) &&
2032 (le16_to_cpu(ha->nvram->adapter_type) != 0))
2033 seq_printf(m, "\tController Type : %s\n",
2034 ips_adapter_name[ha->ad_type - 1]);
2035 else
2036 seq_puts(m, "\tController Type : Unknown\n");
2037
2038 if (ha->io_addr)
2039 seq_printf(m,
2040 "\tIO region : 0x%x (%d bytes)\n",
2041 ha->io_addr, ha->io_len);
2042
2043 if (ha->mem_addr) {
2044 seq_printf(m,
2045 "\tMemory region : 0x%x (%d bytes)\n",
2046 ha->mem_addr, ha->mem_len);
2047 seq_printf(m,
2048 "\tShared memory address : 0x%lx\n",
2049 (unsigned long)ha->mem_ptr);
2050 }
2051
2052 seq_printf(m, "\tIRQ number : %d\n", ha->pcidev->irq);
2053
2054
2055
2056
2057 if (le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) {
2058 if (ha->nvram->bios_low[3] == 0) {
2059 seq_printf(m,
2060 "\tBIOS Version : %c%c%c%c%c%c%c\n",
2061 ha->nvram->bios_high[0], ha->nvram->bios_high[1],
2062 ha->nvram->bios_high[2], ha->nvram->bios_high[3],
2063 ha->nvram->bios_low[0], ha->nvram->bios_low[1],
2064 ha->nvram->bios_low[2]);
2065
2066 } else {
2067 seq_printf(m,
2068 "\tBIOS Version : %c%c%c%c%c%c%c%c\n",
2069 ha->nvram->bios_high[0], ha->nvram->bios_high[1],
2070 ha->nvram->bios_high[2], ha->nvram->bios_high[3],
2071 ha->nvram->bios_low[0], ha->nvram->bios_low[1],
2072 ha->nvram->bios_low[2], ha->nvram->bios_low[3]);
2073 }
2074
2075 }
2076
2077 if (ha->enq->CodeBlkVersion[7] == 0) {
2078 seq_printf(m,
2079 "\tFirmware Version : %c%c%c%c%c%c%c\n",
2080 ha->enq->CodeBlkVersion[0], ha->enq->CodeBlkVersion[1],
2081 ha->enq->CodeBlkVersion[2], ha->enq->CodeBlkVersion[3],
2082 ha->enq->CodeBlkVersion[4], ha->enq->CodeBlkVersion[5],
2083 ha->enq->CodeBlkVersion[6]);
2084 } else {
2085 seq_printf(m,
2086 "\tFirmware Version : %c%c%c%c%c%c%c%c\n",
2087 ha->enq->CodeBlkVersion[0], ha->enq->CodeBlkVersion[1],
2088 ha->enq->CodeBlkVersion[2], ha->enq->CodeBlkVersion[3],
2089 ha->enq->CodeBlkVersion[4], ha->enq->CodeBlkVersion[5],
2090 ha->enq->CodeBlkVersion[6], ha->enq->CodeBlkVersion[7]);
2091 }
2092
2093 if (ha->enq->BootBlkVersion[7] == 0) {
2094 seq_printf(m,
2095 "\tBoot Block Version : %c%c%c%c%c%c%c\n",
2096 ha->enq->BootBlkVersion[0], ha->enq->BootBlkVersion[1],
2097 ha->enq->BootBlkVersion[2], ha->enq->BootBlkVersion[3],
2098 ha->enq->BootBlkVersion[4], ha->enq->BootBlkVersion[5],
2099 ha->enq->BootBlkVersion[6]);
2100 } else {
2101 seq_printf(m,
2102 "\tBoot Block Version : %c%c%c%c%c%c%c%c\n",
2103 ha->enq->BootBlkVersion[0], ha->enq->BootBlkVersion[1],
2104 ha->enq->BootBlkVersion[2], ha->enq->BootBlkVersion[3],
2105 ha->enq->BootBlkVersion[4], ha->enq->BootBlkVersion[5],
2106 ha->enq->BootBlkVersion[6], ha->enq->BootBlkVersion[7]);
2107 }
2108
2109 seq_printf(m, "\tDriver Version : %s%s\n",
2110 IPS_VERSION_HIGH, IPS_VERSION_LOW);
2111
2112 seq_printf(m, "\tDriver Build : %d\n",
2113 IPS_BUILD_IDENT);
2114
2115 seq_printf(m, "\tMax Physical Devices : %d\n",
2116 ha->enq->ucMaxPhysicalDevices);
2117 seq_printf(m, "\tMax Active Commands : %d\n",
2118 ha->max_cmds);
2119 seq_printf(m, "\tCurrent Queued Commands : %d\n",
2120 ha->scb_waitlist.count);
2121 seq_printf(m, "\tCurrent Active Commands : %d\n",
2122 ha->scb_activelist.count - ha->num_ioctl);
2123 seq_printf(m, "\tCurrent Queued PT Commands : %d\n",
2124 ha->copp_waitlist.count);
2125 seq_printf(m, "\tCurrent Active PT Commands : %d\n",
2126 ha->num_ioctl);
2127
2128 seq_putc(m, '\n');
2129
2130 return 0;
2131}
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
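/* Derive ha->ad_type (the marketing model) from the PCI device ID, revision,
 * and subsystem ID. */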
2142static void
2143ips_identify_controller(ips_ha_t * ha)
2144{
2145 METHOD_TRACE("ips_identify_controller", 1);
2146
2147 switch (ha->pcidev->device) {
2148 case IPS_DEVICEID_COPPERHEAD:
2149 if (ha->pcidev->revision <= IPS_REVID_SERVERAID) {
2150 ha->ad_type = IPS_ADTYPE_SERVERAID;
2151 } else if (ha->pcidev->revision == IPS_REVID_SERVERAID2) {
2152 ha->ad_type = IPS_ADTYPE_SERVERAID2;
2153 } else if (ha->pcidev->revision == IPS_REVID_NAVAJO) {
2154 ha->ad_type = IPS_ADTYPE_NAVAJO;
2155 } else if ((ha->pcidev->revision == IPS_REVID_SERVERAID2)
2156 && (ha->slot_num == 0)) {
2157 ha->ad_type = IPS_ADTYPE_KIOWA;
2158 } else if ((ha->pcidev->revision >= IPS_REVID_CLARINETP1) &&
2159 (ha->pcidev->revision <= IPS_REVID_CLARINETP3)) {
2160 if (ha->enq->ucMaxPhysicalDevices == 15)
2161 ha->ad_type = IPS_ADTYPE_SERVERAID3L;
2162 else
2163 ha->ad_type = IPS_ADTYPE_SERVERAID3;
2164 } else if ((ha->pcidev->revision >= IPS_REVID_TROMBONE32) &&
2165 (ha->pcidev->revision <= IPS_REVID_TROMBONE64)) {
2166 ha->ad_type = IPS_ADTYPE_SERVERAID4H;
2167 }
2168 break;
2169
2170 case IPS_DEVICEID_MORPHEUS:
2171 switch (ha->pcidev->subsystem_device) {
2172 case IPS_SUBDEVICEID_4L:
2173 ha->ad_type = IPS_ADTYPE_SERVERAID4L;
2174 break;
2175
2176 case IPS_SUBDEVICEID_4M:
2177 ha->ad_type = IPS_ADTYPE_SERVERAID4M;
2178 break;
2179
2180 case IPS_SUBDEVICEID_4MX:
2181 ha->ad_type = IPS_ADTYPE_SERVERAID4MX;
2182 break;
2183
2184 case IPS_SUBDEVICEID_4LX:
2185 ha->ad_type = IPS_ADTYPE_SERVERAID4LX;
2186 break;
2187
2188 case IPS_SUBDEVICEID_5I2:
2189 ha->ad_type = IPS_ADTYPE_SERVERAID5I2;
2190 break;
2191
2192 case IPS_SUBDEVICEID_5I1:
2193 ha->ad_type = IPS_ADTYPE_SERVERAID5I1;
2194 break;
2195 }
2196
2197 break;
2198
2199 case IPS_DEVICEID_MARCO:
2200 switch (ha->pcidev->subsystem_device) {
2201 case IPS_SUBDEVICEID_6M:
2202 ha->ad_type = IPS_ADTYPE_SERVERAID6M;
2203 break;
2204 case IPS_SUBDEVICEID_6I:
2205 ha->ad_type = IPS_ADTYPE_SERVERAID6I;
2206 break;
2207 case IPS_SUBDEVICEID_7k:
2208 ha->ad_type = IPS_ADTYPE_SERVERAID7k;
2209 break;
2210 case IPS_SUBDEVICEID_7M:
2211 ha->ad_type = IPS_ADTYPE_SERVERAID7M;
2212 break;
2213 }
2214 break;
2215 }
2216}
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
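/* Read the adapter BIOS version, either directly from flash on copperhead
 * controllers or via an RW_BIOSFW command on newer ones, and format it into
 * ha->bios_version. */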
2227static void
2228ips_get_bios_version(ips_ha_t * ha, int intr)
2229{
2230 ips_scb_t *scb;
2231 int ret;
2232 uint8_t major;
2233 uint8_t minor;
2234 uint8_t subminor;
2235 uint8_t *buffer;
2236
2237 METHOD_TRACE("ips_get_bios_version", 1);
2238
2239 major = 0;
2240 minor = 0;
2241
2242 memcpy(ha->bios_version, "       ?", 8);
2243
2244 if (ha->pcidev->device == IPS_DEVICEID_COPPERHEAD) {
2245 if (IPS_USE_MEMIO(ha)) {
2246
2247
2248
2249 writel(0, ha->mem_ptr + IPS_REG_FLAP);
2250 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2251 udelay(25);
2252
2253 if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0x55)
2254 return;
2255
2256 writel(1, ha->mem_ptr + IPS_REG_FLAP);
2257 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2258 udelay(25);
2259
2260 if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0xAA)
2261 return;
2262
2263
2264 writel(0x1FF, ha->mem_ptr + IPS_REG_FLAP);
2265 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2266 udelay(25);
2267
2268 major = readb(ha->mem_ptr + IPS_REG_FLDP);
2269
2270
2271 writel(0x1FE, ha->mem_ptr + IPS_REG_FLAP);
2272 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2273 udelay(25);
2274 minor = readb(ha->mem_ptr + IPS_REG_FLDP);
2275
2276
2277 writel(0x1FD, ha->mem_ptr + IPS_REG_FLAP);
2278 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2279 udelay(25);
2280 subminor = readb(ha->mem_ptr + IPS_REG_FLDP);
2281
2282 } else {
2283
2284
2285
2286 outl(0, ha->io_addr + IPS_REG_FLAP);
2287 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2288 udelay(25);
2289
2290 if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55)
2291 return;
2292
2293 outl(1, ha->io_addr + IPS_REG_FLAP);
2294 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2295 udelay(25);
2296
2297 if (inb(ha->io_addr + IPS_REG_FLDP) != 0xAA)
2298 return;
2299
2300
2301 outl(0x1FF, ha->io_addr + IPS_REG_FLAP);
2302 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2303 udelay(25);
2304
2305 major = inb(ha->io_addr + IPS_REG_FLDP);
2306
2307
2308 outl(0x1FE, ha->io_addr + IPS_REG_FLAP);
2309 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2310 udelay(25);
2311
2312 minor = inb(ha->io_addr + IPS_REG_FLDP);
2313
2314
2315 outl(0x1FD, ha->io_addr + IPS_REG_FLAP);
2316 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2317 udelay(25);
2318
2319 subminor = inb(ha->io_addr + IPS_REG_FLDP);
2320
2321 }
2322 } else {
2323
2324
2325 buffer = ha->ioctl_data;
2326
2327 memset(buffer, 0, 0x1000);
2328
2329 scb = &ha->scbs[ha->max_cmds - 1];
2330
2331 ips_init_scb(ha, scb);
2332
2333 scb->timeout = ips_cmd_timeout;
2334 scb->cdb[0] = IPS_CMD_RW_BIOSFW;
2335
2336 scb->cmd.flashfw.op_code = IPS_CMD_RW_BIOSFW;
2337 scb->cmd.flashfw.command_id = IPS_COMMAND_ID(ha, scb);
2338 scb->cmd.flashfw.type = 1;
2339 scb->cmd.flashfw.direction = 0;
2340 scb->cmd.flashfw.count = cpu_to_le32(0x800);
2341 scb->cmd.flashfw.total_packets = 1;
2342 scb->cmd.flashfw.packet_num = 0;
2343 scb->data_len = 0x1000;
2344 scb->cmd.flashfw.buffer_addr = ha->ioctl_busaddr;
2345
2346
2347 if (((ret =
2348 ips_send_wait(ha, scb, ips_cmd_timeout,
2349 intr)) == IPS_FAILURE)
2350 || (ret == IPS_SUCCESS_IMM)
2351 || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) {
2352
2353
2354 return;
2355 }
2356
2357 if ((buffer[0xC0] == 0x55) && (buffer[0xC1] == 0xAA)) {
2358 major = buffer[0x1ff + 0xC0];
2359 minor = buffer[0x1fe + 0xC0];
2360 subminor = buffer[0x1fd + 0xC0];
2361 } else {
2362 return;
2363 }
2364 }
2365
2366 ha->bios_version[0] = hex_asc_upper_hi(major);
2367 ha->bios_version[1] = '.';
2368 ha->bios_version[2] = hex_asc_upper_lo(major);
2369 ha->bios_version[3] = hex_asc_upper_lo(subminor);
2370 ha->bios_version[4] = '.';
2371 ha->bios_version[5] = hex_asc_upper_hi(minor);
2372 ha->bios_version[6] = hex_asc_upper_lo(minor);
2373 ha->bios_version[7] = 0;
2374}
2375
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
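/* One-time controller initialization: read config, status, and subsystem
 * parameters, write driver info, then size buses, targets, maximum transfer,
 * and the command queue depth. */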
2387static int
2388ips_hainit(ips_ha_t * ha)
2389{
2390 int i;
2391
2392 METHOD_TRACE("ips_hainit", 1);
2393
2394 if (!ha)
2395 return (0);
2396
2397 if (ha->func.statinit)
2398 (*ha->func.statinit) (ha);
2399
2400 if (ha->func.enableint)
2401 (*ha->func.enableint) (ha);
2402
2403
2404 ha->reset_count = 1;
2405 ha->last_ffdc = ktime_get_real_seconds();
2406 ips_ffdc_reset(ha, IPS_INTR_IORL);
2407
2408 if (!ips_read_config(ha, IPS_INTR_IORL)) {
2409 IPS_PRINTK(KERN_WARNING, ha->pcidev,
2410 "unable to read config from controller.\n");
2411
2412 return (0);
2413 }
2414
2415 if (!ips_read_adapter_status(ha, IPS_INTR_IORL)) {
2416 IPS_PRINTK(KERN_WARNING, ha->pcidev,
2417 "unable to read controller status.\n");
2418
2419 return (0);
2420 }
2421
2422
2423 ips_identify_controller(ha);
2424
2425 if (!ips_read_subsystem_parameters(ha, IPS_INTR_IORL)) {
2426 IPS_PRINTK(KERN_WARNING, ha->pcidev,
2427 "unable to read subsystem parameters.\n");
2428
2429 return (0);
2430 }
2431
2432
2433 if (!ips_write_driver_status(ha, IPS_INTR_IORL)) {
2434 IPS_PRINTK(KERN_WARNING, ha->pcidev,
2435 "unable to write driver info to controller.\n");
2436
2437 return (0);
2438 }
2439
2440
2441 if ((ha->conf->ucLogDriveCount > 0) && (ha->requires_esl == 1))
2442 ips_clear_adapter(ha, IPS_INTR_IORL);
2443
2444
2445 ha->ntargets = IPS_MAX_TARGETS + 1;
2446 ha->nlun = 1;
2447 ha->nbus = (ha->enq->ucMaxPhysicalDevices / IPS_MAX_TARGETS) + 1;
2448
2449 switch (ha->conf->logical_drive[0].ucStripeSize) {
2450 case 4:
2451 ha->max_xfer = 0x10000;
2452 break;
2453
2454 case 5:
2455 ha->max_xfer = 0x20000;
2456 break;
2457
2458 case 6:
2459 ha->max_xfer = 0x40000;
2460 break;
2461
2462 case 7:
2463 default:
2464 ha->max_xfer = 0x80000;
2465 break;
2466 }
2467
2468
2469 if (le32_to_cpu(ha->subsys->param[4]) & 0x1) {
2470
2471 ha->max_cmds = ha->enq->ucConcurrentCmdCount;
2472 } else {
2473
2474 switch (ha->conf->logical_drive[0].ucStripeSize) {
2475 case 4:
2476 ha->max_cmds = 32;
2477 break;
2478
2479 case 5:
2480 ha->max_cmds = 16;
2481 break;
2482
2483 case 6:
2484 ha->max_cmds = 8;
2485 break;
2486
2487 case 7:
2488 default:
2489 ha->max_cmds = 4;
2490 break;
2491 }
2492 }
2493
2494
2495 if ((ha->ad_type == IPS_ADTYPE_SERVERAID3L) ||
2496 (ha->ad_type == IPS_ADTYPE_SERVERAID4L) ||
2497 (ha->ad_type == IPS_ADTYPE_SERVERAID4LX)) {
2498 if ((ha->max_cmds > MaxLiteCmds) && (MaxLiteCmds))
2499 ha->max_cmds = MaxLiteCmds;
2500 }
2501
2502
2503 ha->ha_id[0] = IPS_ADAPTER_ID;
2504 for (i = 1; i < ha->nbus; i++) {
2505 ha->ha_id[i] = ha->conf->init_id[i - 1] & 0x1f;
2506 ha->dcdb_active[i - 1] = 0;
2507 }
2508
2509 return (1);
2510}
2511
2512
2513
2514
2515
2516
2517
2518
2519
2520
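/*
 * ips_next - dispatch queued work to the adapter.
 *
 * Moves waiting passthru (ioctl) items and ordinary SCSI commands from
 * the wait queues into SCBs, maps their scatter/gather lists and issues
 * them, placing successfully started commands on the active list.  When
 * called with interrupts enabled it temporarily drops the host lock
 * around passthru setup and DMA mapping.
 */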
2521static void
2522ips_next(ips_ha_t * ha, int intr)
2523{
2524 ips_scb_t *scb;
2525 struct scsi_cmnd *SC;
2526 struct scsi_cmnd *p;
2527 struct scsi_cmnd *q;
2528 ips_copp_wait_item_t *item;
2529 int ret;
2530 struct Scsi_Host *host;
2531 METHOD_TRACE("ips_next", 1);
2532
2533 if (!ha)
2534 return;
2535 host = ips_sh[ha->host_num];
2536
2537
2538
2539
2540 if (intr == IPS_INTR_ON)
2541 spin_lock(host->host_lock);
2542
2543 if ((ha->subsys->param[3] & 0x300000)
2544 && (ha->scb_activelist.count == 0)) {
2545 time64_t now = ktime_get_real_seconds();
2546 if (now - ha->last_ffdc > IPS_SECS_8HOURS) {
2547 ha->last_ffdc = now;
2548 ips_ffdc_time(ha);
2549 }
2550 }
2551
2552
2553
2554
2555
2556
2557
2558
2559 while ((ha->num_ioctl < IPS_MAX_IOCTL) &&
2560 (ha->copp_waitlist.head) && (scb = ips_getscb(ha))) {
2561
2562 item = ips_removeq_copp_head(&ha->copp_waitlist);
2563 ha->num_ioctl++;
2564 if (intr == IPS_INTR_ON)
2565 spin_unlock(host->host_lock);
2566 scb->scsi_cmd = item->scsi_cmd;
2567 kfree(item);
2568
2569 ret = ips_make_passthru(ha, scb->scsi_cmd, scb, intr);
2570
2571 if (intr == IPS_INTR_ON)
2572 spin_lock(host->host_lock);
2573 switch (ret) {
2574 case IPS_FAILURE:
2575 if (scb->scsi_cmd) {
2576 scb->scsi_cmd->result = DID_ERROR << 16;
2577 scb->scsi_cmd->scsi_done(scb->scsi_cmd);
2578 }
2579
2580 ips_freescb(ha, scb);
2581 break;
2582 case IPS_SUCCESS_IMM:
2583 if (scb->scsi_cmd) {
2584 scb->scsi_cmd->result = DID_OK << 16;
2585 scb->scsi_cmd->scsi_done(scb->scsi_cmd);
2586 }
2587
2588 ips_freescb(ha, scb);
2589 break;
2590 default:
2591 break;
2592 }
2593
2594 if (ret != IPS_SUCCESS) {
2595 ha->num_ioctl--;
2596 continue;
2597 }
2598
2599 ret = ips_send_cmd(ha, scb);
2600
2601 if (ret == IPS_SUCCESS)
2602 ips_putq_scb_head(&ha->scb_activelist, scb);
2603 else
2604 ha->num_ioctl--;
2605
2606 switch (ret) {
2607 case IPS_FAILURE:
2608 if (scb->scsi_cmd) {
2609 scb->scsi_cmd->result = DID_ERROR << 16;
2610 }
2611
2612 ips_freescb(ha, scb);
2613 break;
2614 case IPS_SUCCESS_IMM:
2615 ips_freescb(ha, scb);
2616 break;
2617 default:
2618 break;
2619 }
2620
2621 }
2622
2623
2624
2625
2626
2627 p = ha->scb_waitlist.head;
2628 while ((p) && (scb = ips_getscb(ha))) {
2629 if ((scmd_channel(p) > 0)
2630 && (ha->
2631 dcdb_active[scmd_channel(p) -
2632 1] & (1 << scmd_id(p)))) {
2633 ips_freescb(ha, scb);
2634 p = (struct scsi_cmnd *) p->host_scribble;
2635 continue;
2636 }
2637
2638 q = p;
2639 SC = ips_removeq_wait(&ha->scb_waitlist, q);
2640
2641 if (intr == IPS_INTR_ON)
2642 spin_unlock(host->host_lock);
2643
2644 SC->result = DID_OK;
2645 SC->host_scribble = NULL;
2646
2647 scb->target_id = SC->device->id;
2648 scb->lun = SC->device->lun;
2649 scb->bus = SC->device->channel;
2650 scb->scsi_cmd = SC;
2651 scb->breakup = 0;
2652 scb->data_len = 0;
2653 scb->callback = ipsintr_done;
2654 scb->timeout = ips_cmd_timeout;
2655 memset(&scb->cmd, 0, 16);
2656
2657
2658 memcpy(scb->cdb, SC->cmnd, SC->cmd_len);
2659
2660 scb->sg_count = scsi_dma_map(SC);
2661 BUG_ON(scb->sg_count < 0);
2662 if (scb->sg_count) {
2663 struct scatterlist *sg;
2664 int i;
2665
2666 scb->flags |= IPS_SCB_MAP_SG;
2667
2668 scsi_for_each_sg(SC, sg, scb->sg_count, i) {
2669 if (ips_fill_scb_sg_single
2670 (ha, sg_dma_address(sg), scb, i,
2671 sg_dma_len(sg)) < 0)
2672 break;
2673 }
2674 scb->dcdb.transfer_length = scb->data_len;
2675 } else {
2676 scb->data_busaddr = 0L;
2677 scb->sg_len = 0;
2678 scb->data_len = 0;
2679 scb->dcdb.transfer_length = 0;
2680 }
2681
2682 scb->dcdb.cmd_attribute =
2683 ips_command_direction[scb->scsi_cmd->cmnd[0]];
2684
2685
2686
2687 if ((scb->scsi_cmd->cmnd[0] == WRITE_BUFFER) &&
2688 (scb->data_len == 0))
2689 scb->dcdb.cmd_attribute = 0;
2690
2691 if (!(scb->dcdb.cmd_attribute & 0x3))
2692 scb->dcdb.transfer_length = 0;
2693
2694 if (scb->data_len >= IPS_MAX_XFER) {
2695 scb->dcdb.cmd_attribute |= IPS_TRANSFER64K;
2696 scb->dcdb.transfer_length = 0;
2697 }
2698 if (intr == IPS_INTR_ON)
2699 spin_lock(host->host_lock);
2700
2701 ret = ips_send_cmd(ha, scb);
2702
2703 switch (ret) {
2704 case IPS_SUCCESS:
2705 ips_putq_scb_head(&ha->scb_activelist, scb);
2706 break;
2707 case IPS_FAILURE:
2708 if (scb->scsi_cmd) {
2709 scb->scsi_cmd->result = DID_ERROR << 16;
2710 scb->scsi_cmd->scsi_done(scb->scsi_cmd);
2711 }
2712
2713 if (scb->bus)
2714 ha->dcdb_active[scb->bus - 1] &=
2715 ~(1 << scb->target_id);
2716
2717 ips_freescb(ha, scb);
2718 break;
2719 case IPS_SUCCESS_IMM:
2720 if (scb->scsi_cmd)
2721 scb->scsi_cmd->scsi_done(scb->scsi_cmd);
2722
2723 if (scb->bus)
2724 ha->dcdb_active[scb->bus - 1] &=
2725 ~(1 << scb->target_id);
2726
2727 ips_freescb(ha, scb);
2728 break;
2729 default:
2730 break;
2731 }
2732
2733 p = (struct scsi_cmnd *) p->host_scribble;
2734
2735 }
2736
2737 if (intr == IPS_INTR_ON)
2738 spin_unlock(host->host_lock);
2739}
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
2750
2751
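/*
 * ips_putq_scb_head - push an SCB onto the head of a singly linked SCB
 * queue.  This and the queue helpers that follow do no locking of their
 * own; callers are presumably expected to hold the appropriate lock.
 */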
2752static void
2753ips_putq_scb_head(ips_scb_queue_t * queue, ips_scb_t * item)
2754{
2755 METHOD_TRACE("ips_putq_scb_head", 1);
2756
2757 if (!item)
2758 return;
2759
2760 item->q_next = queue->head;
2761 queue->head = item;
2762
2763 if (!queue->tail)
2764 queue->tail = item;
2765
2766 queue->count++;
2767}
2768
2769
2770
2771
2772
2773
2774
2775
2776
2777
2778
2779
2780static ips_scb_t *
2781ips_removeq_scb_head(ips_scb_queue_t * queue)
2782{
2783 ips_scb_t *item;
2784
2785 METHOD_TRACE("ips_removeq_scb_head", 1);
2786
2787 item = queue->head;
2788
2789 if (!item) {
2790 return (NULL);
2791 }
2792
2793 queue->head = item->q_next;
2794 item->q_next = NULL;
2795
2796 if (queue->tail == item)
2797 queue->tail = NULL;
2798
2799 queue->count--;
2800
2801 return (item);
2802}
2803
2804
2805
2806
2807
2808
2809
2810
2811
2812
2813
2814
2815static ips_scb_t *
2816ips_removeq_scb(ips_scb_queue_t * queue, ips_scb_t * item)
2817{
2818 ips_scb_t *p;
2819
2820 METHOD_TRACE("ips_removeq_scb", 1);
2821
2822 if (!item)
2823 return (NULL);
2824
2825 if (item == queue->head) {
2826 return (ips_removeq_scb_head(queue));
2827 }
2828
2829 p = queue->head;
2830
2831 while ((p) && (item != p->q_next))
2832 p = p->q_next;
2833
2834 if (p) {
2835
2836 p->q_next = item->q_next;
2837
2838 if (!item->q_next)
2839 queue->tail = p;
2840
2841 item->q_next = NULL;
2842 queue->count--;
2843
2844 return (item);
2845 }
2846
2847 return (NULL);
2848}
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860
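/*
 * ips_putq_wait_tail - append a scsi_cmnd to the tail of a wait queue.
 * The wait queues chain commands through the host_scribble pointer.
 */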
2861static void ips_putq_wait_tail(ips_wait_queue_entry_t *queue, struct scsi_cmnd *item)
2862{
2863 METHOD_TRACE("ips_putq_wait_tail", 1);
2864
2865 if (!item)
2866 return;
2867
2868 item->host_scribble = NULL;
2869
2870 if (queue->tail)
2871 queue->tail->host_scribble = (char *) item;
2872
2873 queue->tail = item;
2874
2875 if (!queue->head)
2876 queue->head = item;
2877
2878 queue->count++;
2879}
2880
2881
2882
2883
2884
2885
2886
2887
2888
2889
2890
2891
2892static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_entry_t *queue)
2893{
2894 struct scsi_cmnd *item;
2895
2896 METHOD_TRACE("ips_removeq_wait_head", 1);
2897
2898 item = queue->head;
2899
2900 if (!item) {
2901 return (NULL);
2902 }
2903
2904 queue->head = (struct scsi_cmnd *) item->host_scribble;
2905 item->host_scribble = NULL;
2906
2907 if (queue->tail == item)
2908 queue->tail = NULL;
2909
2910 queue->count--;
2911
2912 return (item);
2913}
2914
2915
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_entry_t *queue,
2927 struct scsi_cmnd *item)
2928{
2929 struct scsi_cmnd *p;
2930
2931 METHOD_TRACE("ips_removeq_wait", 1);
2932
2933 if (!item)
2934 return (NULL);
2935
2936 if (item == queue->head) {
2937 return (ips_removeq_wait_head(queue));
2938 }
2939
2940 p = queue->head;
2941
2942 while ((p) && (item != (struct scsi_cmnd *) p->host_scribble))
2943 p = (struct scsi_cmnd *) p->host_scribble;
2944
2945 if (p) {
2946
2947 p->host_scribble = item->host_scribble;
2948
2949 if (!item->host_scribble)
2950 queue->tail = p;
2951
2952 item->host_scribble = NULL;
2953 queue->count--;
2954
2955 return (item);
2956 }
2957
2958 return (NULL);
2959}
2960
2961
2962
2963
2964
2965
2966
2967
2968
2969
2970
2971
2972static void
2973ips_putq_copp_tail(ips_copp_queue_t * queue, ips_copp_wait_item_t * item)
2974{
2975 METHOD_TRACE("ips_putq_copp_tail", 1);
2976
2977 if (!item)
2978 return;
2979
2980 item->next = NULL;
2981
2982 if (queue->tail)
2983 queue->tail->next = item;
2984
2985 queue->tail = item;
2986
2987 if (!queue->head)
2988 queue->head = item;
2989
2990 queue->count++;
2991}
2992
2993
2994
2995
2996
2997
2998
2999
3000
3001
3002
3003
3004static ips_copp_wait_item_t *
3005ips_removeq_copp_head(ips_copp_queue_t * queue)
3006{
3007 ips_copp_wait_item_t *item;
3008
3009 METHOD_TRACE("ips_removeq_copp_head", 1);
3010
3011 item = queue->head;
3012
3013 if (!item) {
3014 return (NULL);
3015 }
3016
3017 queue->head = item->next;
3018 item->next = NULL;
3019
3020 if (queue->tail == item)
3021 queue->tail = NULL;
3022
3023 queue->count--;
3024
3025 return (item);
3026}
3027
3028
3029
3030
3031
3032
3033
3034
3035
3036
3037
3038
3039static ips_copp_wait_item_t *
3040ips_removeq_copp(ips_copp_queue_t * queue, ips_copp_wait_item_t * item)
3041{
3042 ips_copp_wait_item_t *p;
3043
3044 METHOD_TRACE("ips_removeq_copp", 1);
3045
3046 if (!item)
3047 return (NULL);
3048
3049 if (item == queue->head) {
3050 return (ips_removeq_copp_head(queue));
3051 }
3052
3053 p = queue->head;
3054
3055 while ((p) && (item != p->next))
3056 p = p->next;
3057
3058 if (p) {
3059
3060 p->next = item->next;
3061
3062 if (!item->next)
3063 queue->tail = p;
3064
3065 item->next = NULL;
3066 queue->count--;
3067
3068 return (item);
3069 }
3070
3071 return (NULL);
3072}
3073
3074
3075
3076
3077
3078
3079
3080
3081
3082
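/*
 * ipsintr_blocking - completion callback for internal, polled commands
 * issued via ips_send_wait(): release the SCB and clear the wait flag
 * so that ips_wait() can return.
 */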
3083static void
3084ipsintr_blocking(ips_ha_t * ha, ips_scb_t * scb)
3085{
3086 METHOD_TRACE("ipsintr_blocking", 2);
3087
3088 ips_freescb(ha, scb);
3089 if ((ha->waitflag == TRUE) && (ha->cmd_in_progress == scb->cdb[0])) {
3090 ha->waitflag = FALSE;
3091
3092 return;
3093 }
3094}
3095
3096
3097
3098
3099
3100
3101
3102
3103
3104
3105static void
3106ipsintr_done(ips_ha_t * ha, ips_scb_t * scb)
3107{
3108 METHOD_TRACE("ipsintr_done", 2);
3109
3110 if (!scb) {
3111 IPS_PRINTK(KERN_WARNING, ha->pcidev,
3112 "Spurious interrupt; scb NULL.\n");
3113
3114 return;
3115 }
3116
3117 if (scb->scsi_cmd == NULL) {
3118
3119 IPS_PRINTK(KERN_WARNING, ha->pcidev,
3120 "Spurious interrupt; scsi_cmd not set.\n");
3121
3122 return;
3123 }
3124
3125 ips_done(ha, scb);
3126}
3127
3128
3129
3130
3131
3132
3133
3134
3135
3136
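/*
 * ips_done - finish a command whose completion status has been read.
 *
 * Cleans up passthru commands, re-issues commands that were broken up
 * across scatter/gather limits, clears the DCDB-active bit for physical
 * devices and completes the scsi_cmnd.
 */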
3137static void
3138ips_done(ips_ha_t * ha, ips_scb_t * scb)
3139{
3140 int ret;
3141
3142 METHOD_TRACE("ips_done", 1);
3143
3144 if (!scb)
3145 return;
3146
3147 if ((scb->scsi_cmd) && (ips_is_passthru(scb->scsi_cmd))) {
3148 ips_cleanup_passthru(ha, scb);
3149 ha->num_ioctl--;
3150 } else {
3151
3152
3153
3154
3155
3156 if ((scb->breakup) || (scb->sg_break)) {
3157 struct scatterlist *sg;
3158 int i, sg_dma_index, ips_sg_index = 0;
3159
3160
3161 scb->data_len = 0;
3162
3163 sg = scsi_sglist(scb->scsi_cmd);
3164
3165
3166 sg_dma_index = scb->breakup;
3167 for (i = 0; i < scb->breakup; i++)
3168 sg = sg_next(sg);
3169
3170
3171 ips_fill_scb_sg_single(ha,
3172 sg_dma_address(sg),
3173 scb, ips_sg_index++,
3174 sg_dma_len(sg));
3175
3176 for (; sg_dma_index < scsi_sg_count(scb->scsi_cmd);
3177 sg_dma_index++, sg = sg_next(sg)) {
3178 if (ips_fill_scb_sg_single
3179 (ha,
3180 sg_dma_address(sg),
3181 scb, ips_sg_index++,
3182 sg_dma_len(sg)) < 0)
3183 break;
3184 }
3185
3186 scb->dcdb.transfer_length = scb->data_len;
3187 scb->dcdb.cmd_attribute |=
3188 ips_command_direction[scb->scsi_cmd->cmnd[0]];
3189
3190 if (!(scb->dcdb.cmd_attribute & 0x3))
3191 scb->dcdb.transfer_length = 0;
3192
3193 if (scb->data_len >= IPS_MAX_XFER) {
3194 scb->dcdb.cmd_attribute |= IPS_TRANSFER64K;
3195 scb->dcdb.transfer_length = 0;
3196 }
3197
3198 ret = ips_send_cmd(ha, scb);
3199
3200 switch (ret) {
3201 case IPS_FAILURE:
3202 if (scb->scsi_cmd) {
3203 scb->scsi_cmd->result = DID_ERROR << 16;
3204 scb->scsi_cmd->scsi_done(scb->scsi_cmd);
3205 }
3206
3207 ips_freescb(ha, scb);
3208 break;
3209 case IPS_SUCCESS_IMM:
3210 if (scb->scsi_cmd) {
3211 scb->scsi_cmd->result = DID_ERROR << 16;
3212 scb->scsi_cmd->scsi_done(scb->scsi_cmd);
3213 }
3214
3215 ips_freescb(ha, scb);
3216 break;
3217 default:
3218 break;
3219 }
3220
3221 return;
3222 }
3223 }
3224
3225 if (scb->bus) {
3226 ha->dcdb_active[scb->bus - 1] &= ~(1 << scb->target_id);
3227 }
3228
3229 scb->scsi_cmd->scsi_done(scb->scsi_cmd);
3230
3231 ips_freescb(ha, scb);
3232}
3233
3234
3235
3236
3237
3238
3239
3240
3241
3242
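/*
 * ips_map_status - translate an SCB's basic/extended adapter status
 * into a SCSI midlayer result (DID_*), copying sense data into the
 * scsi_cmnd for check-condition completions on physical devices.
 */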
3243static int
3244ips_map_status(ips_ha_t * ha, ips_scb_t * scb, ips_stat_t * sp)
3245{
3246 int errcode;
3247 int device_error;
3248 uint32_t transfer_len;
3249 IPS_DCDB_TABLE_TAPE *tapeDCDB;
3250 IPS_SCSI_INQ_DATA inquiryData;
3251
3252 METHOD_TRACE("ips_map_status", 1);
3253
3254 if (scb->bus) {
3255 DEBUG_VAR(2,
3256 "(%s%d) Physical device error (%d %d %d): %x %x, Sense Key: %x, ASC: %x, ASCQ: %x",
3257 ips_name, ha->host_num,
3258 scb->scsi_cmd->device->channel,
3259 scb->scsi_cmd->device->id, scb->scsi_cmd->device->lun,
3260 scb->basic_status, scb->extended_status,
3261 scb->extended_status ==
3262 IPS_ERR_CKCOND ? scb->dcdb.sense_info[2] & 0xf : 0,
3263 scb->extended_status ==
3264 IPS_ERR_CKCOND ? scb->dcdb.sense_info[12] : 0,
3265 scb->extended_status ==
3266 IPS_ERR_CKCOND ? scb->dcdb.sense_info[13] : 0);
3267 }
3268
3269
3270 errcode = DID_ERROR;
3271 device_error = 0;
3272
3273 switch (scb->basic_status & IPS_GSC_STATUS_MASK) {
3274 case IPS_CMD_TIMEOUT:
3275 errcode = DID_TIME_OUT;
3276 break;
3277
3278 case IPS_INVAL_OPCO:
3279 case IPS_INVAL_CMD_BLK:
3280 case IPS_INVAL_PARM_BLK:
3281 case IPS_LD_ERROR:
3282 case IPS_CMD_CMPLT_WERROR:
3283 break;
3284
3285 case IPS_PHYS_DRV_ERROR:
3286 switch (scb->extended_status) {
3287 case IPS_ERR_SEL_TO:
3288 if (scb->bus)
3289 errcode = DID_NO_CONNECT;
3290
3291 break;
3292
3293 case IPS_ERR_OU_RUN:
3294 if ((scb->cmd.dcdb.op_code == IPS_CMD_EXTENDED_DCDB) ||
3295 (scb->cmd.dcdb.op_code ==
3296 IPS_CMD_EXTENDED_DCDB_SG)) {
3297 tapeDCDB = (IPS_DCDB_TABLE_TAPE *) & scb->dcdb;
3298 transfer_len = tapeDCDB->transfer_length;
3299 } else {
3300 transfer_len =
3301 (uint32_t) scb->dcdb.transfer_length;
3302 }
3303
3304 if ((scb->bus) && (transfer_len < scb->data_len)) {
3305
3306 errcode = DID_OK;
3307
3308
3309 if (scb->scsi_cmd->cmnd[0] == INQUIRY) {
3310 ips_scmd_buf_read(scb->scsi_cmd,
3311 &inquiryData, sizeof (inquiryData));
3312 if ((inquiryData.DeviceType & 0x1f) == TYPE_DISK) {
3313 errcode = DID_TIME_OUT;
3314 break;
3315 }
3316 }
3317 } else
3318 errcode = DID_ERROR;
3319
3320 break;
3321
3322 case IPS_ERR_RECOVERY:
3323
3324 if (scb->bus)
3325 errcode = DID_OK;
3326
3327 break;
3328
3329 case IPS_ERR_HOST_RESET:
3330 case IPS_ERR_DEV_RESET:
3331 errcode = DID_RESET;
3332 break;
3333
3334 case IPS_ERR_CKCOND:
3335 if (scb->bus) {
3336 if ((scb->cmd.dcdb.op_code ==
3337 IPS_CMD_EXTENDED_DCDB)
3338 || (scb->cmd.dcdb.op_code ==
3339 IPS_CMD_EXTENDED_DCDB_SG)) {
3340 tapeDCDB =
3341 (IPS_DCDB_TABLE_TAPE *) & scb->dcdb;
3342 memcpy(scb->scsi_cmd->sense_buffer,
3343 tapeDCDB->sense_info,
3344 SCSI_SENSE_BUFFERSIZE);
3345 } else {
3346 memcpy(scb->scsi_cmd->sense_buffer,
3347 scb->dcdb.sense_info,
3348 SCSI_SENSE_BUFFERSIZE);
3349 }
3350 device_error = 2;
3351 }
3352
3353 errcode = DID_OK;
3354
3355 break;
3356
3357 default:
3358 errcode = DID_ERROR;
3359 break;
3360
3361 }
3362 }
3363
3364 scb->scsi_cmd->result = device_error | (errcode << 16);
3365
3366 return (1);
3367}
3368
3369
3370
3371
3372
3373
3374
3375
3376
3377
3378
3379
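/*
 * ips_send_wait - issue an SCB with the blocking completion callback
 * and, except for FFDC requests, poll via ips_wait() until the command
 * completes or the timeout expires.
 */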
3380static int
3381ips_send_wait(ips_ha_t * ha, ips_scb_t * scb, int timeout, int intr)
3382{
3383 int ret;
3384
3385 METHOD_TRACE("ips_send_wait", 1);
3386
3387 if (intr != IPS_FFDC) {
3388 ha->waitflag = TRUE;
3389 ha->cmd_in_progress = scb->cdb[0];
3390 }
3391 scb->callback = ipsintr_blocking;
3392 ret = ips_send_cmd(ha, scb);
3393
3394 if ((ret == IPS_FAILURE) || (ret == IPS_SUCCESS_IMM))
3395 return (ret);
3396
3397 if (intr != IPS_FFDC)
3398 ret = ips_wait(ha, timeout, intr);
3399
3400 return (ret);
3401}
3402
3403
3404
3405
3406
3407
3408
3409
3410static void
3411ips_scmd_buf_write(struct scsi_cmnd *scmd, void *data, unsigned int count)
3412{
3413 unsigned long flags;
3414
3415 local_irq_save(flags);
3416 scsi_sg_copy_from_buffer(scmd, data, count);
3417 local_irq_restore(flags);
3418}
3419
3420
3421
3422
3423
3424
3425
3426
3427static void
3428ips_scmd_buf_read(struct scsi_cmnd *scmd, void *data, unsigned int count)
3429{
3430 unsigned long flags;
3431
3432 local_irq_save(flags);
3433 scsi_sg_copy_to_buffer(scmd, data, count);
3434 local_irq_restore(flags);
3435}
3436
3437
3438
3439
3440
3441
3442
3443
3444
3445
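/*
 * ips_send_cmd - convert a SCSI command into an adapter command.
 *
 * Logical-drive requests are either emulated in the driver (INQUIRY,
 * REQUEST SENSE, MODE SENSE, READ CAPACITY, ...) or mapped onto the
 * adapter's read/write and enquiry opcodes; physical-bus requests are
 * wrapped in (extended) DCDB packets.  Returns IPS_SUCCESS_IMM when the
 * command was completed here, otherwise the result of the hardware
 * issue routine.
 */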
3446static int
3447ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
3448{
3449 int ret;
3450 char *sp;
3451 int device_error;
3452 IPS_DCDB_TABLE_TAPE *tapeDCDB;
3453 int TimeOut;
3454
3455 METHOD_TRACE("ips_send_cmd", 1);
3456
3457 ret = IPS_SUCCESS;
3458
3459 if (!scb->scsi_cmd) {
3460
3461
3462 if (scb->bus > 0) {
3463
3464
3465 if ((ha->waitflag == TRUE) &&
3466 (ha->cmd_in_progress == scb->cdb[0])) {
3467 ha->waitflag = FALSE;
3468 }
3469
3470 return (1);
3471 }
3472 } else if ((scb->bus == 0) && (!ips_is_passthru(scb->scsi_cmd))) {
3473
3474 ret = IPS_SUCCESS_IMM;
3475
3476 switch (scb->scsi_cmd->cmnd[0]) {
3477 case ALLOW_MEDIUM_REMOVAL:
3478 case REZERO_UNIT:
3479 case ERASE:
3480 case WRITE_FILEMARKS:
3481 case SPACE:
3482 scb->scsi_cmd->result = DID_ERROR << 16;
3483 break;
3484
3485 case START_STOP:
3486 scb->scsi_cmd->result = DID_OK << 16;
3487 break;
3488
3489 case TEST_UNIT_READY:
3490 case INQUIRY:
3491 if (scb->target_id == IPS_ADAPTER_ID) {
3492
3493
3494
3495
3496 if (scb->scsi_cmd->cmnd[0] == TEST_UNIT_READY)
3497 scb->scsi_cmd->result = DID_OK << 16;
3498
3499 if (scb->scsi_cmd->cmnd[0] == INQUIRY) {
3500 IPS_SCSI_INQ_DATA inquiry;
3501
3502 memset(&inquiry, 0,
3503 sizeof (IPS_SCSI_INQ_DATA));
3504
3505 inquiry.DeviceType =
3506 IPS_SCSI_INQ_TYPE_PROCESSOR;
3507 inquiry.DeviceTypeQualifier =
3508 IPS_SCSI_INQ_LU_CONNECTED;
3509 inquiry.Version = IPS_SCSI_INQ_REV2;
3510 inquiry.ResponseDataFormat =
3511 IPS_SCSI_INQ_RD_REV2;
3512 inquiry.AdditionalLength = 31;
3513 inquiry.Flags[0] =
3514 IPS_SCSI_INQ_Address16;
3515 inquiry.Flags[1] =
3516 IPS_SCSI_INQ_WBus16 |
3517 IPS_SCSI_INQ_Sync;
3518 memcpy(inquiry.VendorId, "IBM     ",
3519 8);
3520 memcpy(inquiry.ProductId,
3521 "SERVERAID       ", 16);
3522 memcpy(inquiry.ProductRevisionLevel,
3523 "1.00", 4);
3524
3525 ips_scmd_buf_write(scb->scsi_cmd,
3526 &inquiry,
3527 sizeof (inquiry));
3528
3529 scb->scsi_cmd->result = DID_OK << 16;
3530 }
3531 } else {
3532 scb->cmd.logical_info.op_code = IPS_CMD_GET_LD_INFO;
3533 scb->cmd.logical_info.command_id = IPS_COMMAND_ID(ha, scb);
3534 scb->cmd.logical_info.reserved = 0;
3535 scb->cmd.logical_info.reserved2 = 0;
3536 scb->data_len = sizeof (IPS_LD_INFO);
3537 scb->data_busaddr = ha->logical_drive_info_dma_addr;
3538 scb->flags = 0;
3539 scb->cmd.logical_info.buffer_addr = scb->data_busaddr;
3540 ret = IPS_SUCCESS;
3541 }
3542
3543 break;
3544
3545 case REQUEST_SENSE:
3546 ips_reqsen(ha, scb);
3547 scb->scsi_cmd->result = DID_OK << 16;
3548 break;
3549
3550 case READ_6:
3551 case WRITE_6:
3552 if (!scb->sg_len) {
3553 scb->cmd.basic_io.op_code =
3554 (scb->scsi_cmd->cmnd[0] ==
3555 READ_6) ? IPS_CMD_READ : IPS_CMD_WRITE;
3556 scb->cmd.basic_io.enhanced_sg = 0;
3557 scb->cmd.basic_io.sg_addr =
3558 cpu_to_le32(scb->data_busaddr);
3559 } else {
3560 scb->cmd.basic_io.op_code =
3561 (scb->scsi_cmd->cmnd[0] ==
3562 READ_6) ? IPS_CMD_READ_SG :
3563 IPS_CMD_WRITE_SG;
3564 scb->cmd.basic_io.enhanced_sg =
3565 IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0;
3566 scb->cmd.basic_io.sg_addr =
3567 cpu_to_le32(scb->sg_busaddr);
3568 }
3569
3570 scb->cmd.basic_io.segment_4G = 0;
3571 scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
3572 scb->cmd.basic_io.log_drv = scb->target_id;
3573 scb->cmd.basic_io.sg_count = scb->sg_len;
3574
3575 if (scb->cmd.basic_io.lba)
3576 le32_add_cpu(&scb->cmd.basic_io.lba,
3577 le16_to_cpu(scb->cmd.basic_io.
3578 sector_count));
3579 else
3580 scb->cmd.basic_io.lba =
3581 (((scb->scsi_cmd->
3582 cmnd[1] & 0x1f) << 16) | (scb->scsi_cmd->
3583 cmnd[2] << 8) |
3584 (scb->scsi_cmd->cmnd[3]));
3585
3586 scb->cmd.basic_io.sector_count =
3587 cpu_to_le16(scb->data_len / IPS_BLKSIZE);
3588
3589 if (le16_to_cpu(scb->cmd.basic_io.sector_count) == 0)
3590 scb->cmd.basic_io.sector_count =
3591 cpu_to_le16(256);
3592
3593 ret = IPS_SUCCESS;
3594 break;
3595
3596 case READ_10:
3597 case WRITE_10:
3598 if (!scb->sg_len) {
3599 scb->cmd.basic_io.op_code =
3600 (scb->scsi_cmd->cmnd[0] ==
3601 READ_10) ? IPS_CMD_READ : IPS_CMD_WRITE;
3602 scb->cmd.basic_io.enhanced_sg = 0;
3603 scb->cmd.basic_io.sg_addr =
3604 cpu_to_le32(scb->data_busaddr);
3605 } else {
3606 scb->cmd.basic_io.op_code =
3607 (scb->scsi_cmd->cmnd[0] ==
3608 READ_10) ? IPS_CMD_READ_SG :
3609 IPS_CMD_WRITE_SG;
3610 scb->cmd.basic_io.enhanced_sg =
3611 IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0;
3612 scb->cmd.basic_io.sg_addr =
3613 cpu_to_le32(scb->sg_busaddr);
3614 }
3615
3616 scb->cmd.basic_io.segment_4G = 0;
3617 scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
3618 scb->cmd.basic_io.log_drv = scb->target_id;
3619 scb->cmd.basic_io.sg_count = scb->sg_len;
3620
3621 if (scb->cmd.basic_io.lba)
3622 le32_add_cpu(&scb->cmd.basic_io.lba,
3623 le16_to_cpu(scb->cmd.basic_io.
3624 sector_count));
3625 else
3626 scb->cmd.basic_io.lba =
3627 ((scb->scsi_cmd->cmnd[2] << 24) | (scb->
3628 scsi_cmd->
3629 cmnd[3]
3630 << 16) |
3631 (scb->scsi_cmd->cmnd[4] << 8) | scb->
3632 scsi_cmd->cmnd[5]);
3633
3634 scb->cmd.basic_io.sector_count =
3635 cpu_to_le16(scb->data_len / IPS_BLKSIZE);
3636
3637 if (le16_to_cpu(scb->cmd.basic_io.sector_count) == 0) {
3638
3639
3640
3641
3642
3643 scb->scsi_cmd->result = DID_OK << 16;
3644 } else
3645 ret = IPS_SUCCESS;
3646
3647 break;
3648
3649 case RESERVE:
3650 case RELEASE:
3651 scb->scsi_cmd->result = DID_OK << 16;
3652 break;
3653
3654 case MODE_SENSE:
3655 scb->cmd.basic_io.op_code = IPS_CMD_ENQUIRY;
3656 scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
3657 scb->cmd.basic_io.segment_4G = 0;
3658 scb->cmd.basic_io.enhanced_sg = 0;
3659 scb->data_len = sizeof (*ha->enq);
3660 scb->cmd.basic_io.sg_addr = ha->enq_busaddr;
3661 ret = IPS_SUCCESS;
3662 break;
3663
3664 case READ_CAPACITY:
3665 scb->cmd.logical_info.op_code = IPS_CMD_GET_LD_INFO;
3666 scb->cmd.logical_info.command_id = IPS_COMMAND_ID(ha, scb);
3667 scb->cmd.logical_info.reserved = 0;
3668 scb->cmd.logical_info.reserved2 = 0;
3669 scb->cmd.logical_info.reserved3 = 0;
3670 scb->data_len = sizeof (IPS_LD_INFO);
3671 scb->data_busaddr = ha->logical_drive_info_dma_addr;
3672 scb->flags = 0;
3673 scb->cmd.logical_info.buffer_addr = scb->data_busaddr;
3674 ret = IPS_SUCCESS;
3675 break;
3676
3677 case SEND_DIAGNOSTIC:
3678 case REASSIGN_BLOCKS:
3679 case FORMAT_UNIT:
3680 case SEEK_10:
3681 case VERIFY:
3682 case READ_DEFECT_DATA:
3683 case READ_BUFFER:
3684 case WRITE_BUFFER:
3685 scb->scsi_cmd->result = DID_OK << 16;
3686 break;
3687
3688 default:
3689
3690
3691
3692 sp = (char *) scb->scsi_cmd->sense_buffer;
3693
3694 sp[0] = 0x70;
3695 sp[2] = ILLEGAL_REQUEST;
3696 sp[7] = 0x0A;
3697 sp[12] = 0x20;
3698 sp[13] = 0x00;
3699
3700 device_error = 2;
3701 scb->scsi_cmd->result = device_error | (DID_OK << 16);
3702 break;
3703 }
3704 }
3705
3706 if (ret == IPS_SUCCESS_IMM)
3707 return (ret);
3708
3709
3710 if (scb->bus > 0) {
3711
3712
3713
3714 if (ha->conf->dev[scb->bus - 1][scb->target_id].ucState == 0) {
3715 scb->scsi_cmd->result = DID_NO_CONNECT << 16;
3716 return (IPS_SUCCESS_IMM);
3717 }
3718
3719 ha->dcdb_active[scb->bus - 1] |= (1 << scb->target_id);
3720 scb->cmd.dcdb.command_id = IPS_COMMAND_ID(ha, scb);
3721 scb->cmd.dcdb.dcdb_address = cpu_to_le32(scb->scb_busaddr +
3722 (unsigned long) &scb->
3723 dcdb -
3724 (unsigned long) scb);
3725 scb->cmd.dcdb.reserved = 0;
3726 scb->cmd.dcdb.reserved2 = 0;
3727 scb->cmd.dcdb.reserved3 = 0;
3728 scb->cmd.dcdb.segment_4G = 0;
3729 scb->cmd.dcdb.enhanced_sg = 0;
3730
3731 TimeOut = scb->scsi_cmd->request->timeout;
3732
3733 if (ha->subsys->param[4] & 0x00100000) {
3734 if (!scb->sg_len) {
3735 scb->cmd.dcdb.op_code = IPS_CMD_EXTENDED_DCDB;
3736 } else {
3737 scb->cmd.dcdb.op_code =
3738 IPS_CMD_EXTENDED_DCDB_SG;
3739 scb->cmd.dcdb.enhanced_sg =
3740 IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0;
3741 }
3742
3743 tapeDCDB = (IPS_DCDB_TABLE_TAPE *) & scb->dcdb;
3744 tapeDCDB->device_address =
3745 ((scb->bus - 1) << 4) | scb->target_id;
3746 tapeDCDB->cmd_attribute |= IPS_DISCONNECT_ALLOWED;
3747 tapeDCDB->cmd_attribute &= ~IPS_TRANSFER64K;
3748
3749 if (TimeOut) {
3750 if (TimeOut < (10 * HZ))
3751 tapeDCDB->cmd_attribute |= IPS_TIMEOUT10;
3752 else if (TimeOut < (60 * HZ))
3753 tapeDCDB->cmd_attribute |= IPS_TIMEOUT60;
3754 else if (TimeOut < (1200 * HZ))
3755 tapeDCDB->cmd_attribute |= IPS_TIMEOUT20M;
3756 }
3757
3758 tapeDCDB->cdb_length = scb->scsi_cmd->cmd_len;
3759 tapeDCDB->reserved_for_LUN = 0;
3760 tapeDCDB->transfer_length = scb->data_len;
3761 if (scb->cmd.dcdb.op_code == IPS_CMD_EXTENDED_DCDB_SG)
3762 tapeDCDB->buffer_pointer =
3763 cpu_to_le32(scb->sg_busaddr);
3764 else
3765 tapeDCDB->buffer_pointer =
3766 cpu_to_le32(scb->data_busaddr);
3767 tapeDCDB->sg_count = scb->sg_len;
3768 tapeDCDB->sense_length = sizeof (tapeDCDB->sense_info);
3769 tapeDCDB->scsi_status = 0;
3770 tapeDCDB->reserved = 0;
3771 memcpy(tapeDCDB->scsi_cdb, scb->scsi_cmd->cmnd,
3772 scb->scsi_cmd->cmd_len);
3773 } else {
3774 if (!scb->sg_len) {
3775 scb->cmd.dcdb.op_code = IPS_CMD_DCDB;
3776 } else {
3777 scb->cmd.dcdb.op_code = IPS_CMD_DCDB_SG;
3778 scb->cmd.dcdb.enhanced_sg =
3779 IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0;
3780 }
3781
3782 scb->dcdb.device_address =
3783 ((scb->bus - 1) << 4) | scb->target_id;
3784 scb->dcdb.cmd_attribute |= IPS_DISCONNECT_ALLOWED;
3785
3786 if (TimeOut) {
3787 if (TimeOut < (10 * HZ))
3788 scb->dcdb.cmd_attribute |= IPS_TIMEOUT10;
3789 else if (TimeOut < (60 * HZ))
3790 scb->dcdb.cmd_attribute |= IPS_TIMEOUT60;
3791 else if (TimeOut < (1200 * HZ))
3792 scb->dcdb.cmd_attribute |= IPS_TIMEOUT20M;
3793 }
3794
3795 scb->dcdb.transfer_length = scb->data_len;
3796 if (scb->dcdb.cmd_attribute & IPS_TRANSFER64K)
3797 scb->dcdb.transfer_length = 0;
3798 if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB_SG)
3799 scb->dcdb.buffer_pointer =
3800 cpu_to_le32(scb->sg_busaddr);
3801 else
3802 scb->dcdb.buffer_pointer =
3803 cpu_to_le32(scb->data_busaddr);
3804 scb->dcdb.cdb_length = scb->scsi_cmd->cmd_len;
3805 scb->dcdb.sense_length = sizeof (scb->dcdb.sense_info);
3806 scb->dcdb.sg_count = scb->sg_len;
3807 scb->dcdb.reserved = 0;
3808 memcpy(scb->dcdb.scsi_cdb, scb->scsi_cmd->cmnd,
3809 scb->scsi_cmd->cmd_len);
3810 scb->dcdb.scsi_status = 0;
3811 scb->dcdb.reserved2[0] = 0;
3812 scb->dcdb.reserved2[1] = 0;
3813 scb->dcdb.reserved2[2] = 0;
3814 }
3815 }
3816
3817 return ((*ha->func.issue) (ha, scb));
3818}
3819
3820
3821
3822
3823
3824
3825
3826
3827
3828
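/*
 * ips_chkstatus - examine the completion status of one SCB.
 *
 * Removes the SCB from the active list and, for emulated logical-drive
 * commands, performs the follow-up work (online check, inquiry, mode
 * sense, read capacity data); adapter errors are mapped through
 * ips_map_status().
 */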
3829static void
3830ips_chkstatus(ips_ha_t * ha, IPS_STATUS * pstatus)
3831{
3832 ips_scb_t *scb;
3833 ips_stat_t *sp;
3834 uint8_t basic_status;
3835 uint8_t ext_status;
3836 int errcode;
3837 IPS_SCSI_INQ_DATA inquiryData;
3838
3839 METHOD_TRACE("ips_chkstatus", 1);
3840
3841 scb = &ha->scbs[pstatus->fields.command_id];
3842 scb->basic_status = basic_status =
3843 pstatus->fields.basic_status & IPS_BASIC_STATUS_MASK;
3844 scb->extended_status = ext_status = pstatus->fields.extended_status;
3845
3846 sp = &ha->sp;
3847 sp->residue_len = 0;
3848 sp->scb_addr = (void *) scb;
3849
3850
3851 ips_removeq_scb(&ha->scb_activelist, scb);
3852
3853 if (!scb->scsi_cmd)
3854
3855 return;
3856
3857 DEBUG_VAR(2, "(%s%d) ips_chkstatus: cmd 0x%X id %d (%d %d %d)",
3858 ips_name,
3859 ha->host_num,
3860 scb->cdb[0],
3861 scb->cmd.basic_io.command_id,
3862 scb->bus, scb->target_id, scb->lun);
3863
3864 if ((scb->scsi_cmd) && (ips_is_passthru(scb->scsi_cmd)))
3865
3866 return;
3867
3868 errcode = DID_OK;
3869
3870 if (((basic_status & IPS_GSC_STATUS_MASK) == IPS_CMD_SUCCESS) ||
3871 ((basic_status & IPS_GSC_STATUS_MASK) == IPS_CMD_RECOVERED_ERROR)) {
3872
3873 if (scb->bus == 0) {
3874 if ((basic_status & IPS_GSC_STATUS_MASK) ==
3875 IPS_CMD_RECOVERED_ERROR) {
3876 DEBUG_VAR(1,
3877 "(%s%d) Recovered Logical Drive Error OpCode: %x, BSB: %x, ESB: %x",
3878 ips_name, ha->host_num,
3879 scb->cmd.basic_io.op_code,
3880 basic_status, ext_status);
3881 }
3882
3883 switch (scb->scsi_cmd->cmnd[0]) {
3884 case ALLOW_MEDIUM_REMOVAL:
3885 case REZERO_UNIT:
3886 case ERASE:
3887 case WRITE_FILEMARKS:
3888 case SPACE:
3889 errcode = DID_ERROR;
3890 break;
3891
3892 case START_STOP:
3893 break;
3894
3895 case TEST_UNIT_READY:
3896 if (!ips_online(ha, scb)) {
3897 errcode = DID_TIME_OUT;
3898 }
3899 break;
3900
3901 case INQUIRY:
3902 if (ips_online(ha, scb)) {
3903 ips_inquiry(ha, scb);
3904 } else {
3905 errcode = DID_TIME_OUT;
3906 }
3907 break;
3908
3909 case REQUEST_SENSE:
3910 ips_reqsen(ha, scb);
3911 break;
3912
3913 case READ_6:
3914 case WRITE_6:
3915 case READ_10:
3916 case WRITE_10:
3917 case RESERVE:
3918 case RELEASE:
3919 break;
3920
3921 case MODE_SENSE:
3922 if (!ips_online(ha, scb)
3923 || !ips_msense(ha, scb)) {
3924 errcode = DID_ERROR;
3925 }
3926 break;
3927
3928 case READ_CAPACITY:
3929 if (ips_online(ha, scb))
3930 ips_rdcap(ha, scb);
3931 else {
3932 errcode = DID_TIME_OUT;
3933 }
3934 break;
3935
3936 case SEND_DIAGNOSTIC:
3937 case REASSIGN_BLOCKS:
3938 break;
3939
3940 case FORMAT_UNIT:
3941 errcode = DID_ERROR;
3942 break;
3943
3944 case SEEK_10:
3945 case VERIFY:
3946 case READ_DEFECT_DATA:
3947 case READ_BUFFER:
3948 case WRITE_BUFFER:
3949 break;
3950
3951 default:
3952 errcode = DID_ERROR;
3953 }
3954
3955 scb->scsi_cmd->result = errcode << 16;
3956 } else {
3957
3958 if (scb->scsi_cmd->cmnd[0] == INQUIRY) {
3959 ips_scmd_buf_read(scb->scsi_cmd,
3960 &inquiryData, sizeof (inquiryData));
3961 if ((inquiryData.DeviceType & 0x1f) == TYPE_DISK)
3962 scb->scsi_cmd->result = DID_TIME_OUT << 16;
3963 }
3964 }
3965 } else {
3966 if (scb->bus == 0) {
3967 DEBUG_VAR(1,
3968 "(%s%d) Unrecovered Logical Drive Error OpCode: %x, BSB: %x, ESB: %x",
3969 ips_name, ha->host_num,
3970 scb->cmd.basic_io.op_code, basic_status,
3971 ext_status);
3972 }
3973
3974 ips_map_status(ha, scb, sp);
3975 }
3976}
3977
3978
3979
3980
3981
3982
3983
3984
3985
3986
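/*
 * ips_online - return 1 if the logical drive addressed by the SCB is in
 * a usable state according to the cached GET_LD_INFO data, 0 otherwise.
 */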
3987static int
3988ips_online(ips_ha_t * ha, ips_scb_t * scb)
3989{
3990 METHOD_TRACE("ips_online", 1);
3991
3992 if (scb->target_id >= IPS_MAX_LD)
3993 return (0);
3994
3995 if ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1) {
3996 memset(ha->logical_drive_info, 0, sizeof (IPS_LD_INFO));
3997 return (0);
3998 }
3999
4000 if (ha->logical_drive_info->drive_info[scb->target_id].state !=
4001 IPS_LD_OFFLINE
4002 && ha->logical_drive_info->drive_info[scb->target_id].state !=
4003 IPS_LD_FREE
4004 && ha->logical_drive_info->drive_info[scb->target_id].state !=
4005 IPS_LD_CRS
4006 && ha->logical_drive_info->drive_info[scb->target_id].state !=
4007 IPS_LD_SYS)
4008 return (1);
4009 else
4010 return (0);
4011}
4012
4013
4014
4015
4016
4017
4018
4019
4020
4021
4022static int
4023ips_inquiry(ips_ha_t * ha, ips_scb_t * scb)
4024{
4025 IPS_SCSI_INQ_DATA inquiry;
4026
4027 METHOD_TRACE("ips_inquiry", 1);
4028
4029 memset(&inquiry, 0, sizeof (IPS_SCSI_INQ_DATA));
4030
4031 inquiry.DeviceType = IPS_SCSI_INQ_TYPE_DASD;
4032 inquiry.DeviceTypeQualifier = IPS_SCSI_INQ_LU_CONNECTED;
4033 inquiry.Version = IPS_SCSI_INQ_REV2;
4034 inquiry.ResponseDataFormat = IPS_SCSI_INQ_RD_REV2;
4035 inquiry.AdditionalLength = 31;
4036 inquiry.Flags[0] = IPS_SCSI_INQ_Address16;
4037 inquiry.Flags[1] =
4038 IPS_SCSI_INQ_WBus16 | IPS_SCSI_INQ_Sync | IPS_SCSI_INQ_CmdQue;
4039 memcpy(inquiry.VendorId, "IBM     ", 8);
4040 memcpy(inquiry.ProductId, "SERVERAID       ", 16);
4041 memcpy(inquiry.ProductRevisionLevel, "1.00", 4);
4042
4043 ips_scmd_buf_write(scb->scsi_cmd, &inquiry, sizeof (inquiry));
4044
4045 return (1);
4046}
4047
4048
4049
4050
4051
4052
4053
4054
4055
4056
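/*
 * ips_rdcap - emulate READ CAPACITY for a logical drive from the cached
 * logical drive information (last LBA and IPS_BLKSIZE-byte blocks).
 */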
4057static int
4058ips_rdcap(ips_ha_t * ha, ips_scb_t * scb)
4059{
4060 IPS_SCSI_CAPACITY cap;
4061
4062 METHOD_TRACE("ips_rdcap", 1);
4063
4064 if (scsi_bufflen(scb->scsi_cmd) < 8)
4065 return (0);
4066
4067 cap.lba =
4068 cpu_to_be32(le32_to_cpu
4069 (ha->logical_drive_info->
4070 drive_info[scb->target_id].sector_count) - 1);
4071 cap.len = cpu_to_be32((uint32_t) IPS_BLKSIZE);
4072
4073 ips_scmd_buf_write(scb->scsi_cmd, &cap, sizeof (cap));
4074
4075 return (1);
4076}
4077
4078
4079
4080
4081
4082
4083
4084
4085
4086
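/*
 * ips_msense - emulate MODE SENSE for a logical drive, synthesizing the
 * format (page 3), geometry (page 4) or caching (page 8) data from the
 * enquiry drive-size information.
 */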
4087static int
4088ips_msense(ips_ha_t * ha, ips_scb_t * scb)
4089{
4090 uint16_t heads;
4091 uint16_t sectors;
4092 uint32_t cylinders;
4093 IPS_SCSI_MODE_PAGE_DATA mdata;
4094
4095 METHOD_TRACE("ips_msense", 1);
4096
4097 if (le32_to_cpu(ha->enq->ulDriveSize[scb->target_id]) > 0x400000 &&
4098 (ha->enq->ucMiscFlag & 0x8) == 0) {
4099 heads = IPS_NORM_HEADS;
4100 sectors = IPS_NORM_SECTORS;
4101 } else {
4102 heads = IPS_COMP_HEADS;
4103 sectors = IPS_COMP_SECTORS;
4104 }
4105
4106 cylinders =
4107 (le32_to_cpu(ha->enq->ulDriveSize[scb->target_id]) -
4108 1) / (heads * sectors);
4109
4110 memset(&mdata, 0, sizeof (IPS_SCSI_MODE_PAGE_DATA));
4111
4112 mdata.hdr.BlockDescLength = 8;
4113
4114 switch (scb->scsi_cmd->cmnd[2] & 0x3f) {
4115 case 0x03:
4116 mdata.pdata.pg3.PageCode = 3;
4117 mdata.pdata.pg3.PageLength = sizeof (IPS_SCSI_MODE_PAGE3);
4118 mdata.hdr.DataLength =
4119 3 + mdata.hdr.BlockDescLength + mdata.pdata.pg3.PageLength;
4120 mdata.pdata.pg3.TracksPerZone = 0;
4121 mdata.pdata.pg3.AltSectorsPerZone = 0;
4122 mdata.pdata.pg3.AltTracksPerZone = 0;
4123 mdata.pdata.pg3.AltTracksPerVolume = 0;
4124 mdata.pdata.pg3.SectorsPerTrack = cpu_to_be16(sectors);
4125 mdata.pdata.pg3.BytesPerSector = cpu_to_be16(IPS_BLKSIZE);
4126 mdata.pdata.pg3.Interleave = cpu_to_be16(1);
4127 mdata.pdata.pg3.TrackSkew = 0;
4128 mdata.pdata.pg3.CylinderSkew = 0;
4129 mdata.pdata.pg3.flags = IPS_SCSI_MP3_SoftSector;
4130 break;
4131
4132 case 0x4:
4133 mdata.pdata.pg4.PageCode = 4;
4134 mdata.pdata.pg4.PageLength = sizeof (IPS_SCSI_MODE_PAGE4);
4135 mdata.hdr.DataLength =
4136 3 + mdata.hdr.BlockDescLength + mdata.pdata.pg4.PageLength;
4137 mdata.pdata.pg4.CylindersHigh =
4138 cpu_to_be16((cylinders >> 8) & 0xFFFF);
4139 mdata.pdata.pg4.CylindersLow = (cylinders & 0xFF);
4140 mdata.pdata.pg4.Heads = heads;
4141 mdata.pdata.pg4.WritePrecompHigh = 0;
4142 mdata.pdata.pg4.WritePrecompLow = 0;
4143 mdata.pdata.pg4.ReducedWriteCurrentHigh = 0;
4144 mdata.pdata.pg4.ReducedWriteCurrentLow = 0;
4145 mdata.pdata.pg4.StepRate = cpu_to_be16(1);
4146 mdata.pdata.pg4.LandingZoneHigh = 0;
4147 mdata.pdata.pg4.LandingZoneLow = 0;
4148 mdata.pdata.pg4.flags = 0;
4149 mdata.pdata.pg4.RotationalOffset = 0;
4150 mdata.pdata.pg4.MediumRotationRate = 0;
4151 break;
4152 case 0x8:
4153 mdata.pdata.pg8.PageCode = 8;
4154 mdata.pdata.pg8.PageLength = sizeof (IPS_SCSI_MODE_PAGE8);
4155 mdata.hdr.DataLength =
4156 3 + mdata.hdr.BlockDescLength + mdata.pdata.pg8.PageLength;
4157
4158 break;
4159
4160 default:
4161 return (0);
4162 }
4163
4164 ips_scmd_buf_write(scb->scsi_cmd, &mdata, sizeof (mdata));
4165
4166 return (1);
4167}
4168
4169
4170
4171
4172
4173
4174
4175
4176
4177
4178static int
4179ips_reqsen(ips_ha_t * ha, ips_scb_t * scb)
4180{
4181 IPS_SCSI_REQSEN reqsen;
4182
4183 METHOD_TRACE("ips_reqsen", 1);
4184
4185 memset(&reqsen, 0, sizeof (IPS_SCSI_REQSEN));
4186
4187 reqsen.ResponseCode =
4188 IPS_SCSI_REQSEN_VALID | IPS_SCSI_REQSEN_CURRENT_ERR;
4189 reqsen.AdditionalLength = 10;
4190 reqsen.AdditionalSenseCode = IPS_SCSI_REQSEN_NO_SENSE;
4191 reqsen.AdditionalSenseCodeQual = IPS_SCSI_REQSEN_NO_SENSE;
4192
4193 ips_scmd_buf_write(scb->scsi_cmd, &reqsen, sizeof (reqsen));
4194
4195 return (1);
4196}
4197
4198
4199
4200
4201
4202
4203
4204
4205
4206
4207static void
4208ips_free(ips_ha_t * ha)
4209{
4210
4211 METHOD_TRACE("ips_free", 1);
4212
4213 if (ha) {
4214 if (ha->enq) {
4215 dma_free_coherent(&ha->pcidev->dev, sizeof(IPS_ENQ),
4216 ha->enq, ha->enq_busaddr);
4217 ha->enq = NULL;
4218 }
4219
4220 kfree(ha->conf);
4221 ha->conf = NULL;
4222
4223 if (ha->adapt) {
4224 dma_free_coherent(&ha->pcidev->dev,
4225 sizeof (IPS_ADAPTER) +
4226 sizeof (IPS_IO_CMD), ha->adapt,
4227 ha->adapt->hw_status_start);
4228 ha->adapt = NULL;
4229 }
4230
4231 if (ha->logical_drive_info) {
4232 dma_free_coherent(&ha->pcidev->dev,
4233 sizeof (IPS_LD_INFO),
4234 ha->logical_drive_info,
4235 ha->logical_drive_info_dma_addr);
4236 ha->logical_drive_info = NULL;
4237 }
4238
4239 kfree(ha->nvram);
4240 ha->nvram = NULL;
4241
4242 kfree(ha->subsys);
4243 ha->subsys = NULL;
4244
4245 if (ha->ioctl_data) {
4246 dma_free_coherent(&ha->pcidev->dev, ha->ioctl_len,
4247 ha->ioctl_data, ha->ioctl_busaddr);
4248 ha->ioctl_data = NULL;
4249 ha->ioctl_datasize = 0;
4250 ha->ioctl_len = 0;
4251 }
4252 ips_deallocatescbs(ha, ha->max_cmds);
4253
4254
4255 if (ha->mem_ptr) {
4256 iounmap(ha->ioremap_ptr);
4257 ha->ioremap_ptr = NULL;
4258 ha->mem_ptr = NULL;
4259 }
4260
4261 ha->mem_addr = 0;
4262
4263 }
4264}
4265
4266
4267
4268
4269
4270
4271
4272
4273
4274
4275static int
4276ips_deallocatescbs(ips_ha_t * ha, int cmds)
4277{
4278 if (ha->scbs) {
4279 dma_free_coherent(&ha->pcidev->dev,
4280 IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * cmds,
4281 ha->scbs->sg_list.list,
4282 ha->scbs->sg_busaddr);
4283 dma_free_coherent(&ha->pcidev->dev, sizeof (ips_scb_t) * cmds,
4284 ha->scbs, ha->scbs->scb_busaddr);
4285 ha->scbs = NULL;
4286 }
4287 return 1;
4288}
4289
4290
4291
4292
4293
4294
4295
4296
4297
4298
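/*
 * ips_allocatescbs - allocate the coherent DMA areas for the SCB array
 * and its scatter/gather lists and thread the SCBs onto the free list.
 * The last SCB is kept off the free list for the driver's internal use.
 */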
4299static int
4300ips_allocatescbs(ips_ha_t * ha)
4301{
4302 ips_scb_t *scb_p;
4303 IPS_SG_LIST ips_sg;
4304 int i;
4305 dma_addr_t command_dma, sg_dma;
4306
4307 METHOD_TRACE("ips_allocatescbs", 1);
4308
4309
4310 ha->scbs = dma_alloc_coherent(&ha->pcidev->dev,
4311 ha->max_cmds * sizeof (ips_scb_t),
4312 &command_dma, GFP_KERNEL);
4313 if (ha->scbs == NULL)
4314 return 0;
4315 ips_sg.list = dma_alloc_coherent(&ha->pcidev->dev,
4316 IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * ha->max_cmds,
4317 &sg_dma, GFP_KERNEL);
4318 if (ips_sg.list == NULL) {
4319 dma_free_coherent(&ha->pcidev->dev,
4320 ha->max_cmds * sizeof (ips_scb_t), ha->scbs,
4321 command_dma);
4322 return 0;
4323 }
4324
4325 memset(ha->scbs, 0, ha->max_cmds * sizeof (ips_scb_t));
4326
4327 for (i = 0; i < ha->max_cmds; i++) {
4328 scb_p = &ha->scbs[i];
4329 scb_p->scb_busaddr = command_dma + sizeof (ips_scb_t) * i;
4330
4331 if (IPS_USE_ENH_SGLIST(ha)) {
4332 scb_p->sg_list.enh_list =
4333 ips_sg.enh_list + i * IPS_MAX_SG;
4334 scb_p->sg_busaddr =
4335 sg_dma + IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * i;
4336 } else {
4337 scb_p->sg_list.std_list =
4338 ips_sg.std_list + i * IPS_MAX_SG;
4339 scb_p->sg_busaddr =
4340 sg_dma + IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * i;
4341 }
4342
4343
4344 if (i < ha->max_cmds - 1) {
4345 scb_p->q_next = ha->scb_freelist;
4346 ha->scb_freelist = scb_p;
4347 }
4348 }
4349
4350
4351 return (1);
4352}
4353
4354
4355
4356
4357
4358
4359
4360
4361
4362
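/*
 * ips_init_scb - reset an SCB to a clean state while preserving its bus
 * addresses and scatter/gather list pointer, and refresh the dummy
 * command block (ha->dummy) kept alongside the adapter status area.
 */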
4363static void
4364ips_init_scb(ips_ha_t * ha, ips_scb_t * scb)
4365{
4366 IPS_SG_LIST sg_list;
4367 uint32_t cmd_busaddr, sg_busaddr;
4368 METHOD_TRACE("ips_init_scb", 1);
4369
4370 if (scb == NULL)
4371 return;
4372
4373 sg_list.list = scb->sg_list.list;
4374 cmd_busaddr = scb->scb_busaddr;
4375 sg_busaddr = scb->sg_busaddr;
4376
4377 memset(scb, 0, sizeof (ips_scb_t));
4378 memset(ha->dummy, 0, sizeof (IPS_IO_CMD));
4379
4380
4381 ha->dummy->op_code = 0xFF;
4382 ha->dummy->ccsar = cpu_to_le32(ha->adapt->hw_status_start
4383 + sizeof (IPS_ADAPTER));
4384 ha->dummy->command_id = IPS_MAX_CMDS;
4385
4386
4387 scb->scb_busaddr = cmd_busaddr;
4388 scb->sg_busaddr = sg_busaddr;
4389 scb->sg_list.list = sg_list.list;
4390
4391
4392 scb->cmd.basic_io.cccr = cpu_to_le32((uint32_t) IPS_BIT_ILE);
4393 scb->cmd.basic_io.ccsar = cpu_to_le32(ha->adapt->hw_status_start
4394 + sizeof (IPS_ADAPTER));
4395}
4396
4397
4398
4399
4400
4401
4402
4403
4404
4405
4406
4407
4408static ips_scb_t *
4409ips_getscb(ips_ha_t * ha)
4410{
4411 ips_scb_t *scb;
4412
4413 METHOD_TRACE("ips_getscb", 1);
4414
4415 if ((scb = ha->scb_freelist) == NULL) {
4416
4417 return (NULL);
4418 }
4419
4420 ha->scb_freelist = scb->q_next;
4421 scb->flags = 0;
4422 scb->q_next = NULL;
4423
4424 ips_init_scb(ha, scb);
4425
4426 return (scb);
4427}
4428
4429
4430
4431
4432
4433
4434
4435
4436
4437
4438
4439
4440static void
4441ips_freescb(ips_ha_t * ha, ips_scb_t * scb)
4442{
4443
4444 METHOD_TRACE("ips_freescb", 1);
4445 if (scb->flags & IPS_SCB_MAP_SG)
4446 scsi_dma_unmap(scb->scsi_cmd);
4447 else if (scb->flags & IPS_SCB_MAP_SINGLE)
4448 dma_unmap_single(&ha->pcidev->dev, scb->data_busaddr,
4449 scb->data_len, IPS_DMA_DIR(scb));
4450
4451
4452 if (IPS_COMMAND_ID(ha, scb) < (ha->max_cmds - 1)) {
4453 scb->q_next = ha->scb_freelist;
4454 ha->scb_freelist = scb;
4455 }
4456}
4457
4458
4459
4460
4461
4462
4463
4464
4465
4466
4467static int
4468ips_isinit_copperhead(ips_ha_t * ha)
4469{
4470 uint8_t scpr;
4471 uint8_t isr;
4472
4473 METHOD_TRACE("ips_isinit_copperhead", 1);
4474
4475 isr = inb(ha->io_addr + IPS_REG_HISR);
4476 scpr = inb(ha->io_addr + IPS_REG_SCPR);
4477
4478 if (((isr & IPS_BIT_EI) == 0) && ((scpr & IPS_BIT_EBM) == 0))
4479 return (0);
4480 else
4481 return (1);
4482}
4483
4484
4485
4486
4487
4488
4489
4490
4491
4492
4493static int
4494ips_isinit_copperhead_memio(ips_ha_t * ha)
4495{
4496 uint8_t isr = 0;
4497 uint8_t scpr;
4498
4499 METHOD_TRACE("ips_isinit_copperhead_memio", 1);
4500
4501 isr = readb(ha->mem_ptr + IPS_REG_HISR);
4502 scpr = readb(ha->mem_ptr + IPS_REG_SCPR);
4503
4504 if (((isr & IPS_BIT_EI) == 0) && ((scpr & IPS_BIT_EBM) == 0))
4505 return (0);
4506 else
4507 return (1);
4508}
4509
4510
4511
4512
4513
4514
4515
4516
4517
4518
4519static int
4520ips_isinit_morpheus(ips_ha_t * ha)
4521{
4522 uint32_t post;
4523 uint32_t bits;
4524
4525 METHOD_TRACE("ips_isinit_morpheus", 1);
4526
4527 if (ips_isintr_morpheus(ha))
4528 ips_flush_and_reset(ha);
4529
4530 post = readl(ha->mem_ptr + IPS_REG_I960_MSG0);
4531 bits = readl(ha->mem_ptr + IPS_REG_I2O_HIR);
4532
4533 if (post == 0)
4534 return (0);
4535 else if (bits & 0x3)
4536 return (0);
4537 else
4538 return (1);
4539}
4540
4541
4542
4543
4544
4545
4546
4547
4548
4549
4550
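/*
 * ips_flush_and_reset - issue a cache FLUSH through a temporary
 * coherent SCB, poll up to roughly 60 seconds for its completion, then
 * reset the controller.  Used when an adapter still appears busy at
 * initialization time.
 */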
4551static void
4552ips_flush_and_reset(ips_ha_t *ha)
4553{
4554 ips_scb_t *scb;
4555 int ret;
4556 int time;
4557 int done;
4558 dma_addr_t command_dma;
4559
4560
4561 scb = dma_alloc_coherent(&ha->pcidev->dev, sizeof(ips_scb_t),
4562 &command_dma, GFP_KERNEL);
4563 if (scb) {
4564 memset(scb, 0, sizeof(ips_scb_t));
4565 ips_init_scb(ha, scb);
4566 scb->scb_busaddr = command_dma;
4567
4568 scb->timeout = ips_cmd_timeout;
4569 scb->cdb[0] = IPS_CMD_FLUSH;
4570
4571 scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH;
4572 scb->cmd.flush_cache.command_id = IPS_MAX_CMDS;
4573 scb->cmd.flush_cache.state = IPS_NORM_STATE;
4574 scb->cmd.flush_cache.reserved = 0;
4575 scb->cmd.flush_cache.reserved2 = 0;
4576 scb->cmd.flush_cache.reserved3 = 0;
4577 scb->cmd.flush_cache.reserved4 = 0;
4578
4579 ret = ips_send_cmd(ha, scb);
4580
4581 if (ret == IPS_SUCCESS) {
4582 time = 60 * IPS_ONE_SEC;
4583 done = 0;
4584
4585 while ((time > 0) && (!done)) {
4586 done = ips_poll_for_flush_complete(ha);
4587
4588 udelay(1000);
4589 time--;
4590 }
4591 }
4592 }
4593
4594
4595 (*ha->func.reset) (ha);
4596
4597 if (scb) dma_free_coherent(&ha->pcidev->dev, sizeof(ips_scb_t), scb, command_dma);
4598 return;
4599}
4600
4601
4602
4603
4604
4605
4606
4607
4608
4609
4610
4611static int
4612ips_poll_for_flush_complete(ips_ha_t * ha)
4613{
4614 IPS_STATUS cstatus;
4615
4616 while (TRUE) {
4617 cstatus.value = (*ha->func.statupd) (ha);
4618
4619 if (cstatus.value == 0xffffffff)
4620 break;
4621
4622
4623 if (cstatus.fields.command_id == IPS_MAX_CMDS)
4624 return 1;
4625 }
4626
4627 return 0;
4628}
4629
4630
4631
4632
4633
4634
4635
4636
4637
4638static void
4639ips_enable_int_copperhead(ips_ha_t * ha)
4640{
4641 METHOD_TRACE("ips_enable_int_copperhead", 1);
4642
4643 outb(IPS_BIT_EI, ha->io_addr + IPS_REG_HISR);
4644 inb(ha->io_addr + IPS_REG_HISR);	/* flush PCI posting */
4645}
4646
4647
4648
4649
4650
4651
4652
4653
4654
4655static void
4656ips_enable_int_copperhead_memio(ips_ha_t * ha)
4657{
4658 METHOD_TRACE("ips_enable_int_copperhead_memio", 1);
4659
4660 writeb(IPS_BIT_EI, ha->mem_ptr + IPS_REG_HISR);
4661 readb(ha->mem_ptr + IPS_REG_HISR);
4662}
4663
4664
4665
4666
4667
4668
4669
4670
4671
4672static void
4673ips_enable_int_morpheus(ips_ha_t * ha)
4674{
4675 uint32_t Oimr;
4676
4677 METHOD_TRACE("ips_enable_int_morpheus", 1);
4678
4679 Oimr = readl(ha->mem_ptr + IPS_REG_I960_OIMR);
4680 Oimr &= ~0x08;
4681 writel(Oimr, ha->mem_ptr + IPS_REG_I960_OIMR);
4682 readl(ha->mem_ptr + IPS_REG_I960_OIMR);
4683}
4684
4685
4686
4687
4688
4689
4690
4691
4692
4693
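/*
 * ips_init_copperhead - wait for the copperhead adapter's POST and
 * configuration bytes over programmed I/O, then set IPS_BIT_EBM and
 * IPS_BIT_EI to enable bus mastering and host interrupts.  Returns 0 on
 * timeout or on a bad POST status.
 */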
4694static int
4695ips_init_copperhead(ips_ha_t * ha)
4696{
4697 uint8_t Isr;
4698 uint8_t Cbsp;
4699 uint8_t PostByte[IPS_MAX_POST_BYTES];
4700 int i, j;
4701
4702 METHOD_TRACE("ips_init_copperhead", 1);
4703
4704 for (i = 0; i < IPS_MAX_POST_BYTES; i++) {
4705 for (j = 0; j < 45; j++) {
4706 Isr = inb(ha->io_addr + IPS_REG_HISR);
4707 if (Isr & IPS_BIT_GHI)
4708 break;
4709
4710
4711 MDELAY(IPS_ONE_SEC);
4712 }
4713
4714 if (j >= 45)
4715
4716 return (0);
4717
4718 PostByte[i] = inb(ha->io_addr + IPS_REG_ISPR);
4719 outb(Isr, ha->io_addr + IPS_REG_HISR);
4720 }
4721
4722 if (PostByte[0] < IPS_GOOD_POST_STATUS) {
4723 IPS_PRINTK(KERN_WARNING, ha->pcidev,
4724 "reset controller fails (post status %x %x).\n",
4725 PostByte[0], PostByte[1]);
4726
4727 return (0);
4728 }
4729
4730 for (i = 0; i < IPS_MAX_CONFIG_BYTES; i++) {
4731 for (j = 0; j < 240; j++) {
4732 Isr = inb(ha->io_addr + IPS_REG_HISR);
4733 if (Isr & IPS_BIT_GHI)
4734 break;
4735
4736
4737 MDELAY(IPS_ONE_SEC);
4738 }
4739
4740 if (j >= 240)
4741
4742 return (0);
4743
4744 inb(ha->io_addr + IPS_REG_ISPR);
4745 outb(Isr, ha->io_addr + IPS_REG_HISR);
4746 }
4747
4748 for (i = 0; i < 240; i++) {
4749 Cbsp = inb(ha->io_addr + IPS_REG_CBSP);
4750
4751 if ((Cbsp & IPS_BIT_OP) == 0)
4752 break;
4753
4754
4755 MDELAY(IPS_ONE_SEC);
4756 }
4757
4758 if (i >= 240)
4759
4760 return (0);
4761
4762
4763 outl(0x1010, ha->io_addr + IPS_REG_CCCR);
4764
4765
4766 outb(IPS_BIT_EBM, ha->io_addr + IPS_REG_SCPR);
4767
4768 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
4769
4770 outl(0, ha->io_addr + IPS_REG_NDAE);
4771
4772
4773 outb(IPS_BIT_EI, ha->io_addr + IPS_REG_HISR);
4774
4775 return (1);
4776}
4777
4778
4779
4780
4781
4782
4783
4784
4785
4786
4787static int
4788ips_init_copperhead_memio(ips_ha_t * ha)
4789{
4790 uint8_t Isr = 0;
4791 uint8_t Cbsp;
4792 uint8_t PostByte[IPS_MAX_POST_BYTES];
4793 int i, j;
4794
4795 METHOD_TRACE("ips_init_copperhead_memio", 1);
4796
4797 for (i = 0; i < IPS_MAX_POST_BYTES; i++) {
4798 for (j = 0; j < 45; j++) {
4799 Isr = readb(ha->mem_ptr + IPS_REG_HISR);
4800 if (Isr & IPS_BIT_GHI)
4801 break;
4802
4803
4804 MDELAY(IPS_ONE_SEC);
4805 }
4806
4807 if (j >= 45)
4808
4809 return (0);
4810
4811 PostByte[i] = readb(ha->mem_ptr + IPS_REG_ISPR);
4812 writeb(Isr, ha->mem_ptr + IPS_REG_HISR);
4813 }
4814
4815 if (PostByte[0] < IPS_GOOD_POST_STATUS) {
4816 IPS_PRINTK(KERN_WARNING, ha->pcidev,
4817 "reset controller fails (post status %x %x).\n",
4818 PostByte[0], PostByte[1]);
4819
4820 return (0);
4821 }
4822
4823 for (i = 0; i < IPS_MAX_CONFIG_BYTES; i++) {
4824 for (j = 0; j < 240; j++) {
4825 Isr = readb(ha->mem_ptr + IPS_REG_HISR);
4826 if (Isr & IPS_BIT_GHI)
4827 break;
4828
4829
4830 MDELAY(IPS_ONE_SEC);
4831 }
4832
4833 if (j >= 240)
4834
4835 return (0);
4836
4837 readb(ha->mem_ptr + IPS_REG_ISPR);
4838 writeb(Isr, ha->mem_ptr + IPS_REG_HISR);
4839 }
4840
4841 for (i = 0; i < 240; i++) {
4842 Cbsp = readb(ha->mem_ptr + IPS_REG_CBSP);
4843
4844 if ((Cbsp & IPS_BIT_OP) == 0)
4845 break;
4846
4847
4848 MDELAY(IPS_ONE_SEC);
4849 }
4850
4851 if (i >= 240)
4852
4853 return (0);
4854
4855
4856 writel(0x1010, ha->mem_ptr + IPS_REG_CCCR);
4857
4858
4859 writeb(IPS_BIT_EBM, ha->mem_ptr + IPS_REG_SCPR);
4860
4861 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
4862
4863 writel(0, ha->mem_ptr + IPS_REG_NDAE);
4864
4865
4866 writeb(IPS_BIT_EI, ha->mem_ptr + IPS_REG_HISR);
4867
4868
4869 return (1);
4870}
4871
4872
4873
4874
4875
4876
4877
4878
4879
4880
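/*
 * ips_init_morpheus - wait for the POST and configuration messages from
 * the i960-based (memory-mapped) adapter, handle an in-progress battery
 * PIC flash, unmask the outbound interrupt, and flag controllers whose
 * POST/config values indicate requires_esl.
 */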
4881static int
4882ips_init_morpheus(ips_ha_t * ha)
4883{
4884 uint32_t Post;
4885 uint32_t Config;
4886 uint32_t Isr;
4887 uint32_t Oimr;
4888 int i;
4889
4890 METHOD_TRACE("ips_init_morpheus", 1);
4891
4892
4893 for (i = 0; i < 45; i++) {
4894 Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR);
4895
4896 if (Isr & IPS_BIT_I960_MSG0I)
4897 break;
4898
4899
4900 MDELAY(IPS_ONE_SEC);
4901 }
4902
4903 if (i >= 45) {
4904
4905 IPS_PRINTK(KERN_WARNING, ha->pcidev,
4906 "timeout waiting for post.\n");
4907
4908 return (0);
4909 }
4910
4911 Post = readl(ha->mem_ptr + IPS_REG_I960_MSG0);
4912
4913 if (Post == 0x4F00) {
4914 IPS_PRINTK(KERN_WARNING, ha->pcidev,
4915 "Flashing Battery PIC, Please wait ...\n");
4916
4917
4918 Isr = (uint32_t) IPS_BIT_I960_MSG0I;
4919 writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR);
4920
4921 for (i = 0; i < 120; i++) {
4922 Post = readl(ha->mem_ptr + IPS_REG_I960_MSG0);
4923 if (Post != 0x4F00)
4924 break;
4925
4926 MDELAY(IPS_ONE_SEC);
4927 }
4928
4929 if (i >= 120) {
4930 IPS_PRINTK(KERN_WARNING, ha->pcidev,
4931 "timeout waiting for Battery PIC Flash\n");
4932 return (0);
4933 }
4934
4935 }
4936
4937
4938 Isr = (uint32_t) IPS_BIT_I960_MSG0I;
4939 writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR);
4940
4941 if (Post < (IPS_GOOD_POST_STATUS << 8)) {
4942 IPS_PRINTK(KERN_WARNING, ha->pcidev,
4943 "reset controller fails (post status %x).\n", Post);
4944
4945 return (0);
4946 }
4947
4948
4949 for (i = 0; i < 240; i++) {
4950 Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR);
4951
4952 if (Isr & IPS_BIT_I960_MSG1I)
4953 break;
4954
4955
4956 MDELAY(IPS_ONE_SEC);
4957 }
4958
4959 if (i >= 240) {
4960
4961 IPS_PRINTK(KERN_WARNING, ha->pcidev,
4962 "timeout waiting for config.\n");
4963
4964 return (0);
4965 }
4966
4967 Config = readl(ha->mem_ptr + IPS_REG_I960_MSG1);
4968
4969
4970 Isr = (uint32_t) IPS_BIT_I960_MSG1I;
4971 writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR);
4972
4973
4974 Oimr = readl(ha->mem_ptr + IPS_REG_I960_OIMR);
4975 Oimr &= ~0x8;
4976 writel(Oimr, ha->mem_ptr + IPS_REG_I960_OIMR);
4977
4978
4979
4980
4981 if (Post == 0xEF10) {
4982 if ((Config == 0x000F) || (Config == 0x0009))
4983 ha->requires_esl = 1;
4984 }
4985
4986 return (1);
4987}
4988
4989
4990
4991
4992
4993
4994
4995
4996
4997
4998static int
4999ips_reset_copperhead(ips_ha_t * ha)
5000{
5001 int reset_counter;
5002
5003 METHOD_TRACE("ips_reset_copperhead", 1);
5004
5005 DEBUG_VAR(1, "(%s%d) ips_reset_copperhead: io addr: %x, irq: %d",
5006 ips_name, ha->host_num, ha->io_addr, ha->pcidev->irq);
5007
5008 reset_counter = 0;
5009
5010 while (reset_counter < 2) {
5011 reset_counter++;
5012
5013 outb(IPS_BIT_RST, ha->io_addr + IPS_REG_SCPR);
5014
5015
5016 MDELAY(IPS_ONE_SEC);
5017
5018 outb(0, ha->io_addr + IPS_REG_SCPR);
5019
5020
5021 MDELAY(IPS_ONE_SEC);
5022
5023 if ((*ha->func.init) (ha))
5024 break;
5025 else if (reset_counter >= 2) {
5026
5027 return (0);
5028 }
5029 }
5030
5031 return (1);
5032}
5033
5034
5035
5036
5037
5038
5039
5040
5041
5042
5043static int
5044ips_reset_copperhead_memio(ips_ha_t * ha)
5045{
5046 int reset_counter;
5047
5048 METHOD_TRACE("ips_reset_copperhead_memio", 1);
5049
5050 DEBUG_VAR(1, "(%s%d) ips_reset_copperhead_memio: mem addr: %x, irq: %d",
5051 ips_name, ha->host_num, ha->mem_addr, ha->pcidev->irq);
5052
5053 reset_counter = 0;
5054
5055 while (reset_counter < 2) {
5056 reset_counter++;
5057
5058 writeb(IPS_BIT_RST, ha->mem_ptr + IPS_REG_SCPR);
5059
5060
5061 MDELAY(IPS_ONE_SEC);
5062
5063 writeb(0, ha->mem_ptr + IPS_REG_SCPR);
5064
5065
5066 MDELAY(IPS_ONE_SEC);
5067
5068 if ((*ha->func.init) (ha))
5069 break;
5070 else if (reset_counter >= 2) {
5071
5072 return (0);
5073 }
5074 }
5075
5076 return (1);
5077}
5078
5079
5080
5081
5082
5083
5084
5085
5086
5087
5088static int
5089ips_reset_morpheus(ips_ha_t * ha)
5090{
5091 int reset_counter;
5092 uint8_t junk;
5093
5094 METHOD_TRACE("ips_reset_morpheus", 1);
5095
5096 DEBUG_VAR(1, "(%s%d) ips_reset_morpheus: mem addr: %x, irq: %d",
5097 ips_name, ha->host_num, ha->mem_addr, ha->pcidev->irq);
5098
5099 reset_counter = 0;
5100
5101 while (reset_counter < 2) {
5102 reset_counter++;
5103
5104 writel(0x80000000, ha->mem_ptr + IPS_REG_I960_IDR);
5105
5106
5107 MDELAY(5 * IPS_ONE_SEC);
5108
5109
5110 pci_read_config_byte(ha->pcidev, 4, &junk);
5111
5112 if ((*ha->func.init) (ha))
5113 break;
5114 else if (reset_counter >= 2) {
5115
5116 return (0);
5117 }
5118 }
5119
5120 return (1);
5121}
5122
5123
5124
5125
5126
5127
5128
5129
5130
5131
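/*
 * ips_statinit - program the adapter's status queue start/end/head/tail
 * registers (I/O-mapped variant) and set up the driver's shadow
 * pointers into the status area.
 */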
5132static void
5133ips_statinit(ips_ha_t * ha)
5134{
5135 uint32_t phys_status_start;
5136
5137 METHOD_TRACE("ips_statinit", 1);
5138
5139 ha->adapt->p_status_start = ha->adapt->status;
5140 ha->adapt->p_status_end = ha->adapt->status + IPS_MAX_CMDS;
5141 ha->adapt->p_status_tail = ha->adapt->status;
5142
5143 phys_status_start = ha->adapt->hw_status_start;
5144 outl(phys_status_start, ha->io_addr + IPS_REG_SQSR);
5145 outl(phys_status_start + IPS_STATUS_Q_SIZE,
5146 ha->io_addr + IPS_REG_SQER);
5147 outl(phys_status_start + IPS_STATUS_SIZE,
5148 ha->io_addr + IPS_REG_SQHR);
5149 outl(phys_status_start, ha->io_addr + IPS_REG_SQTR);
5150
5151 ha->adapt->hw_status_tail = phys_status_start;
5152}
5153
5154
5155
5156
5157
5158
5159
5160
5161
5162
5163static void
5164ips_statinit_memio(ips_ha_t * ha)
5165{
5166 uint32_t phys_status_start;
5167
5168 METHOD_TRACE("ips_statinit_memio", 1);
5169
5170 ha->adapt->p_status_start = ha->adapt->status;
5171 ha->adapt->p_status_end = ha->adapt->status + IPS_MAX_CMDS;
5172 ha->adapt->p_status_tail = ha->adapt->status;
5173
5174 phys_status_start = ha->adapt->hw_status_start;
5175 writel(phys_status_start, ha->mem_ptr + IPS_REG_SQSR);
5176 writel(phys_status_start + IPS_STATUS_Q_SIZE,
5177 ha->mem_ptr + IPS_REG_SQER);
5178 writel(phys_status_start + IPS_STATUS_SIZE, ha->mem_ptr + IPS_REG_SQHR);
5179 writel(phys_status_start, ha->mem_ptr + IPS_REG_SQTR);
5180
5181 ha->adapt->hw_status_tail = phys_status_start;
5182}
5183
5184
5185
5186
5187
5188
5189
5190
5191
5192
5193static uint32_t
5194ips_statupd_copperhead(ips_ha_t * ha)
5195{
5196 METHOD_TRACE("ips_statupd_copperhead", 1);
5197
5198 if (ha->adapt->p_status_tail != ha->adapt->p_status_end) {
5199 ha->adapt->p_status_tail++;
5200 ha->adapt->hw_status_tail += sizeof (IPS_STATUS);
5201 } else {
5202 ha->adapt->p_status_tail = ha->adapt->p_status_start;
5203 ha->adapt->hw_status_tail = ha->adapt->hw_status_start;
5204 }
5205
5206 outl(ha->adapt->hw_status_tail,
5207 ha->io_addr + IPS_REG_SQTR);
5208
5209 return (ha->adapt->p_status_tail->value);
5210}
5211
5212
5213
5214
5215
5216
5217
5218
5219
5220
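/*
 * ips_statupd_copperhead_memio - memory-mapped variant of
 * ips_statupd_copperhead.
 */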
5221static uint32_t
5222ips_statupd_copperhead_memio(ips_ha_t * ha)
5223{
5224 METHOD_TRACE("ips_statupd_copperhead_memio", 1);
5225
5226 if (ha->adapt->p_status_tail != ha->adapt->p_status_end) {
5227 ha->adapt->p_status_tail++;
5228 ha->adapt->hw_status_tail += sizeof (IPS_STATUS);
5229 } else {
5230 ha->adapt->p_status_tail = ha->adapt->p_status_start;
5231 ha->adapt->hw_status_tail = ha->adapt->hw_status_start;
5232 }
5233
5234 writel(ha->adapt->hw_status_tail, ha->mem_ptr + IPS_REG_SQTR);
5235
5236 return (ha->adapt->p_status_tail->value);
5237}
5238
5239
5240
5241
5242
5243
5244
5245
5246
5247
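/*
 * ips_statupd_morpheus - read a completed command status from the I2O
 * outbound message queue.
 */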
5248static uint32_t
5249ips_statupd_morpheus(ips_ha_t * ha)
5250{
5251 uint32_t val;
5252
5253 METHOD_TRACE("ips_statupd_morpheus", 1);
5254
5255 val = readl(ha->mem_ptr + IPS_REG_I2O_OUTMSGQ);
5256
5257 return (val);
5258}
5259
5260
5261
5262
5263
5264
5265
5266
5267
5268
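/*
 * ips_issue_copperhead - issue a command to a Copperhead adapter: poll the
 * command channel semaphore, then write the SCB bus address and the start
 * bit (port I/O).
 */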
5269static int
5270ips_issue_copperhead(ips_ha_t * ha, ips_scb_t * scb)
5271{
5272 uint32_t TimeOut;
5273 uint32_t val;
5274
5275 METHOD_TRACE("ips_issue_copperhead", 1);
5276
5277 if (scb->scsi_cmd) {
5278 DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)",
5279 ips_name,
5280 ha->host_num,
5281 scb->cdb[0],
5282 scb->cmd.basic_io.command_id,
5283 scb->bus, scb->target_id, scb->lun);
5284 } else {
		DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d",
			  ips_name, ha->host_num, scb->cmd.basic_io.command_id);
5287 }
5288
5289 TimeOut = 0;
5290
5291 while ((val =
5292 le32_to_cpu(inl(ha->io_addr + IPS_REG_CCCR))) & IPS_BIT_SEM) {
5293 udelay(1000);
5294
5295 if (++TimeOut >= IPS_SEM_TIMEOUT) {
5296 if (!(val & IPS_BIT_START_STOP))
5297 break;
5298
5299 IPS_PRINTK(KERN_WARNING, ha->pcidev,
5300 "ips_issue val [0x%x].\n", val);
5301 IPS_PRINTK(KERN_WARNING, ha->pcidev,
5302 "ips_issue semaphore chk timeout.\n");
5303
5304 return (IPS_FAILURE);
5305 }
5306 }
5307
5308 outl(scb->scb_busaddr, ha->io_addr + IPS_REG_CCSAR);
5309 outw(IPS_BIT_START_CMD, ha->io_addr + IPS_REG_CCCR);
5310
5311 return (IPS_SUCCESS);
5312}
5313
5314
5315
5316
5317
5318
5319
5320
5321
5322
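/*
 * ips_issue_copperhead_memio - memory-mapped variant of
 * ips_issue_copperhead.
 */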
5323static int
5324ips_issue_copperhead_memio(ips_ha_t * ha, ips_scb_t * scb)
5325{
5326 uint32_t TimeOut;
5327 uint32_t val;
5328
5329 METHOD_TRACE("ips_issue_copperhead_memio", 1);
5330
5331 if (scb->scsi_cmd) {
5332 DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)",
5333 ips_name,
5334 ha->host_num,
5335 scb->cdb[0],
5336 scb->cmd.basic_io.command_id,
5337 scb->bus, scb->target_id, scb->lun);
5338 } else {
5339 DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d",
5340 ips_name, ha->host_num, scb->cmd.basic_io.command_id);
5341 }
5342
5343 TimeOut = 0;
5344
5345 while ((val = readl(ha->mem_ptr + IPS_REG_CCCR)) & IPS_BIT_SEM) {
5346 udelay(1000);
5347
5348 if (++TimeOut >= IPS_SEM_TIMEOUT) {
5349 if (!(val & IPS_BIT_START_STOP))
5350 break;
5351
5352 IPS_PRINTK(KERN_WARNING, ha->pcidev,
5353 "ips_issue val [0x%x].\n", val);
5354 IPS_PRINTK(KERN_WARNING, ha->pcidev,
5355 "ips_issue semaphore chk timeout.\n");
5356
5357 return (IPS_FAILURE);
5358 }
5359 }
5360
5361 writel(scb->scb_busaddr, ha->mem_ptr + IPS_REG_CCSAR);
5362 writel(IPS_BIT_START_CMD, ha->mem_ptr + IPS_REG_CCCR);
5363
5364 return (IPS_SUCCESS);
5365}
5366
5367
5368
5369
5370
5371
5372
5373
5374
5375
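/*
 * ips_issue_i2o - issue a command by posting the SCB bus address to the
 * I2O inbound message queue (port I/O).
 */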
5376static int
5377ips_issue_i2o(ips_ha_t * ha, ips_scb_t * scb)
5378{
5379
5380 METHOD_TRACE("ips_issue_i2o", 1);
5381
5382 if (scb->scsi_cmd) {
5383 DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)",
5384 ips_name,
5385 ha->host_num,
5386 scb->cdb[0],
5387 scb->cmd.basic_io.command_id,
5388 scb->bus, scb->target_id, scb->lun);
5389 } else {
5390 DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d",
5391 ips_name, ha->host_num, scb->cmd.basic_io.command_id);
5392 }
5393
5394 outl(scb->scb_busaddr, ha->io_addr + IPS_REG_I2O_INMSGQ);
5395
5396 return (IPS_SUCCESS);
5397}
5398
5399
5400
5401
5402
5403
5404
5405
5406
5407
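/*
 * ips_issue_i2o_memio - memory-mapped variant of ips_issue_i2o.
 */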
5408static int
5409ips_issue_i2o_memio(ips_ha_t * ha, ips_scb_t * scb)
5410{
5411
5412 METHOD_TRACE("ips_issue_i2o_memio", 1);
5413
5414 if (scb->scsi_cmd) {
5415 DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)",
5416 ips_name,
5417 ha->host_num,
5418 scb->cdb[0],
5419 scb->cmd.basic_io.command_id,
5420 scb->bus, scb->target_id, scb->lun);
5421 } else {
5422 DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d",
5423 ips_name, ha->host_num, scb->cmd.basic_io.command_id);
5424 }
5425
5426 writel(scb->scb_busaddr, ha->mem_ptr + IPS_REG_I2O_INMSGQ);
5427
5428 return (IPS_SUCCESS);
5429}
5430
5431
5432
5433
5434
5435
5436
5437
5438
5439
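/*
 * ips_isintr_copperhead - return 1 if the adapter is presenting a status
 * channel interrupt; acknowledge and ignore the other interrupt sources.
 */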
5440static int
5441ips_isintr_copperhead(ips_ha_t * ha)
5442{
5443 uint8_t Isr;
5444
5445 METHOD_TRACE("ips_isintr_copperhead", 2);
5446
5447 Isr = inb(ha->io_addr + IPS_REG_HISR);
5448
	if (Isr == 0xFF)
		return (0);
5452
5453 if (Isr & IPS_BIT_SCE)
5454 return (1);
5455 else if (Isr & (IPS_BIT_SQO | IPS_BIT_GHI)) {
5456
5457
5458 outb(Isr, ha->io_addr + IPS_REG_HISR);
5459 }
5460
5461 return (0);
5462}
5463
5464
5465
5466
5467
5468
5469
5470
5471
5472
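/*
 * ips_isintr_copperhead_memio - memory-mapped variant of
 * ips_isintr_copperhead.
 */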
5473static int
5474ips_isintr_copperhead_memio(ips_ha_t * ha)
5475{
5476 uint8_t Isr;
5477
5478 METHOD_TRACE("ips_isintr_memio", 2);
5479
5480 Isr = readb(ha->mem_ptr + IPS_REG_HISR);
5481
	if (Isr == 0xFF)
		return (0);
5485
5486 if (Isr & IPS_BIT_SCE)
5487 return (1);
5488 else if (Isr & (IPS_BIT_SQO | IPS_BIT_GHI)) {
5489
5490
5491 writeb(Isr, ha->mem_ptr + IPS_REG_HISR);
5492 }
5493
5494 return (0);
5495}
5496
5497
5498
5499
5500
5501
5502
5503
5504
5505
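/*
 * ips_isintr_morpheus - return 1 if the I2O outbound post queue interrupt
 * bit is set.
 */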
5506static int
5507ips_isintr_morpheus(ips_ha_t * ha)
5508{
5509 uint32_t Isr;
5510
5511 METHOD_TRACE("ips_isintr_morpheus", 2);
5512
5513 Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR);
5514
5515 if (Isr & IPS_BIT_I2O_OPQI)
5516 return (1);
5517 else
5518 return (0);
5519}
5520
5521
5522
5523
5524
5525
5526
5527
5528
5529
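/*
 * ips_wait - poll up to 'time' seconds for a command to complete; in
 * IPS_INTR_IORL mode the adapter's interrupt routine is called by hand.
 */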
5530static int
5531ips_wait(ips_ha_t * ha, int time, int intr)
5532{
5533 int ret;
5534 int done;
5535
5536 METHOD_TRACE("ips_wait", 1);
5537
5538 ret = IPS_FAILURE;
5539 done = FALSE;
5540
5541 time *= IPS_ONE_SEC;
5542
5543 while ((time > 0) && (!done)) {
5544 if (intr == IPS_INTR_ON) {
5545 if (ha->waitflag == FALSE) {
5546 ret = IPS_SUCCESS;
5547 done = TRUE;
5548 break;
5549 }
5550 } else if (intr == IPS_INTR_IORL) {
5551 if (ha->waitflag == FALSE) {
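				/* ha->waitflag has been cleared, so the
				 * command has already completed */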
5552
5553
5554
5555
5556
5557 ret = IPS_SUCCESS;
5558 done = TRUE;
5559 break;
5560 }
5561
5562
5563
5564
5565
5566
5567
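			/* the interrupt may not get serviced while we
			 * busy-wait, so call the adapter's interrupt
			 * routine directly to pick up completions */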
5568 (*ha->func.intr) (ha);
5569 }
5570
5571
5572 udelay(1000);
5573 time--;
5574 }
5575
5576 return (ret);
5577}
5578
5579
5580
5581
5582
5583
5584
5585
5586
5587
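/*
 * ips_write_driver_status - update NVRAM page 5 with the driver and BIOS
 * versions, OS type and adapter type, then write it back to the adapter.
 */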
5588static int
5589ips_write_driver_status(ips_ha_t * ha, int intr)
5590{
5591 METHOD_TRACE("ips_write_driver_status", 1);
5592
5593 if (!ips_readwrite_page5(ha, FALSE, intr)) {
5594 IPS_PRINTK(KERN_WARNING, ha->pcidev,
5595 "unable to read NVRAM page 5.\n");
5596
5597 return (0);
5598 }
5599
5600
5601
5602 if (le32_to_cpu(ha->nvram->signature) != IPS_NVRAM_P5_SIG) {
5603 DEBUG_VAR(1,
5604 "(%s%d) NVRAM page 5 has an invalid signature: %X.",
			  ips_name, ha->host_num,
			  le32_to_cpu(ha->nvram->signature));
5606 ha->nvram->signature = IPS_NVRAM_P5_SIG;
5607 }
5608
5609 DEBUG_VAR(2,
5610 "(%s%d) Ad Type: %d, Ad Slot: %d, BIOS: %c%c%c%c %c%c%c%c.",
5611 ips_name, ha->host_num, le16_to_cpu(ha->nvram->adapter_type),
5612 ha->nvram->adapter_slot, ha->nvram->bios_high[0],
5613 ha->nvram->bios_high[1], ha->nvram->bios_high[2],
5614 ha->nvram->bios_high[3], ha->nvram->bios_low[0],
5615 ha->nvram->bios_low[1], ha->nvram->bios_low[2],
5616 ha->nvram->bios_low[3]);
5617
5618 ips_get_bios_version(ha, intr);
5619
5620
5621 ha->nvram->operating_system = IPS_OS_LINUX;
5622 ha->nvram->adapter_type = ha->ad_type;
5623 memcpy((char *) ha->nvram->driver_high, IPS_VERSION_HIGH, 4);
5624 memcpy((char *) ha->nvram->driver_low, IPS_VERSION_LOW, 4);
5625 memcpy((char *) ha->nvram->bios_high, ha->bios_version, 4);
5626 memcpy((char *) ha->nvram->bios_low, ha->bios_version + 4, 4);
5627
5628 ha->nvram->versioning = 0;
5629
5630
5631 if (!ips_readwrite_page5(ha, TRUE, intr)) {
5632 IPS_PRINTK(KERN_WARNING, ha->pcidev,
5633 "unable to write NVRAM page 5.\n");
5634
5635 return (0);
5636 }
5637
5638
5639 ha->slot_num = ha->nvram->adapter_slot;
5640
5641 return (1);
5642}
5643
5644
5645
5646
5647
5648
5649
5650
5651
5652
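/*
 * ips_read_adapter_status - send an ENQUIRY command; the result is DMAed
 * into ha->enq.
 */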
5653static int
5654ips_read_adapter_status(ips_ha_t * ha, int intr)
5655{
5656 ips_scb_t *scb;
5657 int ret;
5658
5659 METHOD_TRACE("ips_read_adapter_status", 1);
5660
5661 scb = &ha->scbs[ha->max_cmds - 1];
5662
5663 ips_init_scb(ha, scb);
5664
5665 scb->timeout = ips_cmd_timeout;
5666 scb->cdb[0] = IPS_CMD_ENQUIRY;
5667
5668 scb->cmd.basic_io.op_code = IPS_CMD_ENQUIRY;
5669 scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
5670 scb->cmd.basic_io.sg_count = 0;
5671 scb->cmd.basic_io.lba = 0;
5672 scb->cmd.basic_io.sector_count = 0;
5673 scb->cmd.basic_io.log_drv = 0;
5674 scb->data_len = sizeof (*ha->enq);
5675 scb->cmd.basic_io.sg_addr = ha->enq_busaddr;
5676
5677
5678 if (((ret =
5679 ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE)
5680 || (ret == IPS_SUCCESS_IMM)
5681 || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1))
5682 return (0);
5683
5684 return (1);
5685}
5686
5687
5688
5689
5690
5691
5692
5693
5694
5695
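/*
 * ips_read_subsystem_parameters - issue a GET_SUBSYS command through the
 * ioctl buffer and copy the result into ha->subsys.
 */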
5696static int
5697ips_read_subsystem_parameters(ips_ha_t * ha, int intr)
5698{
5699 ips_scb_t *scb;
5700 int ret;
5701
5702 METHOD_TRACE("ips_read_subsystem_parameters", 1);
5703
5704 scb = &ha->scbs[ha->max_cmds - 1];
5705
5706 ips_init_scb(ha, scb);
5707
5708 scb->timeout = ips_cmd_timeout;
5709 scb->cdb[0] = IPS_CMD_GET_SUBSYS;
5710
5711 scb->cmd.basic_io.op_code = IPS_CMD_GET_SUBSYS;
5712 scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
5713 scb->cmd.basic_io.sg_count = 0;
5714 scb->cmd.basic_io.lba = 0;
5715 scb->cmd.basic_io.sector_count = 0;
5716 scb->cmd.basic_io.log_drv = 0;
5717 scb->data_len = sizeof (*ha->subsys);
5718 scb->cmd.basic_io.sg_addr = ha->ioctl_busaddr;
5719
5720
5721 if (((ret =
5722 ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE)
5723 || (ret == IPS_SUCCESS_IMM)
5724 || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1))
5725 return (0);
5726
5727 memcpy(ha->subsys, ha->ioctl_data, sizeof(*ha->subsys));
5728 return (1);
5729}
5730
5731
5732
5733
5734
5735
5736
5737
5738
5739
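/*
 * ips_read_config - issue a READ_CONF command; on failure the config is
 * zeroed and the initiator IDs default to 7.
 */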
5740static int
5741ips_read_config(ips_ha_t * ha, int intr)
5742{
5743 ips_scb_t *scb;
5744 int i;
5745 int ret;
5746
5747 METHOD_TRACE("ips_read_config", 1);
5748
5749
5750 for (i = 0; i < 4; i++)
5751 ha->conf->init_id[i] = 7;
5752
5753 scb = &ha->scbs[ha->max_cmds - 1];
5754
5755 ips_init_scb(ha, scb);
5756
5757 scb->timeout = ips_cmd_timeout;
5758 scb->cdb[0] = IPS_CMD_READ_CONF;
5759
5760 scb->cmd.basic_io.op_code = IPS_CMD_READ_CONF;
5761 scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
5762 scb->data_len = sizeof (*ha->conf);
5763 scb->cmd.basic_io.sg_addr = ha->ioctl_busaddr;
5764
5765
5766 if (((ret =
5767 ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE)
5768 || (ret == IPS_SUCCESS_IMM)
5769 || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) {
5770
5771 memset(ha->conf, 0, sizeof (IPS_CONF));
5772
5773
5774 for (i = 0; i < 4; i++)
5775 ha->conf->init_id[i] = 7;
5776
5777
5778 if ((scb->basic_status & IPS_GSC_STATUS_MASK) ==
5779 IPS_CMD_CMPLT_WERROR)
5780 return (1);
5781
5782 return (0);
5783 }
5784
5785 memcpy(ha->conf, ha->ioctl_data, sizeof(*ha->conf));
5786 return (1);
5787}
5788
5789
5790
5791
5792
5793
5794
5795
5796
5797
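/*
 * ips_readwrite_page5 - read or write NVRAM page 5 through the ioctl
 * buffer; on failure the in-memory copy is cleared.
 */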
5798static int
5799ips_readwrite_page5(ips_ha_t * ha, int write, int intr)
5800{
5801 ips_scb_t *scb;
5802 int ret;
5803
5804 METHOD_TRACE("ips_readwrite_page5", 1);
5805
5806 scb = &ha->scbs[ha->max_cmds - 1];
5807
5808 ips_init_scb(ha, scb);
5809
5810 scb->timeout = ips_cmd_timeout;
5811 scb->cdb[0] = IPS_CMD_RW_NVRAM_PAGE;
5812
5813 scb->cmd.nvram.op_code = IPS_CMD_RW_NVRAM_PAGE;
5814 scb->cmd.nvram.command_id = IPS_COMMAND_ID(ha, scb);
5815 scb->cmd.nvram.page = 5;
5816 scb->cmd.nvram.write = write;
5817 scb->cmd.nvram.reserved = 0;
5818 scb->cmd.nvram.reserved2 = 0;
5819 scb->data_len = sizeof (*ha->nvram);
5820 scb->cmd.nvram.buffer_addr = ha->ioctl_busaddr;
5821 if (write)
5822 memcpy(ha->ioctl_data, ha->nvram, sizeof(*ha->nvram));
5823
5824
5825 if (((ret =
5826 ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE)
5827 || (ret == IPS_SUCCESS_IMM)
5828 || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) {
5829
5830 memset(ha->nvram, 0, sizeof (IPS_NVRAM_P5));
5831
5832 return (0);
5833 }
5834 if (!write)
5835 memcpy(ha->nvram, ha->ioctl_data, sizeof(*ha->nvram));
5836 return (1);
5837}
5838
5839
5840
5841
5842
5843
5844
5845
5846
5847
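/*
 * ips_clear_adapter - send a CONFIG_SYNC command followed by an unlock
 * stripe (ERROR_TABLE) command to clear the adapter.
 */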
5848static int
5849ips_clear_adapter(ips_ha_t * ha, int intr)
5850{
5851 ips_scb_t *scb;
5852 int ret;
5853
5854 METHOD_TRACE("ips_clear_adapter", 1);
5855
5856 scb = &ha->scbs[ha->max_cmds - 1];
5857
5858 ips_init_scb(ha, scb);
5859
5860 scb->timeout = ips_reset_timeout;
5861 scb->cdb[0] = IPS_CMD_CONFIG_SYNC;
5862
5863 scb->cmd.config_sync.op_code = IPS_CMD_CONFIG_SYNC;
5864 scb->cmd.config_sync.command_id = IPS_COMMAND_ID(ha, scb);
5865 scb->cmd.config_sync.channel = 0;
5866 scb->cmd.config_sync.source_target = IPS_POCL;
5867 scb->cmd.config_sync.reserved = 0;
5868 scb->cmd.config_sync.reserved2 = 0;
5869 scb->cmd.config_sync.reserved3 = 0;
5870
5871
5872 if (((ret =
5873 ips_send_wait(ha, scb, ips_reset_timeout, intr)) == IPS_FAILURE)
5874 || (ret == IPS_SUCCESS_IMM)
5875 || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1))
5876 return (0);
5877
5878
5879 ips_init_scb(ha, scb);
5880
5881 scb->cdb[0] = IPS_CMD_ERROR_TABLE;
5882 scb->timeout = ips_reset_timeout;
5883
5884 scb->cmd.unlock_stripe.op_code = IPS_CMD_ERROR_TABLE;
5885 scb->cmd.unlock_stripe.command_id = IPS_COMMAND_ID(ha, scb);
5886 scb->cmd.unlock_stripe.log_drv = 0;
5887 scb->cmd.unlock_stripe.control = IPS_CSL;
5888 scb->cmd.unlock_stripe.reserved = 0;
5889 scb->cmd.unlock_stripe.reserved2 = 0;
5890 scb->cmd.unlock_stripe.reserved3 = 0;
5891
5892
5893 if (((ret =
5894 ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE)
5895 || (ret == IPS_SUCCESS_IMM)
5896 || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1))
5897 return (0);
5898
5899 return (1);
5900}
5901
5902
5903
5904
5905
5906
5907
5908
5909
5910
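/*
 * ips_ffdc_reset - send a first-failure-data-capture (FFDC) record telling
 * the adapter that a reset occurred.
 */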
5911static void
5912ips_ffdc_reset(ips_ha_t * ha, int intr)
5913{
5914 ips_scb_t *scb;
5915
5916 METHOD_TRACE("ips_ffdc_reset", 1);
5917
5918 scb = &ha->scbs[ha->max_cmds - 1];
5919
5920 ips_init_scb(ha, scb);
5921
5922 scb->timeout = ips_cmd_timeout;
5923 scb->cdb[0] = IPS_CMD_FFDC;
5924 scb->cmd.ffdc.op_code = IPS_CMD_FFDC;
5925 scb->cmd.ffdc.command_id = IPS_COMMAND_ID(ha, scb);
5926 scb->cmd.ffdc.reset_count = ha->reset_count;
5927 scb->cmd.ffdc.reset_type = 0x80;
5928
5929
5930 ips_fix_ffdc_time(ha, scb, ha->last_ffdc);
5931
5932
5933 ips_send_wait(ha, scb, ips_cmd_timeout, intr);
5934}
5935
5936
5937
5938
5939
5940
5941
5942
5943
5944
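/*
 * ips_ffdc_time - send the adapter an FFDC time-of-day update.
 */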
5945static void
5946ips_ffdc_time(ips_ha_t * ha)
5947{
5948 ips_scb_t *scb;
5949
5950 METHOD_TRACE("ips_ffdc_time", 1);
5951
5952 DEBUG_VAR(1, "(%s%d) Sending time update.", ips_name, ha->host_num);
5953
5954 scb = &ha->scbs[ha->max_cmds - 1];
5955
5956 ips_init_scb(ha, scb);
5957
5958 scb->timeout = ips_cmd_timeout;
5959 scb->cdb[0] = IPS_CMD_FFDC;
5960 scb->cmd.ffdc.op_code = IPS_CMD_FFDC;
5961 scb->cmd.ffdc.command_id = IPS_COMMAND_ID(ha, scb);
5962 scb->cmd.ffdc.reset_count = 0;
5963 scb->cmd.ffdc.reset_type = 0;
5964
5965
5966 ips_fix_ffdc_time(ha, scb, ha->last_ffdc);
5967
5968
5969 ips_send_wait(ha, scb, ips_cmd_timeout, IPS_FFDC);
5970}
5971
5972
5973
5974
5975
5976
5977
5978
5979
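/*
 * ips_fix_ffdc_time - break a time64_t down into the date and time fields
 * of an FFDC command.
 */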
5980static void
5981ips_fix_ffdc_time(ips_ha_t * ha, ips_scb_t * scb, time64_t current_time)
5982{
5983 struct tm tm;
5984
5985 METHOD_TRACE("ips_fix_ffdc_time", 1);
5986
5987 time64_to_tm(current_time, 0, &tm);
5988
5989 scb->cmd.ffdc.hour = tm.tm_hour;
5990 scb->cmd.ffdc.minute = tm.tm_min;
5991 scb->cmd.ffdc.second = tm.tm_sec;
5992 scb->cmd.ffdc.yearH = (tm.tm_year + 1900) / 100;
5993 scb->cmd.ffdc.yearL = tm.tm_year % 100;
5994 scb->cmd.ffdc.month = tm.tm_mon + 1;
5995 scb->cmd.ffdc.day = tm.tm_mday;
5996}
5997
5998
5999
6000
6001
6002
6003
6004
6005
6006
6007
6008
6009
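/*
 * ips_erase_bios - erase the flash BIOS (port I/O): issue the erase
 * sequence, poll the flash status, and bail out on timeout or error.
 */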
6010static int
6011ips_erase_bios(ips_ha_t * ha)
6012{
6013 int timeout;
6014 uint8_t status = 0;
6015
6016 METHOD_TRACE("ips_erase_bios", 1);
6017
6018 status = 0;
6019
6020
6021 outl(0, ha->io_addr + IPS_REG_FLAP);
6022 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6023 udelay(25);
6024
6025 outb(0x50, ha->io_addr + IPS_REG_FLDP);
6026 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6027 udelay(25);
6028
6029
6030 outb(0x20, ha->io_addr + IPS_REG_FLDP);
6031 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6032 udelay(25);
6033
6034
6035 outb(0xD0, ha->io_addr + IPS_REG_FLDP);
6036 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6037 udelay(25);
6038
6039
6040 outb(0x70, ha->io_addr + IPS_REG_FLDP);
6041 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6042 udelay(25);
6043
6044 timeout = 80000;
6045
6046 while (timeout > 0) {
6047 if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
6048 outl(0, ha->io_addr + IPS_REG_FLAP);
6049 udelay(25);
6050 }
6051
6052 status = inb(ha->io_addr + IPS_REG_FLDP);
6053
6054 if (status & 0x80)
6055 break;
6056
6057 MDELAY(1);
6058 timeout--;
6059 }
6060
6061
6062 if (timeout <= 0) {
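		/* the erase timed out: try to suspend it before giving up */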
6063
6064
6065
6066 outb(0xB0, ha->io_addr + IPS_REG_FLDP);
6067 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6068 udelay(25);
6069
6070
6071 timeout = 10000;
6072 while (timeout > 0) {
6073 if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
6074 outl(0, ha->io_addr + IPS_REG_FLAP);
6075 udelay(25);
6076 }
6077
6078 status = inb(ha->io_addr + IPS_REG_FLDP);
6079
6080 if (status & 0xC0)
6081 break;
6082
6083 MDELAY(1);
6084 timeout--;
6085 }
6086
6087 return (1);
6088 }
6089
6090
	if (status & 0x08)
		return (1);

	if (status & 0x30)
		return (1);
6099
6100
6101
6102 outb(0x50, ha->io_addr + IPS_REG_FLDP);
6103 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6104 udelay(25);
6105
6106
6107 outb(0xFF, ha->io_addr + IPS_REG_FLDP);
6108 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6109 udelay(25);
6110
6111 return (0);
6112}
6113
6114
6115
6116
6117
6118
6119
6120
6121
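/*
 * ips_erase_bios_memio - memory-mapped variant of ips_erase_bios.
 */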
6122static int
6123ips_erase_bios_memio(ips_ha_t * ha)
6124{
6125 int timeout;
6126 uint8_t status;
6127
6128 METHOD_TRACE("ips_erase_bios_memio", 1);
6129
6130 status = 0;
6131
6132
6133 writel(0, ha->mem_ptr + IPS_REG_FLAP);
6134 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6135 udelay(25);
6136
6137 writeb(0x50, ha->mem_ptr + IPS_REG_FLDP);
6138 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6139 udelay(25);
6140
6141
6142 writeb(0x20, ha->mem_ptr + IPS_REG_FLDP);
6143 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6144 udelay(25);
6145
6146
6147 writeb(0xD0, ha->mem_ptr + IPS_REG_FLDP);
6148 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6149 udelay(25);
6150
6151
6152 writeb(0x70, ha->mem_ptr + IPS_REG_FLDP);
6153 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6154 udelay(25);
6155
6156 timeout = 80000;
6157
6158 while (timeout > 0) {
6159 if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
6160 writel(0, ha->mem_ptr + IPS_REG_FLAP);
6161 udelay(25);
6162 }
6163
6164 status = readb(ha->mem_ptr + IPS_REG_FLDP);
6165
6166 if (status & 0x80)
6167 break;
6168
6169 MDELAY(1);
6170 timeout--;
6171 }
6172
6173
6174 if (timeout <= 0) {
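		/* the erase timed out: try to suspend it before giving up */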
6175
6176
6177
6178 writeb(0xB0, ha->mem_ptr + IPS_REG_FLDP);
6179 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6180 udelay(25);
6181
6182
6183 timeout = 10000;
6184 while (timeout > 0) {
6185 if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
6186 writel(0, ha->mem_ptr + IPS_REG_FLAP);
6187 udelay(25);
6188 }
6189
6190 status = readb(ha->mem_ptr + IPS_REG_FLDP);
6191
6192 if (status & 0xC0)
6193 break;
6194
6195 MDELAY(1);
6196 timeout--;
6197 }
6198
6199 return (1);
6200 }
6201
6202
	if (status & 0x08)
		return (1);

	if (status & 0x30)
		return (1);
6211
6212
6213
6214 writeb(0x50, ha->mem_ptr + IPS_REG_FLDP);
6215 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6216 udelay(25);
6217
6218
6219 writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
6220 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6221 udelay(25);
6222
6223 return (0);
6224}
6225
6226
6227
6228
6229
6230
6231
6232
6233
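/*
 * ips_program_bios - program the flash BIOS one byte at a time (port I/O),
 * polling the flash status after each write and aborting on error.
 */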
6234static int
6235ips_program_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize,
6236 uint32_t offset)
6237{
6238 int i;
6239 int timeout;
6240 uint8_t status = 0;
6241
6242 METHOD_TRACE("ips_program_bios", 1);
6243
6244 status = 0;
6245
6246 for (i = 0; i < buffersize; i++) {
6247
6248 outl(i + offset, ha->io_addr + IPS_REG_FLAP);
6249 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6250 udelay(25);
6251
6252 outb(0x40, ha->io_addr + IPS_REG_FLDP);
6253 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6254 udelay(25);
6255
6256 outb(buffer[i], ha->io_addr + IPS_REG_FLDP);
6257 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6258 udelay(25);
6259
6260
6261 timeout = 1000;
6262 while (timeout > 0) {
6263 if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
6264 outl(0, ha->io_addr + IPS_REG_FLAP);
6265 udelay(25);
6266 }
6267
6268 status = inb(ha->io_addr + IPS_REG_FLDP);
6269
6270 if (status & 0x80)
6271 break;
6272
6273 MDELAY(1);
6274 timeout--;
6275 }
6276
6277 if (timeout == 0) {
6278
6279 outl(0, ha->io_addr + IPS_REG_FLAP);
6280 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6281 udelay(25);
6282
6283 outb(0xFF, ha->io_addr + IPS_REG_FLDP);
6284 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6285 udelay(25);
6286
6287 return (1);
6288 }
6289
6290
6291 if (status & 0x18) {
6292
6293 outl(0, ha->io_addr + IPS_REG_FLAP);
6294 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6295 udelay(25);
6296
6297 outb(0xFF, ha->io_addr + IPS_REG_FLDP);
6298 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6299 udelay(25);
6300
6301 return (1);
6302 }
6303 }
6304
6305
6306 outl(0, ha->io_addr + IPS_REG_FLAP);
6307 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6308 udelay(25);
6309
6310 outb(0xFF, ha->io_addr + IPS_REG_FLDP);
6311 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6312 udelay(25);
6313
6314 return (0);
6315}
6316
6317
6318
6319
6320
6321
6322
6323
6324
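/*
 * ips_program_bios_memio - memory-mapped variant of ips_program_bios.
 */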
6325static int
6326ips_program_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize,
6327 uint32_t offset)
6328{
6329 int i;
6330 int timeout;
6331 uint8_t status = 0;
6332
6333 METHOD_TRACE("ips_program_bios_memio", 1);
6334
6335 status = 0;
6336
6337 for (i = 0; i < buffersize; i++) {
6338
6339 writel(i + offset, ha->mem_ptr + IPS_REG_FLAP);
6340 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6341 udelay(25);
6342
6343 writeb(0x40, ha->mem_ptr + IPS_REG_FLDP);
6344 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6345 udelay(25);
6346
6347 writeb(buffer[i], ha->mem_ptr + IPS_REG_FLDP);
6348 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6349 udelay(25);
6350
6351
6352 timeout = 1000;
6353 while (timeout > 0) {
6354 if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
6355 writel(0, ha->mem_ptr + IPS_REG_FLAP);
6356 udelay(25);
6357 }
6358
6359 status = readb(ha->mem_ptr + IPS_REG_FLDP);
6360
6361 if (status & 0x80)
6362 break;
6363
6364 MDELAY(1);
6365 timeout--;
6366 }
6367
6368 if (timeout == 0) {
6369
6370 writel(0, ha->mem_ptr + IPS_REG_FLAP);
6371 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6372 udelay(25);
6373
6374 writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
6375 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6376 udelay(25);
6377
6378 return (1);
6379 }
6380
6381
6382 if (status & 0x18) {
6383
6384 writel(0, ha->mem_ptr + IPS_REG_FLAP);
6385 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6386 udelay(25);
6387
6388 writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
6389 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6390 udelay(25);
6391
6392 return (1);
6393 }
6394 }
6395
6396
6397 writel(0, ha->mem_ptr + IPS_REG_FLAP);
6398 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6399 udelay(25);
6400
6401 writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
6402 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6403 udelay(25);
6404
6405 return (0);
6406}
6407
6408
6409
6410
6411
6412
6413
6414
6415
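/*
 * ips_verify_bios - verify the flash BIOS (port I/O): check the 0x55AA
 * signature, then checksum the remaining bytes; returns 0 on success.
 */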
6416static int
6417ips_verify_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize,
6418 uint32_t offset)
6419{
6420 uint8_t checksum;
6421 int i;
6422
6423 METHOD_TRACE("ips_verify_bios", 1);
6424
6425
6426 outl(0, ha->io_addr + IPS_REG_FLAP);
6427 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6428 udelay(25);
6429
6430 if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55)
6431 return (1);
6432
6433 outl(1, ha->io_addr + IPS_REG_FLAP);
6434 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6435 udelay(25);
6436 if (inb(ha->io_addr + IPS_REG_FLDP) != 0xAA)
6437 return (1);
6438
6439 checksum = 0xff;
6440 for (i = 2; i < buffersize; i++) {
6441
6442 outl(i + offset, ha->io_addr + IPS_REG_FLAP);
6443 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6444 udelay(25);
6445
6446 checksum = (uint8_t) checksum + inb(ha->io_addr + IPS_REG_FLDP);
6447 }
6448
	if (checksum != 0)
		return (1);
	else
		return (0);
6455}
6456
6457
6458
6459
6460
6461
6462
6463
6464
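/*
 * ips_verify_bios_memio - memory-mapped variant of ips_verify_bios.
 */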
6465static int
6466ips_verify_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize,
6467 uint32_t offset)
6468{
6469 uint8_t checksum;
6470 int i;
6471
6472 METHOD_TRACE("ips_verify_bios_memio", 1);
6473
6474
6475 writel(0, ha->mem_ptr + IPS_REG_FLAP);
6476 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6477 udelay(25);
6478
6479 if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0x55)
6480 return (1);
6481
6482 writel(1, ha->mem_ptr + IPS_REG_FLAP);
6483 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6484 udelay(25);
6485 if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0xAA)
6486 return (1);
6487
6488 checksum = 0xff;
6489 for (i = 2; i < buffersize; i++) {
6490
6491 writel(i + offset, ha->mem_ptr + IPS_REG_FLAP);
6492 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6493 udelay(25);
6494
6495 checksum =
6496 (uint8_t) checksum + readb(ha->mem_ptr + IPS_REG_FLDP);
6497 }
6498
	if (checksum != 0)
		return (1);
	else
		return (0);
6505}
6506
6507
6508
6509
6510
6511
6512
6513
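/*
 * ips_abort_init - tear down a controller that failed initialization and
 * clear its global slots.
 */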
6514static int
6515ips_abort_init(ips_ha_t * ha, int index)
6516{
6517 ha->active = 0;
6518 ips_free(ha);
6519 ips_ha[index] = NULL;
6520 ips_sh[index] = NULL;
6521 return -1;
6522}
6523
6524
6525
6526
6527
6528
6529
6530
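/*
 * ips_shift_controllers - move the controller at highindex down to
 * lowindex, shifting the entries in between up by one slot.
 */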
6531static void
6532ips_shift_controllers(int lowindex, int highindex)
6533{
6534 ips_ha_t *ha_sav = ips_ha[highindex];
6535 struct Scsi_Host *sh_sav = ips_sh[highindex];
6536 int i;
6537
6538 for (i = highindex; i > lowindex; i--) {
6539 ips_ha[i] = ips_ha[i - 1];
6540 ips_sh[i] = ips_sh[i - 1];
6541 ips_ha[i]->host_num = i;
6542 }
6543 ha_sav->host_num = lowindex;
6544 ips_ha[lowindex] = ha_sav;
6545 ips_sh[lowindex] = sh_sav;
6546}
6547
6548
6549
6550
6551
6552
6553
6554
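/*
 * ips_order_controllers - order the controllers according to the
 * adapter_order list in NVRAM page 5; if none is stored, put the 5i family
 * first, followed by the 4L/4M/4Lx/4Mx family.
 */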
6555static void
6556ips_order_controllers(void)
6557{
6558 int i, j, tmp, position = 0;
6559 IPS_NVRAM_P5 *nvram;
6560 if (!ips_ha[0])
6561 return;
6562 nvram = ips_ha[0]->nvram;
6563
6564 if (nvram->adapter_order[0]) {
6565 for (i = 1; i <= nvram->adapter_order[0]; i++) {
6566 for (j = position; j < ips_num_controllers; j++) {
6567 switch (ips_ha[j]->ad_type) {
6568 case IPS_ADTYPE_SERVERAID6M:
6569 case IPS_ADTYPE_SERVERAID7M:
6570 if (nvram->adapter_order[i] == 'M') {
6571 ips_shift_controllers(position,
6572 j);
6573 position++;
6574 }
6575 break;
6576 case IPS_ADTYPE_SERVERAID4L:
6577 case IPS_ADTYPE_SERVERAID4M:
6578 case IPS_ADTYPE_SERVERAID4MX:
6579 case IPS_ADTYPE_SERVERAID4LX:
6580 if (nvram->adapter_order[i] == 'N') {
6581 ips_shift_controllers(position,
6582 j);
6583 position++;
6584 }
6585 break;
6586 case IPS_ADTYPE_SERVERAID6I:
6587 case IPS_ADTYPE_SERVERAID5I2:
6588 case IPS_ADTYPE_SERVERAID5I1:
6589 case IPS_ADTYPE_SERVERAID7k:
6590 if (nvram->adapter_order[i] == 'S') {
6591 ips_shift_controllers(position,
6592 j);
6593 position++;
6594 }
6595 break;
6596 case IPS_ADTYPE_SERVERAID:
6597 case IPS_ADTYPE_SERVERAID2:
6598 case IPS_ADTYPE_NAVAJO:
6599 case IPS_ADTYPE_KIOWA:
6600 case IPS_ADTYPE_SERVERAID3L:
6601 case IPS_ADTYPE_SERVERAID3:
6602 case IPS_ADTYPE_SERVERAID4H:
6603 if (nvram->adapter_order[i] == 'A') {
6604 ips_shift_controllers(position,
6605 j);
6606 position++;
6607 }
6608 break;
6609 default:
6610 break;
6611 }
6612 }
6613 }
6614
6615 return;
6616 }
6617
6618 tmp = 0;
6619 for (i = position; i < ips_num_controllers; i++) {
6620 if (ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID5I2 ||
6621 ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID5I1) {
6622 ips_shift_controllers(position, i);
6623 position++;
6624 tmp = 1;
6625 }
6626 }
6627
6628 if (!tmp)
6629 return;
6630 for (i = position; i < ips_num_controllers; i++) {
6631 if (ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4L ||
6632 ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4M ||
6633 ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4LX ||
6634 ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4MX) {
6635 ips_shift_controllers(position, i);
6636 position++;
6637 }
6638 }
6639
6640 return;
6641}
6642
6643
6644
6645
6646
6647
6648
6649
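/*
 * ips_register_scsi - allocate a Scsi_Host, copy the ha structure into it,
 * move the IRQ over to the new copy, and register and scan the host.
 */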
6650static int
6651ips_register_scsi(int index)
6652{
6653 struct Scsi_Host *sh;
6654 ips_ha_t *ha, *oldha = ips_ha[index];
6655 sh = scsi_host_alloc(&ips_driver_template, sizeof (ips_ha_t));
6656 if (!sh) {
6657 IPS_PRINTK(KERN_WARNING, oldha->pcidev,
6658 "Unable to register controller with SCSI subsystem\n");
6659 return -1;
6660 }
6661 ha = IPS_HA(sh);
6662 memcpy(ha, oldha, sizeof (ips_ha_t));
6663 free_irq(oldha->pcidev->irq, oldha);
6664
6665 if (request_irq(ha->pcidev->irq, do_ipsintr, IRQF_SHARED, ips_name, ha)) {
6666 IPS_PRINTK(KERN_WARNING, ha->pcidev,
6667 "Unable to install interrupt handler\n");
6668 goto err_out_sh;
6669 }
6670
6671 kfree(oldha);
6672
6673
6674 sh->unique_id = (ha->io_addr) ? ha->io_addr : ha->mem_addr;
6675 sh->sg_tablesize = sh->hostt->sg_tablesize;
6676 sh->can_queue = sh->hostt->can_queue;
6677 sh->cmd_per_lun = sh->hostt->cmd_per_lun;
6678 sh->max_sectors = 128;
6679
6680 sh->max_id = ha->ntargets;
6681 sh->max_lun = ha->nlun;
6682 sh->max_channel = ha->nbus - 1;
6683 sh->can_queue = ha->max_cmds - 1;
6684
6685 if (scsi_add_host(sh, &ha->pcidev->dev))
6686 goto err_out;
6687
6688 ips_sh[index] = sh;
6689 ips_ha[index] = ha;
6690
6691 scsi_scan_host(sh);
6692
6693 return 0;
6694
6695err_out:
6696 free_irq(ha->pcidev->irq, ha);
6697err_out_sh:
6698 scsi_host_put(sh);
6699 return -1;
6700}
6701
6702
6703
6704
6705
6706
6707
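/*
 * ips_remove_device - PCI remove callback: release the SCSI host and the
 * PCI regions.
 */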
6708static void
6709ips_remove_device(struct pci_dev *pci_dev)
6710{
6711 struct Scsi_Host *sh = pci_get_drvdata(pci_dev);
6712
6713 pci_set_drvdata(pci_dev, NULL);
6714
6715 ips_release(sh);
6716
6717 pci_release_regions(pci_dev);
6718 pci_disable_device(pci_dev);
6719}
6720
6721
6722
6723
6724
6725
6726
6727
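/*
 * ips_module_init - module entry point: register the PCI driver, order the
 * controllers, run detection and register the reboot notifier.
 */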
6728static int __init
6729ips_module_init(void)
6730{
6731#if !defined(__i386__) && !defined(__ia64__) && !defined(__x86_64__)
6732 printk(KERN_ERR "ips: This driver has only been tested on the x86/ia64/x86_64 platforms\n");
6733 add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
6734#endif
6735
6736 if (pci_register_driver(&ips_pci_driver) < 0)
6737 return -ENODEV;
6738 ips_driver_template.module = THIS_MODULE;
6739 ips_order_controllers();
6740 if (!ips_detect(&ips_driver_template)) {
6741 pci_unregister_driver(&ips_pci_driver);
6742 return -ENODEV;
6743 }
6744 register_reboot_notifier(&ips_notifier);
6745 return 0;
6746}
6747
6748
6749
6750
6751
6752
6753
6754
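/*
 * ips_module_exit - module exit point: unregister the PCI driver and the
 * reboot notifier.
 */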
6755static void __exit
6756ips_module_exit(void)
6757{
6758 pci_unregister_driver(&ips_pci_driver);
6759 unregister_reboot_notifier(&ips_notifier);
6760}
6761
6762module_init(ips_module_init);
6763module_exit(ips_module_exit);
6764
6765
6766
6767
6768
6769
6770
6771
6772
6773
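/*
 * ips_insert_device - PCI probe callback: enable the device, claim its
 * regions and run the two-phase controller initialization.
 */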
6774static int
6775ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent)
6776{
6777 int index = -1;
6778 int rc;
6779
6780 METHOD_TRACE("ips_insert_device", 1);
6781 rc = pci_enable_device(pci_dev);
6782 if (rc)
6783 return rc;
6784
6785 rc = pci_request_regions(pci_dev, "ips");
6786 if (rc)
6787 goto err_out;
6788
6789 rc = ips_init_phase1(pci_dev, &index);
6790 if (rc == SUCCESS)
6791 rc = ips_init_phase2(index);
6792
6793 if (ips_hotplug)
6794 if (ips_register_scsi(index)) {
6795 ips_free(ips_ha[index]);
6796 rc = -1;
6797 }
6798
6799 if (rc == SUCCESS)
6800 ips_num_controllers++;
6801
6802 ips_next_controller = ips_num_controllers;
6803
6804 if (rc < 0) {
6805 rc = -ENODEV;
6806 goto err_out_regions;
6807 }
6808
6809 pci_set_drvdata(pci_dev, ips_sh[index]);
6810 return 0;
6811
6812err_out_regions:
6813 pci_release_regions(pci_dev);
6814err_out:
6815 pci_disable_device(pci_dev);
6816 return rc;
6817}
6818
6819
6820
6821
6822
6823
6824
6825
6826
6827
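/*
 * ips_init_phase1 - map the adapter's I/O and memory resources, allocate
 * the DMA and host data structures, and make sure the firmware is up.
 */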
6828static int
6829ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
6830{
6831 ips_ha_t *ha;
6832 uint32_t io_addr;
6833 uint32_t mem_addr;
6834 uint32_t io_len;
6835 uint32_t mem_len;
6836 int j;
6837 int index;
6838 dma_addr_t dma_address;
6839 char __iomem *ioremap_ptr;
6840 char __iomem *mem_ptr;
6841 uint32_t IsDead;
6842
6843 METHOD_TRACE("ips_init_phase1", 1);
6844 index = IPS_MAX_ADAPTERS;
6845 for (j = 0; j < IPS_MAX_ADAPTERS; j++) {
6846 if (ips_ha[j] == NULL) {
6847 index = j;
6848 break;
6849 }
6850 }
6851
6852 if (index >= IPS_MAX_ADAPTERS)
6853 return -1;
6854
6855
6856 mem_addr = 0;
6857 io_addr = 0;
6858 mem_len = 0;
6859 io_len = 0;
6860
6861 for (j = 0; j < 2; j++) {
6862 if (!pci_resource_start(pci_dev, j))
6863 break;
6864
6865 if (pci_resource_flags(pci_dev, j) & IORESOURCE_IO) {
6866 io_addr = pci_resource_start(pci_dev, j);
6867 io_len = pci_resource_len(pci_dev, j);
6868 } else {
6869 mem_addr = pci_resource_start(pci_dev, j);
6870 mem_len = pci_resource_len(pci_dev, j);
6871 }
6872 }
6873
6874
6875 if (mem_addr) {
6876 uint32_t base;
6877 uint32_t offs;
6878
6879 base = mem_addr & PAGE_MASK;
6880 offs = mem_addr - base;
6881 ioremap_ptr = ioremap(base, PAGE_SIZE);
6882 if (!ioremap_ptr)
6883 return -1;
6884 mem_ptr = ioremap_ptr + offs;
6885 } else {
6886 ioremap_ptr = NULL;
6887 mem_ptr = NULL;
6888 }
6889
6890
6891 ha = kzalloc(sizeof (ips_ha_t), GFP_KERNEL);
6892 if (ha == NULL) {
6893 IPS_PRINTK(KERN_WARNING, pci_dev,
6894 "Unable to allocate temporary ha struct\n");
6895 return -1;
6896 }
6897
6898 ips_sh[index] = NULL;
6899 ips_ha[index] = ha;
6900 ha->active = 1;
6901
6902
6903 ha->io_addr = io_addr;
6904 ha->io_len = io_len;
6905 ha->mem_addr = mem_addr;
6906 ha->mem_len = mem_len;
6907 ha->mem_ptr = mem_ptr;
6908 ha->ioremap_ptr = ioremap_ptr;
6909 ha->host_num = (uint32_t) index;
6910 ha->slot_num = PCI_SLOT(pci_dev->devfn);
6911 ha->pcidev = pci_dev;
6912
6913
6914
6915
6916
6917
6918
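	/* use 64-bit DMA when the adapter supports enhanced scatter/gather,
	 * otherwise fall back to a 32-bit DMA mask */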
6919 if (sizeof(dma_addr_t) > 4 && IPS_HAS_ENH_SGLIST(ha) &&
6920 !dma_set_mask(&ha->pcidev->dev, DMA_BIT_MASK(64))) {
6921 (ha)->flags |= IPS_HA_ENH_SG;
6922 } else {
6923 if (dma_set_mask(&ha->pcidev->dev, DMA_BIT_MASK(32)) != 0) {
6924 printk(KERN_WARNING "Unable to set DMA Mask\n");
6925 return ips_abort_init(ha, index);
6926 }
6927 }
	if (ips_cd_boot && !ips_FlashData) {
6929 ips_FlashData = dma_alloc_coherent(&pci_dev->dev,
6930 PAGE_SIZE << 7, &ips_flashbusaddr, GFP_KERNEL);
6931 }
6932
6933 ha->enq = dma_alloc_coherent(&pci_dev->dev, sizeof (IPS_ENQ),
6934 &ha->enq_busaddr, GFP_KERNEL);
6935 if (!ha->enq) {
6936 IPS_PRINTK(KERN_WARNING, pci_dev,
6937 "Unable to allocate host inquiry structure\n");
6938 return ips_abort_init(ha, index);
6939 }
6940
6941 ha->adapt = dma_alloc_coherent(&pci_dev->dev,
6942 sizeof (IPS_ADAPTER) + sizeof (IPS_IO_CMD),
6943 &dma_address, GFP_KERNEL);
6944 if (!ha->adapt) {
6945 IPS_PRINTK(KERN_WARNING, pci_dev,
6946 "Unable to allocate host adapt & dummy structures\n");
6947 return ips_abort_init(ha, index);
6948 }
6949 ha->adapt->hw_status_start = dma_address;
6950 ha->dummy = (void *) (ha->adapt + 1);
6951
6952
6953
6954 ha->logical_drive_info = dma_alloc_coherent(&pci_dev->dev,
6955 sizeof (IPS_LD_INFO), &dma_address, GFP_KERNEL);
6956 if (!ha->logical_drive_info) {
6957 IPS_PRINTK(KERN_WARNING, pci_dev,
6958 "Unable to allocate logical drive info structure\n");
6959 return ips_abort_init(ha, index);
6960 }
6961 ha->logical_drive_info_dma_addr = dma_address;
6962
6963
6964 ha->conf = kmalloc(sizeof (IPS_CONF), GFP_KERNEL);
6965
6966 if (!ha->conf) {
6967 IPS_PRINTK(KERN_WARNING, pci_dev,
6968 "Unable to allocate host conf structure\n");
6969 return ips_abort_init(ha, index);
6970 }
6971
6972 ha->nvram = kmalloc(sizeof (IPS_NVRAM_P5), GFP_KERNEL);
6973
6974 if (!ha->nvram) {
6975 IPS_PRINTK(KERN_WARNING, pci_dev,
6976 "Unable to allocate host NVRAM structure\n");
6977 return ips_abort_init(ha, index);
6978 }
6979
6980 ha->subsys = kmalloc(sizeof (IPS_SUBSYS), GFP_KERNEL);
6981
6982 if (!ha->subsys) {
6983 IPS_PRINTK(KERN_WARNING, pci_dev,
6984 "Unable to allocate host subsystem structure\n");
6985 return ips_abort_init(ha, index);
6986 }
6987
6988
6989
6990 if (ips_ioctlsize < PAGE_SIZE)
6991 ips_ioctlsize = PAGE_SIZE;
6992
6993 ha->ioctl_data = dma_alloc_coherent(&pci_dev->dev, ips_ioctlsize,
6994 &ha->ioctl_busaddr, GFP_KERNEL);
6995 ha->ioctl_len = ips_ioctlsize;
6996 if (!ha->ioctl_data) {
6997 IPS_PRINTK(KERN_WARNING, pci_dev,
6998 "Unable to allocate IOCTL data\n");
6999 return ips_abort_init(ha, index);
7000 }
7001
7002
7003
7004
7005 ips_setup_funclist(ha);
7006
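	/* a Morpheus/Marco adapter that reports itself dead in the i960
	 * message register needs a reset before it can be used */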
7007 if ((IPS_IS_MORPHEUS(ha)) || (IPS_IS_MARCO(ha))) {
7008
7009 IsDead = readl(ha->mem_ptr + IPS_REG_I960_MSG1);
7010 if (IsDead == 0xDEADBEEF) {
7011 ips_reset_morpheus(ha);
7012 }
7013 }
7014
7015
7016
7017
7018
7019 if (!(*ha->func.isinit) (ha)) {
7020 if (!(*ha->func.init) (ha)) {
7021
7022
7023
7024 IPS_PRINTK(KERN_WARNING, pci_dev,
7025 "Unable to initialize controller\n");
7026 return ips_abort_init(ha, index);
7027 }
7028 }
7029
7030 *indexPtr = index;
7031 return SUCCESS;
7032}
7033
7034
7035
7036
7037
7038
7039
7040
7041
7042
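/*
 * ips_init_phase2 - hook the interrupt, allocate a single command block to
 * run ips_hainit(), then allocate the full set of command blocks.
 */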
7043static int
7044ips_init_phase2(int index)
7045{
7046 ips_ha_t *ha;
7047
7048 ha = ips_ha[index];
7049
7050 METHOD_TRACE("ips_init_phase2", 1);
7051 if (!ha->active) {
7052 ips_ha[index] = NULL;
7053 return -1;
7054 }
7055
7056
7057 if (request_irq(ha->pcidev->irq, do_ipsintr, IRQF_SHARED, ips_name, ha)) {
7058 IPS_PRINTK(KERN_WARNING, ha->pcidev,
7059 "Unable to install interrupt handler\n");
7060 return ips_abort_init(ha, index);
7061 }
7062
7063
7064
7065
7066 ha->max_cmds = 1;
7067 if (!ips_allocatescbs(ha)) {
7068 IPS_PRINTK(KERN_WARNING, ha->pcidev,
7069 "Unable to allocate a CCB\n");
7070 free_irq(ha->pcidev->irq, ha);
7071 return ips_abort_init(ha, index);
7072 }
7073
7074 if (!ips_hainit(ha)) {
7075 IPS_PRINTK(KERN_WARNING, ha->pcidev,
7076 "Unable to initialize controller\n");
7077 free_irq(ha->pcidev->irq, ha);
7078 return ips_abort_init(ha, index);
7079 }
7080
7081 ips_deallocatescbs(ha, 1);
7082
7083
7084 if (!ips_allocatescbs(ha)) {
7085 IPS_PRINTK(KERN_WARNING, ha->pcidev,
7086 "Unable to allocate CCBs\n");
7087 free_irq(ha->pcidev->irq, ha);
7088 return ips_abort_init(ha, index);
7089 }
7090
7091 return SUCCESS;
7092}
7093
7094MODULE_LICENSE("GPL");
7095MODULE_DESCRIPTION("IBM ServeRAID Adapter Driver " IPS_VER_STRING);
7096MODULE_VERSION(IPS_VER_STRING);