1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164#include <asm/io.h>
165#include <asm/byteorder.h>
166#include <asm/page.h>
167#include <linux/stddef.h>
168#include <linux/string.h>
169#include <linux/errno.h>
170#include <linux/kernel.h>
171#include <linux/ioport.h>
172#include <linux/slab.h>
173#include <linux/delay.h>
174#include <linux/pci.h>
175#include <linux/proc_fs.h>
176#include <linux/reboot.h>
177#include <linux/interrupt.h>
178
179#include <linux/blkdev.h>
180#include <linux/types.h>
181#include <linux/dma-mapping.h>
182
183#include <scsi/sg.h>
184#include "scsi.h"
185#include <scsi/scsi_host.h>
186
187#include "ips.h"
188
189#include <linux/module.h>
190
191#include <linux/stat.h>
192
193#include <linux/spinlock.h>
194#include <linux/init.h>
195
196#include <linux/smp.h>
197
#ifdef MODULE
/* "ips=..." option string when built as a module; parsed by ips_setup(). */
static char *ips = NULL;
module_param(ips, charp, 0);
#endif
202
203
204
205
/* Driver version string pieces, concatenated in ips_info(). */
#define IPS_VERSION_HIGH IPS_VER_MAJOR_STRING "." IPS_VER_MINOR_STRING
#define IPS_VERSION_LOW "." IPS_VER_BUILD_STRING " "

/* DMA mapping direction for an scb: passthru commands and commands with
 * no data direction are mapped bidirectionally; everything else uses the
 * direction recorded in the SCSI command itself. */
#define IPS_DMA_DIR(scb) ((!scb->scsi_cmd || ips_is_passthru(scb->scsi_cmd) || \
                         DMA_NONE == scb->scsi_cmd->sc_data_direction) ? \
                         PCI_DMA_BIDIRECTIONAL : \
                         scb->scsi_cmd->sc_data_direction)

/* Debug tracing macros: compile away entirely unless IPS_DEBUG is set. */
#ifdef IPS_DEBUG
#define METHOD_TRACE(s, i) if (ips_debug >= (i+10)) printk(KERN_NOTICE s "\n");
#define DEBUG(i, s) if (ips_debug >= i) printk(KERN_NOTICE s "\n");
#define DEBUG_VAR(i, s, v...) if (ips_debug >= i) printk(KERN_NOTICE s "\n", v);
#else
#define METHOD_TRACE(s, i)
#define DEBUG(i, s)
#define DEBUG_VAR(i, s, v...)
#endif
223
224
225
226
227static int ips_eh_abort(struct scsi_cmnd *);
228static int ips_eh_reset(struct scsi_cmnd *);
229static int ips_queue(struct Scsi_Host *, struct scsi_cmnd *);
230static const char *ips_info(struct Scsi_Host *);
231static irqreturn_t do_ipsintr(int, void *);
232static int ips_hainit(ips_ha_t *);
233static int ips_map_status(ips_ha_t *, ips_scb_t *, ips_stat_t *);
234static int ips_send_wait(ips_ha_t *, ips_scb_t *, int, int);
235static int ips_send_cmd(ips_ha_t *, ips_scb_t *);
236static int ips_online(ips_ha_t *, ips_scb_t *);
237static int ips_inquiry(ips_ha_t *, ips_scb_t *);
238static int ips_rdcap(ips_ha_t *, ips_scb_t *);
239static int ips_msense(ips_ha_t *, ips_scb_t *);
240static int ips_reqsen(ips_ha_t *, ips_scb_t *);
241static int ips_deallocatescbs(ips_ha_t *, int);
242static int ips_allocatescbs(ips_ha_t *);
243static int ips_reset_copperhead(ips_ha_t *);
244static int ips_reset_copperhead_memio(ips_ha_t *);
245static int ips_reset_morpheus(ips_ha_t *);
246static int ips_issue_copperhead(ips_ha_t *, ips_scb_t *);
247static int ips_issue_copperhead_memio(ips_ha_t *, ips_scb_t *);
248static int ips_issue_i2o(ips_ha_t *, ips_scb_t *);
249static int ips_issue_i2o_memio(ips_ha_t *, ips_scb_t *);
250static int ips_isintr_copperhead(ips_ha_t *);
251static int ips_isintr_copperhead_memio(ips_ha_t *);
252static int ips_isintr_morpheus(ips_ha_t *);
253static int ips_wait(ips_ha_t *, int, int);
254static int ips_write_driver_status(ips_ha_t *, int);
255static int ips_read_adapter_status(ips_ha_t *, int);
256static int ips_read_subsystem_parameters(ips_ha_t *, int);
257static int ips_read_config(ips_ha_t *, int);
258static int ips_clear_adapter(ips_ha_t *, int);
259static int ips_readwrite_page5(ips_ha_t *, int, int);
260static int ips_init_copperhead(ips_ha_t *);
261static int ips_init_copperhead_memio(ips_ha_t *);
262static int ips_init_morpheus(ips_ha_t *);
263static int ips_isinit_copperhead(ips_ha_t *);
264static int ips_isinit_copperhead_memio(ips_ha_t *);
265static int ips_isinit_morpheus(ips_ha_t *);
266static int ips_erase_bios(ips_ha_t *);
267static int ips_program_bios(ips_ha_t *, char *, uint32_t, uint32_t);
268static int ips_verify_bios(ips_ha_t *, char *, uint32_t, uint32_t);
269static int ips_erase_bios_memio(ips_ha_t *);
270static int ips_program_bios_memio(ips_ha_t *, char *, uint32_t, uint32_t);
271static int ips_verify_bios_memio(ips_ha_t *, char *, uint32_t, uint32_t);
272static int ips_flash_copperhead(ips_ha_t *, ips_passthru_t *, ips_scb_t *);
273static int ips_flash_bios(ips_ha_t *, ips_passthru_t *, ips_scb_t *);
274static int ips_flash_firmware(ips_ha_t *, ips_passthru_t *, ips_scb_t *);
275static void ips_free_flash_copperhead(ips_ha_t * ha);
276static void ips_get_bios_version(ips_ha_t *, int);
277static void ips_identify_controller(ips_ha_t *);
278static void ips_chkstatus(ips_ha_t *, IPS_STATUS *);
279static void ips_enable_int_copperhead(ips_ha_t *);
280static void ips_enable_int_copperhead_memio(ips_ha_t *);
281static void ips_enable_int_morpheus(ips_ha_t *);
282static int ips_intr_copperhead(ips_ha_t *);
283static int ips_intr_morpheus(ips_ha_t *);
284static void ips_next(ips_ha_t *, int);
285static void ipsintr_blocking(ips_ha_t *, struct ips_scb *);
286static void ipsintr_done(ips_ha_t *, struct ips_scb *);
287static void ips_done(ips_ha_t *, ips_scb_t *);
288static void ips_free(ips_ha_t *);
289static void ips_init_scb(ips_ha_t *, ips_scb_t *);
290static void ips_freescb(ips_ha_t *, ips_scb_t *);
291static void ips_setup_funclist(ips_ha_t *);
292static void ips_statinit(ips_ha_t *);
293static void ips_statinit_memio(ips_ha_t *);
294static void ips_fix_ffdc_time(ips_ha_t *, ips_scb_t *, time_t);
295static void ips_ffdc_reset(ips_ha_t *, int);
296static void ips_ffdc_time(ips_ha_t *);
297static uint32_t ips_statupd_copperhead(ips_ha_t *);
298static uint32_t ips_statupd_copperhead_memio(ips_ha_t *);
299static uint32_t ips_statupd_morpheus(ips_ha_t *);
300static ips_scb_t *ips_getscb(ips_ha_t *);
301static void ips_putq_scb_head(ips_scb_queue_t *, ips_scb_t *);
302static void ips_putq_wait_tail(ips_wait_queue_entry_t *, struct scsi_cmnd *);
303static void ips_putq_copp_tail(ips_copp_queue_t *,
304 ips_copp_wait_item_t *);
305static ips_scb_t *ips_removeq_scb_head(ips_scb_queue_t *);
306static ips_scb_t *ips_removeq_scb(ips_scb_queue_t *, ips_scb_t *);
307static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_entry_t *);
308static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_entry_t *,
309 struct scsi_cmnd *);
310static ips_copp_wait_item_t *ips_removeq_copp(ips_copp_queue_t *,
311 ips_copp_wait_item_t *);
312static ips_copp_wait_item_t *ips_removeq_copp_head(ips_copp_queue_t *);
313
314static int ips_is_passthru(struct scsi_cmnd *);
315static int ips_make_passthru(ips_ha_t *, struct scsi_cmnd *, ips_scb_t *, int);
316static int ips_usrcmd(ips_ha_t *, ips_passthru_t *, ips_scb_t *);
317static void ips_cleanup_passthru(ips_ha_t *, ips_scb_t *);
318static void ips_scmd_buf_write(struct scsi_cmnd * scmd, void *data,
319 unsigned int count);
320static void ips_scmd_buf_read(struct scsi_cmnd * scmd, void *data,
321 unsigned int count);
322
323static int ips_write_info(struct Scsi_Host *, char *, int);
324static int ips_show_info(struct seq_file *, struct Scsi_Host *);
325static int ips_host_info(ips_ha_t *, struct seq_file *);
326static int ips_abort_init(ips_ha_t * ha, int index);
327static int ips_init_phase2(int index);
328
329static int ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr);
330static int ips_register_scsi(int index);
331
332static int ips_poll_for_flush_complete(ips_ha_t * ha);
333static void ips_flush_and_reset(ips_ha_t *ha);
334
335
336
337
static const char ips_name[] = "ips";
static struct Scsi_Host *ips_sh[IPS_MAX_ADAPTERS];	/* registered SCSI hosts, indexed by controller */
static ips_ha_t *ips_ha[IPS_MAX_ADAPTERS];	/* per-adapter driver state, same index as ips_sh */
static unsigned int ips_next_controller;
static unsigned int ips_num_controllers;
static unsigned int ips_released_controllers;
static int ips_hotplug;
static int ips_cmd_timeout = 60;	/* per-command timeout, seconds */
static int ips_reset_timeout = 60 * 5;	/* adapter reset timeout, seconds */
static int ips_force_memio = 1;	/* use memory-mapped I/O ("nommap" option clears this) */
static int ips_force_i2o = 1;	/* use I2O command delivery ("noi2o" option clears this) */
static int ips_ioctlsize = IPS_IOCTL_SIZE;	/* size of the ioctl buffer */
static int ips_cd_boot;	/* booting from the ServeRAID Manager CD */
static char *ips_FlashData = NULL;	/* CD boot - flash data buffer */
static dma_addr_t ips_flashbusaddr;
static long ips_FlashDataInUse;	/* CD boot - flash buffer in-use flag */
static uint32_t MaxLiteCmds = 32;	/* max active commands for a "Lite" adapter */
/* SCSI mid-layer host template; one host is instantiated per adapter
 * at registration time (ips_register_scsi). */
static struct scsi_host_template ips_driver_template = {
	.info = ips_info,
	.queuecommand = ips_queue,
	.eh_abort_handler = ips_eh_abort,
	.eh_host_reset_handler = ips_eh_reset,
	.proc_name = "ips",
	.show_info = ips_show_info,
	.write_info = ips_write_info,
	.slave_configure = ips_slave_configure,
	.bios_param = ips_biosparam,
	.this_id = -1,
	.sg_tablesize = IPS_MAX_SG,
	.cmd_per_lun = 3,
	.use_clustering = ENABLE_CLUSTERING,
	.no_write_same = 1,
};
371
372
373
/* PCI IDs claimed by this driver (vendor, device).  Any subsystem ID
 * is accepted; the adapter family is distinguished later via the
 * IPS_IS_* macros in ips_setup_funclist(). */
static struct pci_device_id ips_pci_table[] = {
	{ 0x1014, 0x002E, PCI_ANY_ID, PCI_ANY_ID, 0, 0 },
	{ 0x1014, 0x01BD, PCI_ANY_ID, PCI_ANY_ID, 0, 0 },
	{ 0x9005, 0x0250, PCI_ANY_ID, PCI_ANY_ID, 0, 0 },
	{ 0, }
};

MODULE_DEVICE_TABLE( pci, ips_pci_table );

static char ips_hot_plug_name[] = "ips";

static int ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent);
static void ips_remove_device(struct pci_dev *pci_dev);

/* Hot-plug PCI driver glue: probe/remove per adapter. */
static struct pci_driver ips_pci_driver = {
	.name = ips_hot_plug_name,
	.id_table = ips_pci_table,
	.probe = ips_insert_device,
	.remove = ips_remove_device,
};
394
395
396
397
398
static int ips_halt(struct notifier_block *nb, ulong event, void *buf);

#define MAX_ADAPTER_NAME 15

/* Marketing names indexed by (ha->ad_type - 1); printed by ips_info().
 * NOTE(review): the table holds 18 entries but MAX_ADAPTER_NAME is 15,
 * so the last three names (7t/7k/7M) are never printed — confirm
 * whether the constant is stale. */
static char ips_adapter_name[][30] = {
	"ServeRAID",
	"ServeRAID II",
	"ServeRAID on motherboard",
	"ServeRAID on motherboard",
	"ServeRAID 3H",
	"ServeRAID 3L",
	"ServeRAID 4H",
	"ServeRAID 4M",
	"ServeRAID 4L",
	"ServeRAID 4Mx",
	"ServeRAID 4Lx",
	"ServeRAID 5i",
	"ServeRAID 5i",
	"ServeRAID 6M",
	"ServeRAID 6i",
	"ServeRAID 7t",
	"ServeRAID 7k",
	"ServeRAID 7M"
};

/* Reboot notifier: flush adapter caches on shutdown/restart (ips_halt). */
static struct notifier_block ips_notifier = {
	ips_halt, NULL, 0
};
427
428
429
430
/* Expected data-transfer direction for each of the 256 possible SCSI
 * opcodes, indexed by cdb[0].  IPS_DATA_UNK marks opcodes with no
 * fixed direction known to the adapter. */
static char ips_command_direction[] = {
	IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT,
	IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK,
	IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
	IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_OUT,
	IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_OUT,
	IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_OUT,
	IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_IN,
	IPS_DATA_UNK, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK,
	IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_UNK,
	IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT,
	IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_NONE,
	IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT,
	IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT,
	IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_NONE,
	IPS_DATA_UNK, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_UNK,
	IPS_DATA_NONE, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK,
	IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
	IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
	IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
	IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
	IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
	IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
	IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
	IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
	IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
	IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
	IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
	IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
	IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
	IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
	IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
	IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
	IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
	IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_NONE,
	IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_OUT,
	IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_NONE,
	IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_IN,
	IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
	IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
	IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
	IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
	IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
	IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
	IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
	IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
	IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
	IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_OUT,
	IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
	IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
	IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
	IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK
};
484
485
486
487
488
489
490
491
492
493
494
495static int
496ips_setup(char *ips_str)
497{
498
499 int i;
500 char *key;
501 char *value;
502 IPS_OPTION options[] = {
503 {"noi2o", &ips_force_i2o, 0},
504 {"nommap", &ips_force_memio, 0},
505 {"ioctlsize", &ips_ioctlsize, IPS_IOCTL_SIZE},
506 {"cdboot", &ips_cd_boot, 0},
507 {"maxcmds", &MaxLiteCmds, 32},
508 };
509
510
511
512 while ((key = strsep(&ips_str, ",."))) {
513 if (!*key)
514 continue;
515 value = strchr(key, ':');
516 if (value)
517 *value++ = '\0';
518
519
520
521
522 for (i = 0; i < ARRAY_SIZE(options); i++) {
523 if (strncasecmp
524 (key, options[i].option_name,
525 strlen(options[i].option_name)) == 0) {
526 if (value)
527 *options[i].option_flag =
528 simple_strtoul(value, NULL, 0);
529 else
530 *options[i].option_flag =
531 options[i].option_value;
532 break;
533 }
534 }
535 }
536
537 return (1);
538}
539
540__setup("ips=", ips_setup);
541
542
543
544
545
546
547
548
549
550
551
552
553static int
554ips_detect(struct scsi_host_template * SHT)
555{
556 int i;
557
558 METHOD_TRACE("ips_detect", 1);
559
560#ifdef MODULE
561 if (ips)
562 ips_setup(ips);
563#endif
564
565 for (i = 0; i < ips_num_controllers; i++) {
566 if (ips_register_scsi(i))
567 ips_free(ips_ha[i]);
568 ips_released_controllers++;
569 }
570 ips_hotplug = 1;
571 return (ips_num_controllers);
572}
573
574
575
576
577
/*
 * ips_setup_funclist - fill the per-adapter hardware dispatch table
 *
 * Three hardware flavors are handled:
 *  - Morpheus / Marco: memory-mapped adapters with I2O-style delivery
 *  - Copperhead using memory-mapped I/O
 *  - Copperhead using port (PIO) I/O
 * The Copperhead variants additionally choose between I2O and classic
 * command delivery, and carry BIOS flash entry points.
 */
static void
ips_setup_funclist(ips_ha_t * ha)
{
	if (IPS_IS_MORPHEUS(ha) || IPS_IS_MARCO(ha)) {
		/* morpheus-family: memory-mapped, I2O delivery */
		ha->func.isintr = ips_isintr_morpheus;
		ha->func.isinit = ips_isinit_morpheus;
		ha->func.issue = ips_issue_i2o_memio;
		ha->func.init = ips_init_morpheus;
		ha->func.statupd = ips_statupd_morpheus;
		ha->func.reset = ips_reset_morpheus;
		ha->func.intr = ips_intr_morpheus;
		ha->func.enableint = ips_enable_int_morpheus;
	} else if (IPS_USE_MEMIO(ha)) {
		/* copperhead with memory-mapped I/O */
		ha->func.isintr = ips_isintr_copperhead_memio;
		ha->func.isinit = ips_isinit_copperhead_memio;
		ha->func.init = ips_init_copperhead_memio;
		ha->func.statupd = ips_statupd_copperhead_memio;
		ha->func.statinit = ips_statinit_memio;
		ha->func.reset = ips_reset_copperhead_memio;
		ha->func.intr = ips_intr_copperhead;
		ha->func.erasebios = ips_erase_bios_memio;
		ha->func.programbios = ips_program_bios_memio;
		ha->func.verifybios = ips_verify_bios_memio;
		ha->func.enableint = ips_enable_int_copperhead_memio;
		if (IPS_USE_I2O_DELIVER(ha))
			ha->func.issue = ips_issue_i2o_memio;
		else
			ha->func.issue = ips_issue_copperhead_memio;
	} else {
		/* copperhead with port (PIO) I/O */
		ha->func.isintr = ips_isintr_copperhead;
		ha->func.isinit = ips_isinit_copperhead;
		ha->func.init = ips_init_copperhead;
		ha->func.statupd = ips_statupd_copperhead;
		ha->func.statinit = ips_statinit;
		ha->func.reset = ips_reset_copperhead;
		ha->func.intr = ips_intr_copperhead;
		ha->func.erasebios = ips_erase_bios;
		ha->func.programbios = ips_program_bios;
		ha->func.verifybios = ips_verify_bios;
		ha->func.enableint = ips_enable_int_copperhead;

		if (IPS_USE_I2O_DELIVER(ha))
			ha->func.issue = ips_issue_i2o;
		else
			ha->func.issue = ips_issue_copperhead;
	}
}
632
633
634
635
636
637
638
639
640
641
/*
 * ips_release - tear down one adapter
 *
 * Unregisters the SCSI host, synchronously flushes the adapter cache
 * using the reserved last scb, then releases driver state, the IRQ,
 * and the host reference.  Always returns FALSE (callers ignore it).
 */
static int
ips_release(struct Scsi_Host *sh)
{
	ips_scb_t *scb;
	ips_ha_t *ha;
	int i;

	METHOD_TRACE("ips_release", 1);

	scsi_remove_host(sh);

	/* find this host's slot in the module-level tables */
	for (i = 0; i < IPS_MAX_ADAPTERS && ips_sh[i] != sh; i++) ;

	if (i == IPS_MAX_ADAPTERS) {
		printk(KERN_WARNING
		       "(%s) release, invalid Scsi_Host pointer.\n", ips_name);
		BUG();
		return (FALSE);
	}

	ha = IPS_HA(sh);

	if (!ha)
		return (FALSE);

	/* flush the cache on the controller using the reserved scb */
	scb = &ha->scbs[ha->max_cmds - 1];

	ips_init_scb(ha, scb);

	scb->timeout = ips_cmd_timeout;
	scb->cdb[0] = IPS_CMD_FLUSH;

	scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH;
	scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb);
	scb->cmd.flush_cache.state = IPS_NORM_STATE;
	scb->cmd.flush_cache.reserved = 0;
	scb->cmd.flush_cache.reserved2 = 0;
	scb->cmd.flush_cache.reserved3 = 0;
	scb->cmd.flush_cache.reserved4 = 0;

	IPS_PRINTK(KERN_WARNING, ha->pcidev, "Flushing Cache.\n");

	/* wait for the flush to finish before tearing anything down */
	if (ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_ON) == IPS_FAILURE)
		IPS_PRINTK(KERN_WARNING, ha->pcidev, "Incomplete Flush.\n");

	IPS_PRINTK(KERN_WARNING, ha->pcidev, "Flushing Complete.\n");

	ips_sh[i] = NULL;
	ips_ha[i] = NULL;

	/* free driver resources, then the interrupt line */
	ips_free(ha);

	free_irq(ha->pcidev->irq, ha);

	scsi_host_put(sh);

	ips_released_controllers++;

	return (FALSE);
}
706
707
708
709
710
711
712
713
714
715
/*
 * ips_halt - reboot notifier callback
 *
 * On SYS_RESTART / SYS_HALT / SYS_POWER_OFF, synchronously flush the
 * cache of every active adapter (using each adapter's reserved last
 * scb) so no dirty data is lost across the power transition.
 */
static int
ips_halt(struct notifier_block *nb, ulong event, void *buf)
{
	ips_scb_t *scb;
	ips_ha_t *ha;
	int i;

	if ((event != SYS_RESTART) && (event != SYS_HALT) &&
	    (event != SYS_POWER_OFF))
		return (NOTIFY_DONE);

	for (i = 0; i < ips_next_controller; i++) {
		ha = (ips_ha_t *) ips_ha[i];

		if (!ha)
			continue;

		if (!ha->active)
			continue;

		/* flush the cache using the reserved scb */
		scb = &ha->scbs[ha->max_cmds - 1];

		ips_init_scb(ha, scb);

		scb->timeout = ips_cmd_timeout;
		scb->cdb[0] = IPS_CMD_FLUSH;

		scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH;
		scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb);
		scb->cmd.flush_cache.state = IPS_NORM_STATE;
		scb->cmd.flush_cache.reserved = 0;
		scb->cmd.flush_cache.reserved2 = 0;
		scb->cmd.flush_cache.reserved3 = 0;
		scb->cmd.flush_cache.reserved4 = 0;

		IPS_PRINTK(KERN_WARNING, ha->pcidev, "Flushing Cache.\n");

		/* wait for the flush; we are about to lose power */
		if (ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_ON) ==
		    IPS_FAILURE)
			IPS_PRINTK(KERN_WARNING, ha->pcidev,
				   "Incomplete Flush.\n");
		else
			IPS_PRINTK(KERN_WARNING, ha->pcidev,
				   "Flushing Complete.\n");
	}

	return (NOTIFY_OK);
}
766
767
768
769
770
771
772
773
774
775
776int ips_eh_abort(struct scsi_cmnd *SC)
777{
778 ips_ha_t *ha;
779 ips_copp_wait_item_t *item;
780 int ret;
781 struct Scsi_Host *host;
782
783 METHOD_TRACE("ips_eh_abort", 1);
784
785 if (!SC)
786 return (FAILED);
787
788 host = SC->device->host;
789 ha = (ips_ha_t *) SC->device->host->hostdata;
790
791 if (!ha)
792 return (FAILED);
793
794 if (!ha->active)
795 return (FAILED);
796
797 spin_lock(host->host_lock);
798
799
800 item = ha->copp_waitlist.head;
801 while ((item) && (item->scsi_cmd != SC))
802 item = item->next;
803
804 if (item) {
805
806 ips_removeq_copp(&ha->copp_waitlist, item);
807 ret = (SUCCESS);
808
809
810 } else if (ips_removeq_wait(&ha->scb_waitlist, SC)) {
811
812 ret = (SUCCESS);
813 } else {
814
815 ret = (FAILED);
816 }
817
818 spin_unlock(host->host_lock);
819 return ret;
820}
821
822
823
824
825
826
827
828
829
830
831
832
833static int __ips_eh_reset(struct scsi_cmnd *SC)
834{
835 int ret;
836 int i;
837 ips_ha_t *ha;
838 ips_scb_t *scb;
839 ips_copp_wait_item_t *item;
840
841 METHOD_TRACE("ips_eh_reset", 1);
842
843#ifdef NO_IPS_RESET
844 return (FAILED);
845#else
846
847 if (!SC) {
848 DEBUG(1, "Reset called with NULL scsi command");
849
850 return (FAILED);
851 }
852
853 ha = (ips_ha_t *) SC->device->host->hostdata;
854
855 if (!ha) {
856 DEBUG(1, "Reset called with NULL ha struct");
857
858 return (FAILED);
859 }
860
861 if (!ha->active)
862 return (FAILED);
863
864
865 item = ha->copp_waitlist.head;
866 while ((item) && (item->scsi_cmd != SC))
867 item = item->next;
868
869 if (item) {
870
871 ips_removeq_copp(&ha->copp_waitlist, item);
872 return (SUCCESS);
873 }
874
875
876 if (ips_removeq_wait(&ha->scb_waitlist, SC)) {
877
878 return (SUCCESS);
879 }
880
881
882
883
884
885
886
887
888
889
890
891 if (ha->ioctl_reset == 0) {
892 scb = &ha->scbs[ha->max_cmds - 1];
893
894 ips_init_scb(ha, scb);
895
896 scb->timeout = ips_cmd_timeout;
897 scb->cdb[0] = IPS_CMD_FLUSH;
898
899 scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH;
900 scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb);
901 scb->cmd.flush_cache.state = IPS_NORM_STATE;
902 scb->cmd.flush_cache.reserved = 0;
903 scb->cmd.flush_cache.reserved2 = 0;
904 scb->cmd.flush_cache.reserved3 = 0;
905 scb->cmd.flush_cache.reserved4 = 0;
906
907
908 ret = ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_IORL);
909 if (ret == IPS_SUCCESS) {
910 IPS_PRINTK(KERN_NOTICE, ha->pcidev,
911 "Reset Request - Flushed Cache\n");
912 return (SUCCESS);
913 }
914 }
915
916
917
918
919 ha->ioctl_reset = 0;
920
921
922
923
924
925 IPS_PRINTK(KERN_NOTICE, ha->pcidev, "Resetting controller.\n");
926 ret = (*ha->func.reset) (ha);
927
928 if (!ret) {
929 struct scsi_cmnd *scsi_cmd;
930
931 IPS_PRINTK(KERN_NOTICE, ha->pcidev,
932 "Controller reset failed - controller now offline.\n");
933
934
935 DEBUG_VAR(1, "(%s%d) Failing active commands",
936 ips_name, ha->host_num);
937
938 while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) {
939 scb->scsi_cmd->result = DID_ERROR << 16;
940 scb->scsi_cmd->scsi_done(scb->scsi_cmd);
941 ips_freescb(ha, scb);
942 }
943
944
945 DEBUG_VAR(1, "(%s%d) Failing pending commands",
946 ips_name, ha->host_num);
947
948 while ((scsi_cmd = ips_removeq_wait_head(&ha->scb_waitlist))) {
949 scsi_cmd->result = DID_ERROR;
950 scsi_cmd->scsi_done(scsi_cmd);
951 }
952
953 ha->active = FALSE;
954 return (FAILED);
955 }
956
957 if (!ips_clear_adapter(ha, IPS_INTR_IORL)) {
958 struct scsi_cmnd *scsi_cmd;
959
960 IPS_PRINTK(KERN_NOTICE, ha->pcidev,
961 "Controller reset failed - controller now offline.\n");
962
963
964 DEBUG_VAR(1, "(%s%d) Failing active commands",
965 ips_name, ha->host_num);
966
967 while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) {
968 scb->scsi_cmd->result = DID_ERROR << 16;
969 scb->scsi_cmd->scsi_done(scb->scsi_cmd);
970 ips_freescb(ha, scb);
971 }
972
973
974 DEBUG_VAR(1, "(%s%d) Failing pending commands",
975 ips_name, ha->host_num);
976
977 while ((scsi_cmd = ips_removeq_wait_head(&ha->scb_waitlist))) {
978 scsi_cmd->result = DID_ERROR << 16;
979 scsi_cmd->scsi_done(scsi_cmd);
980 }
981
982 ha->active = FALSE;
983 return (FAILED);
984 }
985
986
987 if (le32_to_cpu(ha->subsys->param[3]) & 0x300000) {
988 struct timeval tv;
989
990 do_gettimeofday(&tv);
991 ha->last_ffdc = tv.tv_sec;
992 ha->reset_count++;
993 ips_ffdc_reset(ha, IPS_INTR_IORL);
994 }
995
996
997 DEBUG_VAR(1, "(%s%d) Failing active commands", ips_name, ha->host_num);
998
999 while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) {
1000 scb->scsi_cmd->result = DID_RESET << 16;
1001 scb->scsi_cmd->scsi_done(scb->scsi_cmd);
1002 ips_freescb(ha, scb);
1003 }
1004
1005
1006 for (i = 1; i < ha->nbus; i++)
1007 ha->dcdb_active[i - 1] = 0;
1008
1009
1010 ha->num_ioctl = 0;
1011
1012 ips_next(ha, IPS_INTR_IORL);
1013
1014 return (SUCCESS);
1015#endif
1016
1017}
1018
1019static int ips_eh_reset(struct scsi_cmnd *SC)
1020{
1021 int rc;
1022
1023 spin_lock_irq(SC->device->host->host_lock);
1024 rc = __ips_eh_reset(SC);
1025 spin_unlock_irq(SC->device->host->host_lock);
1026
1027 return rc;
1028}
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
/*
 * ips_queue_lck - queue a SCSI command to the adapter (lock held)
 *
 * Routes passthru (ioctl) commands to the passthru wait queue and
 * normal commands to the scb wait queue, then kicks ips_next() to
 * start execution.  Queue-full and unreachable-target cases complete
 * the command immediately with the appropriate host status.
 */
static int ips_queue_lck(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *))
{
	ips_ha_t *ha;
	ips_passthru_t *pt;

	METHOD_TRACE("ips_queue", 1);

	ha = (ips_ha_t *) SC->device->host->hostdata;

	if (!ha)
		return (1);

	/* NOTE(review): DID_ERROR is a host-byte code, not a valid
	 * queuecommand return value — confirm intended semantics */
	if (!ha->active)
		return (DID_ERROR);

	/* complete immediately with BUS_BUSY when the relevant software
	 * queue is already full */
	if (ips_is_passthru(SC)) {
		if (ha->copp_waitlist.count == IPS_MAX_IOCTL_QUEUE) {
			SC->result = DID_BUS_BUSY << 16;
			done(SC);

			return (0);
		}
	} else if (ha->scb_waitlist.count == IPS_MAX_QUEUE) {
		SC->result = DID_BUS_BUSY << 16;
		done(SC);

		return (0);
	}

	SC->scsi_done = done;

	DEBUG_VAR(2, "(%s%d): ips_queue: cmd 0x%X (%d %d %d)",
		  ips_name,
		  ha->host_num,
		  SC->cmnd[0],
		  SC->device->channel, SC->device->id, SC->device->lun);

	/* the adapter's own ID on a physical channel is not addressable */
	if ((scmd_channel(SC) > 0)
	    && (scmd_id(SC) == ha->ha_id[scmd_channel(SC)])) {
		SC->result = DID_NO_CONNECT << 16;
		done(SC);

		return (0);
	}

	if (ips_is_passthru(SC)) {

		ips_copp_wait_item_t *scratch;

		/* A channel-reset passthru is executed inline (not
		 * queued), but only when no commands are active.
		 * NOTE(review): the scatterlist pointer itself is cast
		 * to ips_passthru_t — assumes the ioctl path laid the
		 * passthru struct out this way; confirm against
		 * ips_make_passthru. */
		pt = (ips_passthru_t *) scsi_sglist(SC);
		if ((pt->CoppCP.cmd.reset.op_code == IPS_CMD_RESET_CHANNEL) &&
		    (pt->CoppCP.cmd.reset.adapter_flag == 1)) {
			if (ha->scb_activelist.count != 0) {
				SC->result = DID_BUS_BUSY << 16;
				done(SC);
				return (0);
			}
			ha->ioctl_reset = 1;	/* This reset request is from an IOCTL */
			__ips_eh_reset(SC);
			SC->result = DID_OK << 16;
			SC->scsi_done(SC);
			return (0);
		}

		/* allocate a tracking node and queue the passthru */
		scratch = kmalloc(sizeof (ips_copp_wait_item_t), GFP_ATOMIC);

		if (!scratch) {
			SC->result = DID_ERROR << 16;
			done(SC);

			return (0);
		}

		scratch->scsi_cmd = SC;
		scratch->next = NULL;

		ips_putq_copp_tail(&ha->copp_waitlist, scratch);
	} else {
		ips_putq_wait_tail(&ha->scb_waitlist, SC);
	}

	/* start whatever can be started now */
	ips_next(ha, IPS_INTR_IORL);

	return (0);
}

/* generate the locked ips_queue() wrapper used by the host template */
static DEF_SCSI_QCMD(ips_queue)
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1145 sector_t capacity, int geom[])
1146{
1147 ips_ha_t *ha = (ips_ha_t *) sdev->host->hostdata;
1148 int heads;
1149 int sectors;
1150 int cylinders;
1151
1152 METHOD_TRACE("ips_biosparam", 1);
1153
1154 if (!ha)
1155
1156 return (0);
1157
1158 if (!ha->active)
1159 return (0);
1160
1161 if (!ips_read_adapter_status(ha, IPS_INTR_ON))
1162
1163 return (0);
1164
1165 if ((capacity > 0x400000) && ((ha->enq->ucMiscFlag & 0x8) == 0)) {
1166 heads = IPS_NORM_HEADS;
1167 sectors = IPS_NORM_SECTORS;
1168 } else {
1169 heads = IPS_COMP_HEADS;
1170 sectors = IPS_COMP_SECTORS;
1171 }
1172
1173 cylinders = (unsigned long) capacity / (heads * sectors);
1174
1175 DEBUG_VAR(2, "Geometry: heads: %d, sectors: %d, cylinders: %d",
1176 heads, sectors, cylinders);
1177
1178 geom[0] = heads;
1179 geom[1] = sectors;
1180 geom[2] = cylinders;
1181
1182 return (0);
1183}
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194static int
1195ips_slave_configure(struct scsi_device * SDptr)
1196{
1197 ips_ha_t *ha;
1198 int min;
1199
1200 ha = IPS_HA(SDptr->host);
1201 if (SDptr->tagged_supported && SDptr->type == TYPE_DISK) {
1202 min = ha->max_cmds / 2;
1203 if (ha->enq->ucLogDriveCount <= 2)
1204 min = ha->max_cmds - 1;
1205 scsi_change_queue_depth(SDptr, min);
1206 }
1207
1208 SDptr->skip_ms_page_8 = 1;
1209 SDptr->skip_ms_page_3f = 1;
1210 return 0;
1211}
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
/*
 * do_ipsintr - top-level (shared) interrupt handler
 *
 * Before the SCSI host is registered (not yet in ips_sh[]), the chip
 * interrupt routine is still invoked so the status queue gets drained.
 * Once registered, the chip handler runs under the host lock, and
 * afterwards queued commands are kicked via ips_next() with the lock
 * released.
 */
static irqreturn_t
do_ipsintr(int irq, void *dev_id)
{
	ips_ha_t *ha;
	struct Scsi_Host *host;
	int irqstatus;

	METHOD_TRACE("do_ipsintr", 2);

	ha = (ips_ha_t *) dev_id;
	if (!ha)
		return IRQ_NONE;
	host = ips_sh[ha->host_num];

	/* host not registered yet: service the chip without locking */
	if (!host) {
		(*ha->func.intr) (ha);
		return IRQ_HANDLED;
	}

	spin_lock(host->host_lock);

	if (!ha->active) {
		spin_unlock(host->host_lock);
		return IRQ_HANDLED;
	}

	irqstatus = (*ha->func.intr) (ha);

	spin_unlock(host->host_lock);

	/* start the next command(s) outside the lock */
	ips_next(ha, IPS_INTR_ON);
	return IRQ_RETVAL(irqstatus);
}
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
/*
 * ips_intr_copperhead - service interrupts on a Copperhead adapter
 *
 * While the adapter keeps asserting its interrupt condition, read each
 * completed command's status, decode it (ips_chkstatus), and invoke the
 * completion callback of the scb recorded in ha->sp by ips_chkstatus.
 * Returns 0 if the interrupt was not ours, 1 otherwise.
 * Called with the host lock held (see do_ipsintr).
 */
int
ips_intr_copperhead(ips_ha_t * ha)
{
	ips_stat_t *sp;
	ips_scb_t *scb;
	IPS_STATUS cstatus;
	int intrstatus;

	METHOD_TRACE("ips_intr", 2);

	if (!ha)
		return 0;

	if (!ha->active)
		return 0;

	intrstatus = (*ha->func.isintr) (ha);

	if (!intrstatus) {
		/* not our interrupt */
		return 0;
	}

	while (TRUE) {
		sp = &ha->sp;

		intrstatus = (*ha->func.isintr) (ha);

		if (!intrstatus)
			break;
		else
			cstatus.value = (*ha->func.statupd) (ha);

		if (cstatus.fields.command_id > (IPS_MAX_CMDS - 1)) {
			/* command id out of range - drop this status */
			continue;
		}

		/* ips_chkstatus records the completed scb in ha->sp */
		ips_chkstatus(ha, &cstatus);
		scb = (ips_scb_t *) sp->scb_addr;

		(*scb->callback) (ha, scb);
	}
	return 1;
}
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
/*
 * ips_intr_morpheus - service interrupts on a Morpheus adapter
 *
 * Same drain loop as ips_intr_copperhead, with two Morpheus-specific
 * checks: a status of 0xffffffff ends the loop (queue empty / register
 * reads all-ones), and an out-of-range command id is logged as a
 * spurious interrupt.  Returns 0 if the interrupt was not ours, 1
 * otherwise.  Called with the host lock held (see do_ipsintr).
 */
int
ips_intr_morpheus(ips_ha_t * ha)
{
	ips_stat_t *sp;
	ips_scb_t *scb;
	IPS_STATUS cstatus;
	int intrstatus;

	METHOD_TRACE("ips_intr_morpheus", 2);

	if (!ha)
		return 0;

	if (!ha->active)
		return 0;

	intrstatus = (*ha->func.isintr) (ha);

	if (!intrstatus) {
		/* not our interrupt */
		return 0;
	}

	while (TRUE) {
		sp = &ha->sp;

		intrstatus = (*ha->func.isintr) (ha);

		if (!intrstatus)
			break;
		else
			cstatus.value = (*ha->func.statupd) (ha);

		if (cstatus.value == 0xffffffff)
			/* no more completed statuses */
			break;

		if (cstatus.fields.command_id > (IPS_MAX_CMDS - 1)) {
			IPS_PRINTK(KERN_WARNING, ha->pcidev,
				   "Spurious interrupt; no ccb.\n");

			continue;
		}

		/* ips_chkstatus records the completed scb in ha->sp */
		ips_chkstatus(ha, &cstatus);
		scb = (ips_scb_t *) sp->scb_addr;

		(*scb->callback) (ha, scb);
	}
	return 1;
}
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400static const char *
1401ips_info(struct Scsi_Host *SH)
1402{
1403 static char buffer[256];
1404 char *bp;
1405 ips_ha_t *ha;
1406
1407 METHOD_TRACE("ips_info", 1);
1408
1409 ha = IPS_HA(SH);
1410
1411 if (!ha)
1412 return (NULL);
1413
1414 bp = &buffer[0];
1415 memset(bp, 0, sizeof (buffer));
1416
1417 sprintf(bp, "%s%s%s Build %d", "IBM PCI ServeRAID ",
1418 IPS_VERSION_HIGH, IPS_VERSION_LOW, IPS_BUILD_IDENT);
1419
1420 if (ha->ad_type > 0 && ha->ad_type <= MAX_ADAPTER_NAME) {
1421 strcat(bp, " <");
1422 strcat(bp, ips_adapter_name[ha->ad_type - 1]);
1423 strcat(bp, ">");
1424 }
1425
1426 return (bp);
1427}
1428
1429static int
1430ips_write_info(struct Scsi_Host *host, char *buffer, int length)
1431{
1432 int i;
1433 ips_ha_t *ha = NULL;
1434
1435
1436 for (i = 0; i < ips_next_controller; i++) {
1437 if (ips_sh[i]) {
1438 if (ips_sh[i] == host) {
1439 ha = (ips_ha_t *) ips_sh[i]->hostdata;
1440 break;
1441 }
1442 }
1443 }
1444
1445 if (!ha)
1446 return (-EINVAL);
1447
1448 return 0;
1449}
1450
1451static int
1452ips_show_info(struct seq_file *m, struct Scsi_Host *host)
1453{
1454 int i;
1455 ips_ha_t *ha = NULL;
1456
1457
1458 for (i = 0; i < ips_next_controller; i++) {
1459 if (ips_sh[i]) {
1460 if (ips_sh[i] == host) {
1461 ha = (ips_ha_t *) ips_sh[i]->hostdata;
1462 break;
1463 }
1464 }
1465 }
1466
1467 if (!ha)
1468 return (-EINVAL);
1469
1470 return ips_host_info(ha, m);
1471}
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486static int ips_is_passthru(struct scsi_cmnd *SC)
1487{
1488 unsigned long flags;
1489
1490 METHOD_TRACE("ips_is_passthru", 1);
1491
1492 if (!SC)
1493 return (0);
1494
1495 if ((SC->cmnd[0] == IPS_IOCTL_COMMAND) &&
1496 (SC->device->channel == 0) &&
1497 (SC->device->id == IPS_ADAPTER_ID) &&
1498 (SC->device->lun == 0) && scsi_sglist(SC)) {
1499 struct scatterlist *sg = scsi_sglist(SC);
1500 char *buffer;
1501
1502
1503
1504 local_irq_save(flags);
1505 buffer = kmap_atomic(sg_page(sg)) + sg->offset;
1506 if (buffer && buffer[0] == 'C' && buffer[1] == 'O' &&
1507 buffer[2] == 'P' && buffer[3] == 'P') {
1508 kunmap_atomic(buffer - sg->offset);
1509 local_irq_restore(flags);
1510 return 1;
1511 }
1512 kunmap_atomic(buffer - sg->offset);
1513 local_irq_restore(flags);
1514 }
1515 return 0;
1516}
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526static int
1527ips_alloc_passthru_buffer(ips_ha_t * ha, int length)
1528{
1529 void *bigger_buf;
1530 dma_addr_t dma_busaddr;
1531
1532 if (ha->ioctl_data && length <= ha->ioctl_len)
1533 return 0;
1534
1535 bigger_buf = pci_alloc_consistent(ha->pcidev, length, &dma_busaddr);
1536 if (bigger_buf) {
1537
1538 pci_free_consistent(ha->pcidev, ha->ioctl_len, ha->ioctl_data,
1539 ha->ioctl_busaddr);
1540
1541 ha->ioctl_data = (char *) bigger_buf;
1542 ha->ioctl_len = length;
1543 ha->ioctl_busaddr = dma_busaddr;
1544 } else {
1545 return -1;
1546 }
1547 return 0;
1548}
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
/*
 * ips_make_passthru - turn an IPS_IOCTL_COMMAND scsi_cmnd into an
 * adapter passthru request.
 *
 * Copies the user's ips_passthru_t header (and any command buffer that
 * follows it) out of the command's scatterlist into the DMA-able
 * ha->ioctl_data buffer, then dispatches on pt->CoppCmd.
 *
 * Returns IPS_SUCCESS (command queued via ips_usrcmd/flash path),
 * IPS_SUCCESS_IMM (answered in-line), or IPS_FAILURE.
 */
static int
ips_make_passthru(ips_ha_t *ha, struct scsi_cmnd *SC, ips_scb_t *scb, int intr)
{
	ips_passthru_t *pt;
	int length = 0;
	int i, ret;
	struct scatterlist *sg = scsi_sglist(SC);

	METHOD_TRACE("ips_make_passthru", 1);

	/* Total payload length is the sum of all S/G element lengths. */
	scsi_for_each_sg(SC, sg, scsi_sg_count(SC), i)
		length += sg->length;

	if (length < sizeof (ips_passthru_t)) {
		/* Shorter than the passthru header itself - reject. */
		DEBUG_VAR(1, "(%s%d) Passthru structure wrong size",
			  ips_name, ha->host_num);
		return (IPS_FAILURE);
	}
	if (ips_alloc_passthru_buffer(ha, length)) {
		/* Could not grow the DMA buffer.  If an old (smaller) one
		 * still exists, use it to report failure (0x0B) back to
		 * the caller's status fields. */
		if (ha->ioctl_data) {
			pt = (ips_passthru_t *) ha->ioctl_data;
			ips_scmd_buf_read(SC, pt, sizeof (ips_passthru_t));
			pt->BasicStatus = 0x0B;
			pt->ExtendedStatus = 0x00;
			ips_scmd_buf_write(SC, pt, sizeof (ips_passthru_t));
		}
		return IPS_FAILURE;
	}
	ha->ioctl_datasize = length;

	/* Pull the entire request into the ioctl staging buffer. */
	ips_scmd_buf_read(SC, ha->ioctl_data, ha->ioctl_datasize);
	pt = (ips_passthru_t *) ha->ioctl_data;

	switch (pt->CoppCmd) {
	case IPS_NUMCTRLS:
		/* Simple query: append the controller count right after
		 * the header and answer immediately. */
		memcpy(ha->ioctl_data + sizeof (ips_passthru_t),
		       &ips_num_controllers, sizeof (int));
		ips_scmd_buf_write(SC, ha->ioctl_data,
				   sizeof (ips_passthru_t) + sizeof (int));
		SC->result = DID_OK << 16;

		return (IPS_SUCCESS_IMM);

	case IPS_COPPUSRCMD:
	case IPS_COPPIOCCMD:
		if (SC->cmnd[0] == IPS_IOCTL_COMMAND) {
			if (length < (sizeof (ips_passthru_t) + pt->CmdBSize)) {
				/* Header claims more data than was sent. */
				DEBUG_VAR(1,
					  "(%s%d) Passthru structure wrong size",
					  ips_name, ha->host_num);

				return (IPS_FAILURE);
			}

			/* BIOS/firmware flashing on Copperhead goes through
			 * a dedicated multi-packet accumulation path. */
			if (ha->pcidev->device == IPS_DEVICEID_COPPERHEAD &&
			    pt->CoppCP.cmd.flashfw.op_code ==
			    IPS_CMD_RW_BIOSFW) {
				ret = ips_flash_copperhead(ha, pt, scb);
				ips_scmd_buf_write(SC, ha->ioctl_data,
						   sizeof (ips_passthru_t));
				return ret;
			}
			/* Ordinary user command: marshal it into the SCB. */
			if (ips_usrcmd(ha, pt, scb))
				return (IPS_SUCCESS);
			else
				return (IPS_FAILURE);
		}

		break;

	}

	return (IPS_FAILURE);
}
1647
1648
1649
1650
1651
1652
/*
 * ips_flash_copperhead - handle one packet of a (possibly multi-packet)
 * BIOS/firmware flash request on a Copperhead controller.
 *
 * Packets are accumulated into ha->flash_data; when the final packet
 * arrives the actual flash operation (ips_flash_bios/ips_flash_firmware)
 * is started.  Returns IPS_SUCCESS / IPS_SUCCESS_IMM / IPS_FAILURE.
 */
static int
ips_flash_copperhead(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
{
	int datasize;

	/* Trombone adapters take a firmware image as a plain user
	 * command rather than the packet-accumulation path. */
	if (IPS_IS_TROMBONE(ha) && pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE) {
		if (ips_usrcmd(ha, pt, scb))
			return IPS_SUCCESS;
		else
			return IPS_FAILURE;
	}
	/* Assume failure status (0x0B) until the packet is accepted. */
	pt->BasicStatus = 0x0B;
	pt->ExtendedStatus = 0;
	scb->scsi_cmd->result = DID_OK << 16;

	if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE &&
	    pt->CoppCP.cmd.flashfw.direction == IPS_ERASE_BIOS) {
		/* BIOS erase needs no staged data - do it right away. */
		pt->BasicStatus = 0;
		return ips_flash_bios(ha, pt, scb);
	} else if (pt->CoppCP.cmd.flashfw.packet_num == 0) {
		/* First packet: acquire a staging buffer, preferring the
		 * shared static one when nobody else holds it. */
		if (ips_FlashData && !test_and_set_bit(0, &ips_FlashDataInUse)){
			ha->flash_data = ips_FlashData;
			ha->flash_busaddr = ips_flashbusaddr;
			ha->flash_len = PAGE_SIZE << 7;
			ha->flash_datasize = 0;
		} else if (!ha->flash_data) {
			/* Fall back to a private DMA-consistent buffer
			 * sized for the whole image. */
			datasize = pt->CoppCP.cmd.flashfw.total_packets *
			    pt->CoppCP.cmd.flashfw.count;
			ha->flash_data = pci_alloc_consistent(ha->pcidev,
							      datasize,
							      &ha->flash_busaddr);
			if (!ha->flash_data){
				printk(KERN_WARNING "Unable to allocate a flash buffer\n");
				return IPS_FAILURE;
			}
			ha->flash_datasize = 0;
			ha->flash_len = datasize;
		} else
			return IPS_FAILURE;
	} else {
		/* Subsequent packet: verify it still fits the buffer. */
		if (pt->CoppCP.cmd.flashfw.count + ha->flash_datasize >
		    ha->flash_len) {
			ips_free_flash_copperhead(ha);
			IPS_PRINTK(KERN_WARNING, ha->pcidev,
				   "failed size sanity check\n");
			return IPS_FAILURE;
		}
	}
	if (!ha->flash_data)
		return IPS_FAILURE;
	pt->BasicStatus = 0;
	/* Append this packet's payload (it follows the passthru header). */
	memcpy(&ha->flash_data[ha->flash_datasize], pt + 1,
	       pt->CoppCP.cmd.flashfw.count);
	ha->flash_datasize += pt->CoppCP.cmd.flashfw.count;
	/* Last packet received: kick off the real flash operation. */
	if (pt->CoppCP.cmd.flashfw.packet_num ==
	    pt->CoppCP.cmd.flashfw.total_packets - 1) {
		if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE)
			return ips_flash_bios(ha, pt, scb);
		else if (pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE)
			return ips_flash_firmware(ha, pt, scb);
	}
	return IPS_SUCCESS_IMM;
}
1719
1720
1721
1722
1723
1724
/*
 * ips_flash_bios - write or erase the adapter BIOS image staged in
 * ha->flash_data.
 *
 * Write path: erase, program (skipping the IPS_BIOS_HEADER prefix of the
 * staged image), then verify.  Erase path: erase only.  On any failure
 * the passthru status is set to 0x0B and the staging buffer is released.
 * Returns IPS_SUCCESS_IMM or IPS_FAILURE.
 */
static int
ips_flash_bios(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
{

	if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE &&
	    pt->CoppCP.cmd.flashfw.direction == IPS_WRITE_BIOS) {
		/* All three flash primitives must be provided. */
		if ((!ha->func.programbios) || (!ha->func.erasebios) ||
		    (!ha->func.verifybios))
			goto error;
		if ((*ha->func.erasebios) (ha)) {
			DEBUG_VAR(1,
				  "(%s%d) flash bios failed - unable to erase flash",
				  ips_name, ha->host_num);
			goto error;
		} else
		    if ((*ha->func.programbios) (ha,
						 ha->flash_data +
						 IPS_BIOS_HEADER,
						 ha->flash_datasize -
						 IPS_BIOS_HEADER, 0)) {
			DEBUG_VAR(1,
				  "(%s%d) flash bios failed - unable to flash",
				  ips_name, ha->host_num);
			goto error;
		} else
		    if ((*ha->func.verifybios) (ha,
						ha->flash_data +
						IPS_BIOS_HEADER,
						ha->flash_datasize -
						IPS_BIOS_HEADER, 0)) {
			DEBUG_VAR(1,
				  "(%s%d) flash bios failed - unable to verify flash",
				  ips_name, ha->host_num);
			goto error;
		}
		ips_free_flash_copperhead(ha);
		return IPS_SUCCESS_IMM;
	} else if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE &&
		   pt->CoppCP.cmd.flashfw.direction == IPS_ERASE_BIOS) {
		if (!ha->func.erasebios)
			goto error;
		if ((*ha->func.erasebios) (ha)) {
			DEBUG_VAR(1,
				  "(%s%d) flash bios failed - unable to erase flash",
				  ips_name, ha->host_num);
			goto error;
		}
		return IPS_SUCCESS_IMM;
	}
	/* Unsupported type/direction combination, or a step failed. */
      error:
	pt->BasicStatus = 0x0B;
	pt->ExtendedStatus = 0x00;
	ips_free_flash_copperhead(ha);
	return IPS_FAILURE;
}
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789static int
1790ips_fill_scb_sg_single(ips_ha_t * ha, dma_addr_t busaddr,
1791 ips_scb_t * scb, int indx, unsigned int e_len)
1792{
1793
1794 int ret_val = 0;
1795
1796 if ((scb->data_len + e_len) > ha->max_xfer) {
1797 e_len = ha->max_xfer - scb->data_len;
1798 scb->breakup = indx;
1799 ++scb->sg_break;
1800 ret_val = -1;
1801 } else {
1802 scb->breakup = 0;
1803 scb->sg_break = 0;
1804 }
1805 if (IPS_USE_ENH_SGLIST(ha)) {
1806 scb->sg_list.enh_list[indx].address_lo =
1807 cpu_to_le32(pci_dma_lo32(busaddr));
1808 scb->sg_list.enh_list[indx].address_hi =
1809 cpu_to_le32(pci_dma_hi32(busaddr));
1810 scb->sg_list.enh_list[indx].length = cpu_to_le32(e_len);
1811 } else {
1812 scb->sg_list.std_list[indx].address =
1813 cpu_to_le32(pci_dma_lo32(busaddr));
1814 scb->sg_list.std_list[indx].length = cpu_to_le32(e_len);
1815 }
1816
1817 ++scb->sg_len;
1818 scb->data_len += e_len;
1819 return ret_val;
1820}
1821
1822
1823
1824
1825
1826
/*
 * ips_flash_firmware - build and queue the command that downloads the
 * firmware image staged in ha->flash_data to the adapter.
 *
 * Returns IPS_SUCCESS when the SCB is ready to be sent, IPS_FAILURE for
 * an unsupported type/direction (staging buffer is released).
 */
static int
ips_flash_firmware(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
{
	IPS_SG_LIST sg_list;
	uint32_t cmd_busaddr;

	if (pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE &&
	    pt->CoppCP.cmd.flashfw.direction == IPS_WRITE_FW) {
		/* Rebuild the command as a DOWNLOAD of the whole image. */
		memset(&pt->CoppCP.cmd, 0, sizeof (IPS_HOST_COMMAND));
		pt->CoppCP.cmd.flashfw.op_code = IPS_CMD_DOWNLOAD;
		pt->CoppCP.cmd.flashfw.count = cpu_to_le32(ha->flash_datasize);
	} else {
		pt->BasicStatus = 0x0B;
		pt->ExtendedStatus = 0x00;
		ips_free_flash_copperhead(ha);
		return IPS_FAILURE;
	}

	/* Save the SCB's S/G list pointer and bus address around the
	 * command copy below, then restore them - the memcpy would
	 * otherwise clobber them (NOTE(review): presumably due to the
	 * SCB layout; confirm against ips.h). */
	sg_list.list = scb->sg_list.list;
	cmd_busaddr = scb->scb_busaddr;

	memcpy(&scb->cmd, &pt->CoppCP.cmd, sizeof (IPS_IOCTL_CMD));

	scb->sg_list.list = sg_list.list;
	scb->scb_busaddr = cmd_busaddr;
	scb->bus = scb->scsi_cmd->device->channel;
	scb->target_id = scb->scsi_cmd->device->id;
	scb->lun = scb->scsi_cmd->device->lun;
	scb->sg_len = 0;
	scb->data_len = 0;
	scb->flags = 0;
	scb->op_code = 0;
	scb->callback = ipsintr_done;
	scb->timeout = ips_cmd_timeout;

	/* Map the staged image for DMA as a single buffer. */
	scb->data_len = ha->flash_datasize;
	scb->data_busaddr =
	    pci_map_single(ha->pcidev, ha->flash_data, scb->data_len,
			   IPS_DMA_DIR(scb));
	scb->flags |= IPS_SCB_MAP_SINGLE;
	scb->cmd.flashfw.command_id = IPS_COMMAND_ID(ha, scb);
	scb->cmd.flashfw.buffer_addr = cpu_to_le32(scb->data_busaddr);
	if (pt->TimeOut)
		scb->timeout = pt->TimeOut;
	scb->scsi_cmd->result = DID_OK << 16;
	return IPS_SUCCESS;
}
1874
1875
1876
1877
1878
1879
1880static void
1881ips_free_flash_copperhead(ips_ha_t * ha)
1882{
1883 if (ha->flash_data == ips_FlashData)
1884 test_and_clear_bit(0, &ips_FlashDataInUse);
1885 else if (ha->flash_data)
1886 pci_free_consistent(ha->pcidev, ha->flash_len, ha->flash_data,
1887 ha->flash_busaddr);
1888 ha->flash_data = NULL;
1889}
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
/*
 * ips_usrcmd - marshal a user passthru command into an SCB.
 *
 * Copies the command and DCDB from the passthru structure into the SCB,
 * re-initializes the SCB bookkeeping, and wires up the data buffer (which
 * lives in ha->ioctl_data right after the passthru header).
 *
 * Returns 1 when the SCB is ready to send, 0 on bad arguments or if the
 * command uses an S/G opcode (not supported through this path).
 */
static int
ips_usrcmd(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
{
	IPS_SG_LIST sg_list;
	uint32_t cmd_busaddr;

	METHOD_TRACE("ips_usrcmd", 1);

	if ((!scb) || (!pt) || (!ha))
		return (0);

	/* Save the SCB's S/G list pointer and bus address around the
	 * command/DCDB copies, then restore them afterwards. */
	sg_list.list = scb->sg_list.list;
	cmd_busaddr = scb->scb_busaddr;

	memcpy(&scb->cmd, &pt->CoppCP.cmd, sizeof (IPS_IOCTL_CMD));
	memcpy(&scb->dcdb, &pt->CoppCP.dcdb, sizeof (IPS_DCDB_TABLE));

	scb->sg_list.list = sg_list.list;
	scb->scb_busaddr = cmd_busaddr;
	scb->bus = scb->scsi_cmd->device->channel;
	scb->target_id = scb->scsi_cmd->device->id;
	scb->lun = scb->scsi_cmd->device->lun;
	scb->sg_len = 0;
	scb->data_len = 0;
	scb->flags = 0;
	scb->op_code = 0;
	scb->callback = ipsintr_done;
	scb->timeout = ips_cmd_timeout;
	scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);

	/* Scatter/gather opcodes are not supported via passthru. */
	if ((scb->cmd.basic_io.op_code == IPS_CMD_READ_SG) ||
	    (scb->cmd.basic_io.op_code == IPS_CMD_WRITE_SG) ||
	    (scb->cmd.basic_io.op_code == IPS_CMD_DCDB_SG))
		return (0);

	/* The command buffer immediately follows the passthru header in
	 * the (already DMA-mapped) ioctl buffer. */
	if (pt->CmdBSize) {
		scb->data_len = pt->CmdBSize;
		scb->data_busaddr = ha->ioctl_busaddr + sizeof (ips_passthru_t);
	} else {
		scb->data_busaddr = 0L;
	}

	/* For DCDB commands, point the adapter at the DCDB embedded in
	 * this SCB (base bus address plus the field's offset). */
	if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB)
		scb->cmd.dcdb.dcdb_address = cpu_to_le32(scb->scb_busaddr +
							 (unsigned long) &scb->
							 dcdb -
							 (unsigned long) scb);

	if (pt->CmdBSize) {
		if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB)
			scb->dcdb.buffer_pointer =
			    cpu_to_le32(scb->data_busaddr);
		else
			scb->cmd.basic_io.sg_addr =
			    cpu_to_le32(scb->data_busaddr);
	}

	/* Honor a caller-supplied timeout, bucketed into the adapter's
	 * 10s / 60s / 20min DCDB timeout attributes. */
	if (pt->TimeOut) {
		scb->timeout = pt->TimeOut;

		if (pt->TimeOut <= 10)
			scb->dcdb.cmd_attribute |= IPS_TIMEOUT10;
		else if (pt->TimeOut <= 60)
			scb->dcdb.cmd_attribute |= IPS_TIMEOUT60;
		else
			scb->dcdb.cmd_attribute |= IPS_TIMEOUT20M;
	}

	/* Assume success; actual completion status arrives later. */
	scb->scsi_cmd->result = DID_OK << 16;

	return (1);
}
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
/*
 * ips_cleanup_passthru - finish a completed passthru command.
 *
 * Copies completion status (and, for DCDB commands, the updated DCDB)
 * back into the passthru structure in ha->ioctl_data, releases any
 * flash staging buffer, and writes the result back into the original
 * scsi_cmnd's data buffer.
 */
static void
ips_cleanup_passthru(ips_ha_t * ha, ips_scb_t * scb)
{
	ips_passthru_t *pt;

	METHOD_TRACE("ips_cleanup_passthru", 1);

	if ((!scb) || (!scb->scsi_cmd) || (!scsi_sglist(scb->scsi_cmd))) {
		DEBUG_VAR(1, "(%s%d) couldn't cleanup after passthru",
			  ips_name, ha->host_num);

		return;
	}
	pt = (ips_passthru_t *) ha->ioctl_data;

	/* DCDB commands: hand the (possibly updated) DCDB back too. */
	if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB)
		memcpy(&pt->CoppCP.dcdb, &scb->dcdb, sizeof (IPS_DCDB_TABLE));

	pt->BasicStatus = scb->basic_status;
	pt->ExtendedStatus = scb->extended_status;
	pt->AdapterType = ha->ad_type;

	/* Flash commands on Copperhead hold a staging buffer - free it. */
	if (ha->pcidev->device == IPS_DEVICEID_COPPERHEAD &&
	    (scb->cmd.flashfw.op_code == IPS_CMD_DOWNLOAD ||
	     scb->cmd.flashfw.op_code == IPS_CMD_RW_BIOSFW))
		ips_free_flash_copperhead(ha);

	ips_scmd_buf_write(scb->scsi_cmd, ha->ioctl_data, ha->ioctl_datasize);
}
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
/*
 * ips_host_info - emit the adapter's /proc status report into a seq_file.
 *
 * Prints controller type, I/O and memory regions, IRQ, BIOS/firmware/
 * boot-block versions, driver version, and command-queue statistics.
 * Always returns 0.
 */
static int
ips_host_info(ips_ha_t *ha, struct seq_file *m)
{
	METHOD_TRACE("ips_host_info", 1);

	seq_puts(m, "\nIBM ServeRAID General Information:\n\n");

	/* Adapter type is only trustworthy with a valid NVRAM page 5. */
	if ((le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) &&
	    (le16_to_cpu(ha->nvram->adapter_type) != 0))
		seq_printf(m, "\tController Type                   : %s\n",
			  ips_adapter_name[ha->ad_type - 1]);
	else
		seq_puts(m, "\tController Type                   : Unknown\n");

	if (ha->io_addr)
		seq_printf(m,
			  "\tIO region                         : 0x%x (%d bytes)\n",
			  ha->io_addr, ha->io_len);

	if (ha->mem_addr) {
		seq_printf(m,
			  "\tMemory region                     : 0x%x (%d bytes)\n",
			  ha->mem_addr, ha->mem_len);
		seq_printf(m,
			  "\tShared memory address             : 0x%lx\n",
			  (unsigned long)ha->mem_ptr);
	}

	seq_printf(m, "\tIRQ number                        : %d\n", ha->pcidev->irq);

	/* BIOS version: 7 characters when the last byte is unused,
	 * 8 otherwise. */
	if (le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) {
		if (ha->nvram->bios_low[3] == 0) {
			seq_printf(m,
				  "\tBIOS Version                      : %c%c%c%c%c%c%c\n",
				  ha->nvram->bios_high[0], ha->nvram->bios_high[1],
				  ha->nvram->bios_high[2], ha->nvram->bios_high[3],
				  ha->nvram->bios_low[0], ha->nvram->bios_low[1],
				  ha->nvram->bios_low[2]);

		} else {
			seq_printf(m,
				  "\tBIOS Version                      : %c%c%c%c%c%c%c%c\n",
				  ha->nvram->bios_high[0], ha->nvram->bios_high[1],
				  ha->nvram->bios_high[2], ha->nvram->bios_high[3],
				  ha->nvram->bios_low[0], ha->nvram->bios_low[1],
				  ha->nvram->bios_low[2], ha->nvram->bios_low[3]);
		}

	}

	/* Firmware (code block) version, same 7/8-character logic. */
	if (ha->enq->CodeBlkVersion[7] == 0) {
		seq_printf(m,
			  "\tFirmware Version                  : %c%c%c%c%c%c%c\n",
			  ha->enq->CodeBlkVersion[0], ha->enq->CodeBlkVersion[1],
			  ha->enq->CodeBlkVersion[2], ha->enq->CodeBlkVersion[3],
			  ha->enq->CodeBlkVersion[4], ha->enq->CodeBlkVersion[5],
			  ha->enq->CodeBlkVersion[6]);
	} else {
		seq_printf(m,
			  "\tFirmware Version                  : %c%c%c%c%c%c%c%c\n",
			  ha->enq->CodeBlkVersion[0], ha->enq->CodeBlkVersion[1],
			  ha->enq->CodeBlkVersion[2], ha->enq->CodeBlkVersion[3],
			  ha->enq->CodeBlkVersion[4], ha->enq->CodeBlkVersion[5],
			  ha->enq->CodeBlkVersion[6], ha->enq->CodeBlkVersion[7]);
	}

	/* Boot block version, same 7/8-character logic. */
	if (ha->enq->BootBlkVersion[7] == 0) {
		seq_printf(m,
			  "\tBoot Block Version                : %c%c%c%c%c%c%c\n",
			  ha->enq->BootBlkVersion[0], ha->enq->BootBlkVersion[1],
			  ha->enq->BootBlkVersion[2], ha->enq->BootBlkVersion[3],
			  ha->enq->BootBlkVersion[4], ha->enq->BootBlkVersion[5],
			  ha->enq->BootBlkVersion[6]);
	} else {
		seq_printf(m,
			  "\tBoot Block Version                : %c%c%c%c%c%c%c%c\n",
			  ha->enq->BootBlkVersion[0], ha->enq->BootBlkVersion[1],
			  ha->enq->BootBlkVersion[2], ha->enq->BootBlkVersion[3],
			  ha->enq->BootBlkVersion[4], ha->enq->BootBlkVersion[5],
			  ha->enq->BootBlkVersion[6], ha->enq->BootBlkVersion[7]);
	}

	seq_printf(m, "\tDriver Version                    : %s%s\n",
		  IPS_VERSION_HIGH, IPS_VERSION_LOW);

	seq_printf(m, "\tDriver Build                      : %d\n",
		  IPS_BUILD_IDENT);

	seq_printf(m, "\tMax Physical Devices              : %d\n",
		  ha->enq->ucMaxPhysicalDevices);
	seq_printf(m, "\tMax Active Commands               : %d\n",
		  ha->max_cmds);
	seq_printf(m, "\tCurrent Queued Commands           : %d\n",
		  ha->scb_waitlist.count);
	seq_printf(m, "\tCurrent Active Commands           : %d\n",
		  ha->scb_activelist.count - ha->num_ioctl);
	seq_printf(m, "\tCurrent Queued PT Commands        : %d\n",
		  ha->copp_waitlist.count);
	seq_printf(m, "\tCurrent Active PT Commands        : %d\n",
		  ha->num_ioctl);

	seq_putc(m, '\n');

	return 0;
}
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
/*
 * ips_identify_controller - set ha->ad_type from PCI device/revision/
 * subsystem IDs.
 *
 * Copperhead adapters are distinguished by PCI revision; Morpheus and
 * Marco adapters by subsystem device ID.
 */
static void
ips_identify_controller(ips_ha_t * ha)
{
	METHOD_TRACE("ips_identify_controller", 1);

	switch (ha->pcidev->device) {
	case IPS_DEVICEID_COPPERHEAD:
		if (ha->pcidev->revision <= IPS_REVID_SERVERAID) {
			ha->ad_type = IPS_ADTYPE_SERVERAID;
		} else if (ha->pcidev->revision == IPS_REVID_SERVERAID2) {
			ha->ad_type = IPS_ADTYPE_SERVERAID2;
		} else if (ha->pcidev->revision == IPS_REVID_NAVAJO) {
			ha->ad_type = IPS_ADTYPE_NAVAJO;
		} else if ((ha->pcidev->revision == IPS_REVID_SERVERAID2)
			   && (ha->slot_num == 0)) {
			/* NOTE(review): this branch is unreachable - the
			 * IPS_REVID_SERVERAID2 test above already matched,
			 * so the Kiowa case can never be selected.  Fixing
			 * the ordering needs hardware knowledge; flagged
			 * rather than changed. */
			ha->ad_type = IPS_ADTYPE_KIOWA;
		} else if ((ha->pcidev->revision >= IPS_REVID_CLARINETP1) &&
			   (ha->pcidev->revision <= IPS_REVID_CLARINETP3)) {
			/* Clarinet: 3L variant reports 15 physical devices. */
			if (ha->enq->ucMaxPhysicalDevices == 15)
				ha->ad_type = IPS_ADTYPE_SERVERAID3L;
			else
				ha->ad_type = IPS_ADTYPE_SERVERAID3;
		} else if ((ha->pcidev->revision >= IPS_REVID_TROMBONE32) &&
			   (ha->pcidev->revision <= IPS_REVID_TROMBONE64)) {
			ha->ad_type = IPS_ADTYPE_SERVERAID4H;
		}
		break;

	case IPS_DEVICEID_MORPHEUS:
		switch (ha->pcidev->subsystem_device) {
		case IPS_SUBDEVICEID_4L:
			ha->ad_type = IPS_ADTYPE_SERVERAID4L;
			break;

		case IPS_SUBDEVICEID_4M:
			ha->ad_type = IPS_ADTYPE_SERVERAID4M;
			break;

		case IPS_SUBDEVICEID_4MX:
			ha->ad_type = IPS_ADTYPE_SERVERAID4MX;
			break;

		case IPS_SUBDEVICEID_4LX:
			ha->ad_type = IPS_ADTYPE_SERVERAID4LX;
			break;

		case IPS_SUBDEVICEID_5I2:
			ha->ad_type = IPS_ADTYPE_SERVERAID5I2;
			break;

		case IPS_SUBDEVICEID_5I1:
			ha->ad_type = IPS_ADTYPE_SERVERAID5I1;
			break;
		}

		break;

	case IPS_DEVICEID_MARCO:
		switch (ha->pcidev->subsystem_device) {
		case IPS_SUBDEVICEID_6M:
			ha->ad_type = IPS_ADTYPE_SERVERAID6M;
			break;
		case IPS_SUBDEVICEID_6I:
			ha->ad_type = IPS_ADTYPE_SERVERAID6I;
			break;
		case IPS_SUBDEVICEID_7k:
			ha->ad_type = IPS_ADTYPE_SERVERAID7k;
			break;
		case IPS_SUBDEVICEID_7M:
			ha->ad_type = IPS_ADTYPE_SERVERAID7M;
			break;
		}
		break;
	}
}
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
/*
 * ips_get_bios_version - read the adapter's BIOS version into
 * ha->bios_version as "M.mS.nn"-style hex digits.
 *
 * Copperhead adapters are read directly from flash through the FLAP/FLDP
 * registers (memory-mapped or port I/O); newer adapters issue an
 * IPS_CMD_RW_BIOSFW command and parse the returned image.  On any
 * failure the version is left as the "       ?" placeholder.
 */
static void
ips_get_bios_version(ips_ha_t * ha, int intr)
{
	ips_scb_t *scb;
	int ret;
	uint8_t major;
	uint8_t minor;
	uint8_t subminor;
	uint8_t *buffer;

	METHOD_TRACE("ips_get_bios_version", 1);

	major = 0;
	minor = 0;

	strncpy(ha->bios_version, "       ?", 8);

	if (ha->pcidev->device == IPS_DEVICEID_COPPERHEAD) {
		if (IPS_USE_MEMIO(ha)) {
			/* Memory-mapped path.  Check the 0x55AA flash
			 * signature at offsets 0 and 1 first.  The udelay
			 * after each FLAP write is apparently required on
			 * Trombone64 revisions. */
			writel(0, ha->mem_ptr + IPS_REG_FLAP);
			if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
				udelay(25);	/* 25 us */

			if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0x55)
				return;

			writel(1, ha->mem_ptr + IPS_REG_FLAP);
			if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
				udelay(25);	/* 25 us */

			if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0xAA)
				return;

			/* Major version lives at flash offset 0x1FF. */
			writel(0x1FF, ha->mem_ptr + IPS_REG_FLAP);
			if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
				udelay(25);	/* 25 us */

			major = readb(ha->mem_ptr + IPS_REG_FLDP);

			/* Minor version at 0x1FE. */
			writel(0x1FE, ha->mem_ptr + IPS_REG_FLAP);
			if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
				udelay(25);	/* 25 us */
			minor = readb(ha->mem_ptr + IPS_REG_FLDP);

			/* Sub-minor version at 0x1FD. */
			writel(0x1FD, ha->mem_ptr + IPS_REG_FLAP);
			if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
				udelay(25);	/* 25 us */
			subminor = readb(ha->mem_ptr + IPS_REG_FLDP);

		} else {
			/* Port I/O path - identical sequence via outl/inb. */
			outl(0, ha->io_addr + IPS_REG_FLAP);
			if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
				udelay(25);	/* 25 us */

			if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55)
				return;

			outl(1, ha->io_addr + IPS_REG_FLAP);
			if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
				udelay(25);	/* 25 us */

			if (inb(ha->io_addr + IPS_REG_FLDP) != 0xAA)
				return;

			/* Major version at flash offset 0x1FF. */
			outl(0x1FF, ha->io_addr + IPS_REG_FLAP);
			if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
				udelay(25);	/* 25 us */

			major = inb(ha->io_addr + IPS_REG_FLDP);

			/* Minor version at 0x1FE. */
			outl(0x1FE, ha->io_addr + IPS_REG_FLAP);
			if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
				udelay(25);	/* 25 us */

			minor = inb(ha->io_addr + IPS_REG_FLDP);

			/* Sub-minor version at 0x1FD. */
			outl(0x1FD, ha->io_addr + IPS_REG_FLAP);
			if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
				udelay(25);	/* 25 us */

			subminor = inb(ha->io_addr + IPS_REG_FLDP);

		}
	} else {
		/* Morpheus and later: ask the adapter to read the BIOS
		 * image into the ioctl buffer via IPS_CMD_RW_BIOSFW. */
		buffer = ha->ioctl_data;

		memset(buffer, 0, 0x1000);

		scb = &ha->scbs[ha->max_cmds - 1];

		ips_init_scb(ha, scb);

		scb->timeout = ips_cmd_timeout;
		scb->cdb[0] = IPS_CMD_RW_BIOSFW;

		scb->cmd.flashfw.op_code = IPS_CMD_RW_BIOSFW;
		scb->cmd.flashfw.command_id = IPS_COMMAND_ID(ha, scb);
		scb->cmd.flashfw.type = 1;
		scb->cmd.flashfw.direction = 0;
		scb->cmd.flashfw.count = cpu_to_le32(0x800);
		scb->cmd.flashfw.total_packets = 1;
		scb->cmd.flashfw.packet_num = 0;
		scb->data_len = 0x1000;
		scb->cmd.flashfw.buffer_addr = ha->ioctl_busaddr;

		/* Issue and wait; bail on any send or status failure. */
		if (((ret =
		      ips_send_wait(ha, scb, ips_cmd_timeout,
				    intr)) == IPS_FAILURE)
		    || (ret == IPS_SUCCESS_IMM)
		    || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) {

			return;
		}

		/* Verify the 0x55AA signature at offset 0xC0, then pull
		 * the version bytes from the same 0x1FF/0x1FE/0x1FD
		 * offsets relative to that base. */
		if ((buffer[0xC0] == 0x55) && (buffer[0xC1] == 0xAA)) {
			major = buffer[0x1ff + 0xC0];
			minor = buffer[0x1fe + 0xC0];
			subminor = buffer[0x1fd + 0xC0];
		} else {
			return;
		}
	}

	/* Format as uppercase hex: "M.mS.nn" layout into 8 bytes. */
	ha->bios_version[0] = hex_asc_upper_hi(major);
	ha->bios_version[1] = '.';
	ha->bios_version[2] = hex_asc_upper_lo(major);
	ha->bios_version[3] = hex_asc_upper_lo(subminor);
	ha->bios_version[4] = '.';
	ha->bios_version[5] = hex_asc_upper_hi(minor);
	ha->bios_version[6] = hex_asc_upper_lo(minor);
	ha->bios_version[7] = 0;
}
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
/*
 * ips_hainit - finish initializing an adapter after basic setup.
 *
 * Enables status/interrupt handling, performs the initial FFDC reset,
 * reads configuration, status and subsystem parameters, writes driver
 * info back to the controller, and derives bus/target topology and
 * transfer/command limits from the configuration.
 *
 * Returns 1 on success, 0 on any failure.
 */
static int
ips_hainit(ips_ha_t * ha)
{
	int i;
	struct timeval tv;

	METHOD_TRACE("ips_hainit", 1);

	if (!ha)
		return (0);

	if (ha->func.statinit)
		(*ha->func.statinit) (ha);

	if (ha->func.enableint)
		(*ha->func.enableint) (ha);

	/* Record the reset time and send the first-failure-data-capture
	 * reset notification to the adapter. */
	ha->reset_count = 1;
	do_gettimeofday(&tv);
	ha->last_ffdc = tv.tv_sec;
	ips_ffdc_reset(ha, IPS_INTR_IORL);

	if (!ips_read_config(ha, IPS_INTR_IORL)) {
		IPS_PRINTK(KERN_WARNING, ha->pcidev,
			   "unable to read config from controller.\n");

		return (0);
	}

	if (!ips_read_adapter_status(ha, IPS_INTR_IORL)) {
		IPS_PRINTK(KERN_WARNING, ha->pcidev,
			   "unable to read controller status.\n");

		return (0);
	}

	/* Needs enquiry data read above to tell adapter models apart. */
	ips_identify_controller(ha);

	if (!ips_read_subsystem_parameters(ha, IPS_INTR_IORL)) {
		IPS_PRINTK(KERN_WARNING, ha->pcidev,
			   "unable to read subsystem parameters.\n");

		return (0);
	}

	/* Let the controller know which driver is managing it. */
	if (!ips_write_driver_status(ha, IPS_INTR_IORL)) {
		IPS_PRINTK(KERN_WARNING, ha->pcidev,
			   "unable to write driver info to controller.\n");

		return (0);
	}

	/* Reset the adapter's view if a logical-drive config exists but
	 * the adapter requires an explicit clear. */
	if ((ha->conf->ucLogDriveCount > 0) && (ha->requires_esl == 1))
		ips_clear_adapter(ha, IPS_INTR_IORL);

	/* Topology: +1 target for the adapter itself; one LUN per target. */
	ha->ntargets = IPS_MAX_TARGETS + 1;
	ha->nlun = 1;
	ha->nbus = (ha->enq->ucMaxPhysicalDevices / IPS_MAX_TARGETS) + 1;

	/* Max transfer size scales with the stripe-size code (64K-512K). */
	switch (ha->conf->logical_drive[0].ucStripeSize) {
	case 4:
		ha->max_xfer = 0x10000;
		break;

	case 5:
		ha->max_xfer = 0x20000;
		break;

	case 6:
		ha->max_xfer = 0x40000;
		break;

	case 7:
	default:
		ha->max_xfer = 0x80000;
		break;
	}

	/* Command depth: either adapter-reported, or derived from the
	 * stripe size when the adapter doesn't report it. */
	if (le32_to_cpu(ha->subsys->param[4]) & 0x1) {
		/* Adapter reports its concurrent command count. */
		ha->max_cmds = ha->enq->ucConcurrentCmdCount;
	} else {

		switch (ha->conf->logical_drive[0].ucStripeSize) {
		case 4:
			ha->max_cmds = 32;
			break;

		case 5:
			ha->max_cmds = 16;
			break;

		case 6:
			ha->max_cmds = 8;
			break;

		case 7:
		default:
			ha->max_cmds = 4;
			break;
		}
	}

	/* "Lite" adapters get an optional lower command-depth cap. */
	if ((ha->ad_type == IPS_ADTYPE_SERVERAID3L) ||
	    (ha->ad_type == IPS_ADTYPE_SERVERAID4L) ||
	    (ha->ad_type == IPS_ADTYPE_SERVERAID4LX)) {
		if ((ha->max_cmds > MaxLiteCmds) && (MaxLiteCmds))
			ha->max_cmds = MaxLiteCmds;
	}

	/* Per-bus initiator IDs; clear the DCDB-active bitmaps. */
	ha->ha_id[0] = IPS_ADAPTER_ID;
	for (i = 1; i < ha->nbus; i++) {
		ha->ha_id[i] = ha->conf->init_id[i - 1] & 0x1f;
		ha->dcdb_active[i - 1] = 0;
	}

	return (1);
}
2517
2518
2519
2520
2521
2522
2523
2524
2525
2526
/*
 * ips_next - command scheduler: move queued work onto the adapter.
 *
 * Dispatches waiting passthru (ioctl) requests first, then regular SCSI
 * commands, building an SCB for each and sending it.  When called with
 * intr == IPS_INTR_ON, the host lock is taken here and dropped around
 * the potentially sleeping passthru setup and around scsi_done calls.
 */
static void
ips_next(ips_ha_t * ha, int intr)
{
	ips_scb_t *scb;
	struct scsi_cmnd *SC;
	struct scsi_cmnd *p;
	struct scsi_cmnd *q;
	ips_copp_wait_item_t *item;
	int ret;
	struct Scsi_Host *host;
	METHOD_TRACE("ips_next", 1);

	if (!ha)
		return;
	host = ips_sh[ha->host_num];

	/* Caller holds the lock when intr != IPS_INTR_ON. */
	if (intr == IPS_INTR_ON)
		spin_lock(host->host_lock);

	/* Periodic FFDC time update (every 8 hours) while idle. */
	if ((ha->subsys->param[3] & 0x300000)
	    && (ha->scb_activelist.count == 0)) {
		struct timeval tv;

		do_gettimeofday(&tv);

		if (tv.tv_sec - ha->last_ffdc > IPS_SECS_8HOURS) {
			ha->last_ffdc = tv.tv_sec;
			ips_ffdc_time(ha);
		}
	}

	/*
	 * Phase 1: passthru (ioctl) queue, limited to IPS_MAX_IOCTL
	 * concurrently active passthrus.
	 */
	while ((ha->num_ioctl < IPS_MAX_IOCTL) &&
	       (ha->copp_waitlist.head) && (scb = ips_getscb(ha))) {

		item = ips_removeq_copp_head(&ha->copp_waitlist);
		ha->num_ioctl++;
		/* Drop the lock around passthru setup (buffer copies). */
		if (intr == IPS_INTR_ON)
			spin_unlock(host->host_lock);
		scb->scsi_cmd = item->scsi_cmd;
		kfree(item);

		ret = ips_make_passthru(ha, scb->scsi_cmd, scb, intr);

		if (intr == IPS_INTR_ON)
			spin_lock(host->host_lock);
		switch (ret) {
		case IPS_FAILURE:
			if (scb->scsi_cmd) {
				scb->scsi_cmd->result = DID_ERROR << 16;
				scb->scsi_cmd->scsi_done(scb->scsi_cmd);
			}

			ips_freescb(ha, scb);
			break;
		case IPS_SUCCESS_IMM:
			/* Answered in-line - complete immediately. */
			if (scb->scsi_cmd) {
				scb->scsi_cmd->result = DID_OK << 16;
				scb->scsi_cmd->scsi_done(scb->scsi_cmd);
			}

			ips_freescb(ha, scb);
			break;
		default:
			break;
		}

		if (ret != IPS_SUCCESS) {
			ha->num_ioctl--;
			continue;
		}

		ret = ips_send_cmd(ha, scb);

		if (ret == IPS_SUCCESS)
			ips_putq_scb_head(&ha->scb_activelist, scb);
		else
			ha->num_ioctl--;

		switch (ret) {
		case IPS_FAILURE:
			if (scb->scsi_cmd) {
				scb->scsi_cmd->result = DID_ERROR << 16;
			}

			ips_freescb(ha, scb);
			break;
		case IPS_SUCCESS_IMM:
			ips_freescb(ha, scb);
			break;
		default:
			break;
		}

	}

	/*
	 * Phase 2: regular SCSI command queue.
	 */
	p = ha->scb_waitlist.head;
	while ((p) && (scb = ips_getscb(ha))) {
		/* Skip targets with a DCDB command already in flight on
		 * physical channels (> 0). */
		if ((scmd_channel(p) > 0)
		    && (ha->
			dcdb_active[scmd_channel(p) -
				    1] & (1 << scmd_id(p)))) {
			ips_freescb(ha, scb);
			p = (struct scsi_cmnd *) p->host_scribble;
			continue;
		}

		q = p;
		SC = ips_removeq_wait(&ha->scb_waitlist, q);

		/* scsi_done may be called below - drop the lock. */
		if (intr == IPS_INTR_ON)
			spin_unlock(host->host_lock);

		SC->result = DID_OK;
		SC->host_scribble = NULL;

		scb->target_id = SC->device->id;
		scb->lun = SC->device->lun;
		scb->bus = SC->device->channel;
		scb->scsi_cmd = SC;
		scb->breakup = 0;
		scb->data_len = 0;
		scb->callback = ipsintr_done;
		scb->timeout = ips_cmd_timeout;
		memset(&scb->cmd, 0, 16);

		/* Copy the CDB into the SCB for the adapter. */
		memcpy(scb->cdb, SC->cmnd, SC->cmd_len);

		/* Map the data buffer and build the S/G list; the fill
		 * helper stops early on a transfer-size breakup. */
		scb->sg_count = scsi_dma_map(SC);
		BUG_ON(scb->sg_count < 0);
		if (scb->sg_count) {
			struct scatterlist *sg;
			int i;

			scb->flags |= IPS_SCB_MAP_SG;

			scsi_for_each_sg(SC, sg, scb->sg_count, i) {
				if (ips_fill_scb_sg_single
				    (ha, sg_dma_address(sg), scb, i,
				     sg_dma_len(sg)) < 0)
					break;
			}
			scb->dcdb.transfer_length = scb->data_len;
		} else {
			scb->data_busaddr = 0L;
			scb->sg_len = 0;
			scb->data_len = 0;
			scb->dcdb.transfer_length = 0;
		}

		/* Transfer direction is keyed off the SCSI opcode. */
		scb->dcdb.cmd_attribute =
		    ips_command_direction[scb->scsi_cmd->cmnd[0]];

		/* A zero-length WRITE_BUFFER carries no data. */
		if ((scb->scsi_cmd->cmnd[0] == WRITE_BUFFER) &&
		    (scb->data_len == 0))
			scb->dcdb.cmd_attribute = 0;

		if (!(scb->dcdb.cmd_attribute & 0x3))
			scb->dcdb.transfer_length = 0;

		/* Transfers >= 64K use the 64K attribute with length 0. */
		if (scb->data_len >= IPS_MAX_XFER) {
			scb->dcdb.cmd_attribute |= IPS_TRANSFER64K;
			scb->dcdb.transfer_length = 0;
		}
		if (intr == IPS_INTR_ON)
			spin_lock(host->host_lock);

		ret = ips_send_cmd(ha, scb);

		switch (ret) {
		case IPS_SUCCESS:
			ips_putq_scb_head(&ha->scb_activelist, scb);
			break;
		case IPS_FAILURE:
			if (scb->scsi_cmd) {
				scb->scsi_cmd->result = DID_ERROR << 16;
				scb->scsi_cmd->scsi_done(scb->scsi_cmd);
			}

			/* Clear the per-target DCDB-active bit again. */
			if (scb->bus)
				ha->dcdb_active[scb->bus - 1] &=
				    ~(1 << scb->target_id);

			ips_freescb(ha, scb);
			break;
		case IPS_SUCCESS_IMM:
			if (scb->scsi_cmd)
				scb->scsi_cmd->scsi_done(scb->scsi_cmd);

			if (scb->bus)
				ha->dcdb_active[scb->bus - 1] &=
				    ~(1 << scb->target_id);

			ips_freescb(ha, scb);
			break;
		default:
			break;
		}

		/* Wait-list is threaded through host_scribble. */
		p = (struct scsi_cmnd *) p->host_scribble;

	}

	if (intr == IPS_INTR_ON)
		spin_unlock(host->host_lock);
}
2749
2750
2751
2752
2753
2754
2755
2756
2757
2758
2759
2760
2761static void
2762ips_putq_scb_head(ips_scb_queue_t * queue, ips_scb_t * item)
2763{
2764 METHOD_TRACE("ips_putq_scb_head", 1);
2765
2766 if (!item)
2767 return;
2768
2769 item->q_next = queue->head;
2770 queue->head = item;
2771
2772 if (!queue->tail)
2773 queue->tail = item;
2774
2775 queue->count++;
2776}
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789static ips_scb_t *
2790ips_removeq_scb_head(ips_scb_queue_t * queue)
2791{
2792 ips_scb_t *item;
2793
2794 METHOD_TRACE("ips_removeq_scb_head", 1);
2795
2796 item = queue->head;
2797
2798 if (!item) {
2799 return (NULL);
2800 }
2801
2802 queue->head = item->q_next;
2803 item->q_next = NULL;
2804
2805 if (queue->tail == item)
2806 queue->tail = NULL;
2807
2808 queue->count--;
2809
2810 return (item);
2811}
2812
2813
2814
2815
2816
2817
2818
2819
2820
2821
2822
2823
2824static ips_scb_t *
2825ips_removeq_scb(ips_scb_queue_t * queue, ips_scb_t * item)
2826{
2827 ips_scb_t *p;
2828
2829 METHOD_TRACE("ips_removeq_scb", 1);
2830
2831 if (!item)
2832 return (NULL);
2833
2834 if (item == queue->head) {
2835 return (ips_removeq_scb_head(queue));
2836 }
2837
2838 p = queue->head;
2839
2840 while ((p) && (item != p->q_next))
2841 p = p->q_next;
2842
2843 if (p) {
2844
2845 p->q_next = item->q_next;
2846
2847 if (!item->q_next)
2848 queue->tail = p;
2849
2850 item->q_next = NULL;
2851 queue->count--;
2852
2853 return (item);
2854 }
2855
2856 return (NULL);
2857}
2858
2859
2860
2861
2862
2863
2864
2865
2866
2867
2868
2869
2870static void ips_putq_wait_tail(ips_wait_queue_entry_t *queue, struct scsi_cmnd *item)
2871{
2872 METHOD_TRACE("ips_putq_wait_tail", 1);
2873
2874 if (!item)
2875 return;
2876
2877 item->host_scribble = NULL;
2878
2879 if (queue->tail)
2880 queue->tail->host_scribble = (char *) item;
2881
2882 queue->tail = item;
2883
2884 if (!queue->head)
2885 queue->head = item;
2886
2887 queue->count++;
2888}
2889
2890
2891
2892
2893
2894
2895
2896
2897
2898
2899
2900
2901static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_entry_t *queue)
2902{
2903 struct scsi_cmnd *item;
2904
2905 METHOD_TRACE("ips_removeq_wait_head", 1);
2906
2907 item = queue->head;
2908
2909 if (!item) {
2910 return (NULL);
2911 }
2912
2913 queue->head = (struct scsi_cmnd *) item->host_scribble;
2914 item->host_scribble = NULL;
2915
2916 if (queue->tail == item)
2917 queue->tail = NULL;
2918
2919 queue->count--;
2920
2921 return (item);
2922}
2923
2924
2925
2926
2927
2928
2929
2930
2931
2932
2933
2934
2935static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_entry_t *queue,
2936 struct scsi_cmnd *item)
2937{
2938 struct scsi_cmnd *p;
2939
2940 METHOD_TRACE("ips_removeq_wait", 1);
2941
2942 if (!item)
2943 return (NULL);
2944
2945 if (item == queue->head) {
2946 return (ips_removeq_wait_head(queue));
2947 }
2948
2949 p = queue->head;
2950
2951 while ((p) && (item != (struct scsi_cmnd *) p->host_scribble))
2952 p = (struct scsi_cmnd *) p->host_scribble;
2953
2954 if (p) {
2955
2956 p->host_scribble = item->host_scribble;
2957
2958 if (!item->host_scribble)
2959 queue->tail = p;
2960
2961 item->host_scribble = NULL;
2962 queue->count--;
2963
2964 return (item);
2965 }
2966
2967 return (NULL);
2968}
2969
2970
2971
2972
2973
2974
2975
2976
2977
2978
2979
2980
2981static void
2982ips_putq_copp_tail(ips_copp_queue_t * queue, ips_copp_wait_item_t * item)
2983{
2984 METHOD_TRACE("ips_putq_copp_tail", 1);
2985
2986 if (!item)
2987 return;
2988
2989 item->next = NULL;
2990
2991 if (queue->tail)
2992 queue->tail->next = item;
2993
2994 queue->tail = item;
2995
2996 if (!queue->head)
2997 queue->head = item;
2998
2999 queue->count++;
3000}
3001
3002
3003
3004
3005
3006
3007
3008
3009
3010
3011
3012
3013static ips_copp_wait_item_t *
3014ips_removeq_copp_head(ips_copp_queue_t * queue)
3015{
3016 ips_copp_wait_item_t *item;
3017
3018 METHOD_TRACE("ips_removeq_copp_head", 1);
3019
3020 item = queue->head;
3021
3022 if (!item) {
3023 return (NULL);
3024 }
3025
3026 queue->head = item->next;
3027 item->next = NULL;
3028
3029 if (queue->tail == item)
3030 queue->tail = NULL;
3031
3032 queue->count--;
3033
3034 return (item);
3035}
3036
3037
3038
3039
3040
3041
3042
3043
3044
3045
3046
3047
3048static ips_copp_wait_item_t *
3049ips_removeq_copp(ips_copp_queue_t * queue, ips_copp_wait_item_t * item)
3050{
3051 ips_copp_wait_item_t *p;
3052
3053 METHOD_TRACE("ips_removeq_copp", 1);
3054
3055 if (!item)
3056 return (NULL);
3057
3058 if (item == queue->head) {
3059 return (ips_removeq_copp_head(queue));
3060 }
3061
3062 p = queue->head;
3063
3064 while ((p) && (item != p->next))
3065 p = p->next;
3066
3067 if (p) {
3068
3069 p->next = item->next;
3070
3071 if (!item->next)
3072 queue->tail = p;
3073
3074 item->next = NULL;
3075 queue->count--;
3076
3077 return (item);
3078 }
3079
3080 return (NULL);
3081}
3082
3083
3084
3085
3086
3087
3088
3089
3090
3091
3092static void
3093ipsintr_blocking(ips_ha_t * ha, ips_scb_t * scb)
3094{
3095 METHOD_TRACE("ipsintr_blocking", 2);
3096
3097 ips_freescb(ha, scb);
3098 if ((ha->waitflag == TRUE) && (ha->cmd_in_progress == scb->cdb[0])) {
3099 ha->waitflag = FALSE;
3100
3101 return;
3102 }
3103}
3104
3105
3106
3107
3108
3109
3110
3111
3112
3113
3114static void
3115ipsintr_done(ips_ha_t * ha, ips_scb_t * scb)
3116{
3117 METHOD_TRACE("ipsintr_done", 2);
3118
3119 if (!scb) {
3120 IPS_PRINTK(KERN_WARNING, ha->pcidev,
3121 "Spurious interrupt; scb NULL.\n");
3122
3123 return;
3124 }
3125
3126 if (scb->scsi_cmd == NULL) {
3127
3128 IPS_PRINTK(KERN_WARNING, ha->pcidev,
3129 "Spurious interrupt; scsi_cmd not set.\n");
3130
3131 return;
3132 }
3133
3134 ips_done(ha, scb);
3135}
3136
3137
3138
3139
3140
3141
3142
3143
3144
3145
/*
 * Finish a completed command.
 *
 * Passthru (ioctl) commands get their own cleanup path.  Commands that
 * were broken up because they exceeded the adapter's transfer limits
 * (scb->breakup / scb->sg_break set) have their remaining scatter-
 * gather entries loaded into the SCB and are re-issued instead of
 * being completed.  Everything else is completed back to the SCSI
 * mid-layer and its SCB freed.
 */
static void
ips_done(ips_ha_t * ha, ips_scb_t * scb)
{
	int ret;

	METHOD_TRACE("ips_done", 1);

	if (!scb)
		return;

	if ((scb->scsi_cmd) && (ips_is_passthru(scb->scsi_cmd))) {
		ips_cleanup_passthru(ha, scb);
		ha->num_ioctl--;
	} else {

		/*
		 * Check to see if this command had too much data
		 * and had to be broken up.  If so, queue the rest
		 * of the data and continue with the next chunk.
		 */
		if ((scb->breakup) || (scb->sg_break)) {
			struct scatterlist *sg;
			int i, sg_dma_index, ips_sg_index = 0;

			/* Rebuild the SCB's SG list from scratch. */
			scb->data_len = 0;

			sg = scsi_sglist(scb->scsi_cmd);

			/* Spin forward to the breakup point --
			 * pick up where the previous chunk left off. */
			sg_dma_index = scb->breakup;
			for (i = 0; i < scb->breakup; i++)
				sg = sg_next(sg);

			/* First remaining entry, loaded unconditionally. */
			ips_fill_scb_sg_single(ha,
					       sg_dma_address(sg),
					       scb, ips_sg_index++,
					       sg_dma_len(sg));

			/* Load as many of the remaining entries as fit. */
			for (; sg_dma_index < scsi_sg_count(scb->scsi_cmd);
			     sg_dma_index++, sg = sg_next(sg)) {
				if (ips_fill_scb_sg_single
				    (ha,
				     sg_dma_address(sg),
				     scb, ips_sg_index++,
				     sg_dma_len(sg)) < 0)
					break;
			}

			scb->dcdb.transfer_length = scb->data_len;
			scb->dcdb.cmd_attribute |=
			    ips_command_direction[scb->scsi_cmd->cmnd[0]];

			/* No data direction bits: no transfer length. */
			if (!(scb->dcdb.cmd_attribute & 0x3))
				scb->dcdb.transfer_length = 0;

			/* Transfers of 64K or more use the 64K attribute
			 * instead of an explicit length. */
			if (scb->data_len >= IPS_MAX_XFER) {
				scb->dcdb.cmd_attribute |= IPS_TRANSFER64K;
				scb->dcdb.transfer_length = 0;
			}

			/* Re-issue the command for the next chunk. */
			ret = ips_send_cmd(ha, scb);

			switch (ret) {
			case IPS_FAILURE:
				if (scb->scsi_cmd) {
					scb->scsi_cmd->result = DID_ERROR << 16;
					scb->scsi_cmd->scsi_done(scb->scsi_cmd);
				}

				ips_freescb(ha, scb);
				break;
			case IPS_SUCCESS_IMM:
				if (scb->scsi_cmd) {
					scb->scsi_cmd->result = DID_ERROR << 16;
					scb->scsi_cmd->scsi_done(scb->scsi_cmd);
				}

				ips_freescb(ha, scb);
				break;
			default:
				break;
			}

			/* Command re-queued; do not complete it yet. */
			return;
		}
	}

	/* Physical device: clear its DCDB-active bit. */
	if (scb->bus) {
		ha->dcdb_active[scb->bus - 1] &= ~(1 << scb->target_id);
	}

	scb->scsi_cmd->scsi_done(scb->scsi_cmd);

	ips_freescb(ha, scb);
}
3242
3243
3244
3245
3246
3247
3248
3249
3250
3251
/*
 * Translate an adapter basic/extended status pair into a SCSI
 * mid-layer result code, stored in scb->scsi_cmd->result.
 *
 * For physical-bus check conditions the DCDB sense data is copied into
 * the command's sense buffer and a CHECK CONDITION SAM status (2) is
 * ORed into the low byte of the result.
 *
 * Always returns 1.
 */
static int
ips_map_status(ips_ha_t * ha, ips_scb_t * scb, ips_stat_t * sp)
{
	int errcode;
	int device_error;
	uint32_t transfer_len;
	IPS_DCDB_TABLE_TAPE *tapeDCDB;
	IPS_SCSI_INQ_DATA inquiryData;

	METHOD_TRACE("ips_map_status", 1);

	if (scb->bus) {
		DEBUG_VAR(2,
			  "(%s%d) Physical device error (%d %d %d): %x %x, Sense Key: %x, ASC: %x, ASCQ: %x",
			  ips_name, ha->host_num,
			  scb->scsi_cmd->device->channel,
			  scb->scsi_cmd->device->id, scb->scsi_cmd->device->lun,
			  scb->basic_status, scb->extended_status,
			  scb->extended_status ==
			  IPS_ERR_CKCOND ? scb->dcdb.sense_info[2] & 0xf : 0,
			  scb->extended_status ==
			  IPS_ERR_CKCOND ? scb->dcdb.sense_info[12] : 0,
			  scb->extended_status ==
			  IPS_ERR_CKCOND ? scb->dcdb.sense_info[13] : 0);
	}

	/* Default to an error until proven otherwise. */
	errcode = DID_ERROR;
	device_error = 0;

	switch (scb->basic_status & IPS_GSC_STATUS_MASK) {
	case IPS_CMD_TIMEOUT:
		errcode = DID_TIME_OUT;
		break;

	case IPS_INVAL_OPCO:
	case IPS_INVAL_CMD_BLK:
	case IPS_INVAL_PARM_BLK:
	case IPS_LD_ERROR:
	case IPS_CMD_CMPLT_WERROR:
		/* These all map to the DID_ERROR default. */
		break;

	case IPS_PHYS_DRV_ERROR:
		/* Physical drive errors: decode the extended status. */
		switch (scb->extended_status) {
		case IPS_ERR_SEL_TO:
			/* Selection timeout on a real device. */
			if (scb->bus)
				errcode = DID_NO_CONNECT;

			break;

		case IPS_ERR_OU_RUN:
			/* Data over/underrun: the transfer length lives in
			 * a different spot for (extended) tape DCDBs. */
			if ((scb->cmd.dcdb.op_code == IPS_CMD_EXTENDED_DCDB) ||
			    (scb->cmd.dcdb.op_code ==
			     IPS_CMD_EXTENDED_DCDB_SG)) {
				tapeDCDB = (IPS_DCDB_TABLE_TAPE *) & scb->dcdb;
				transfer_len = tapeDCDB->transfer_length;
			} else {
				transfer_len =
				    (uint32_t) scb->dcdb.transfer_length;
			}

			if ((scb->bus) && (transfer_len < scb->data_len)) {
				/* Underrun: a short transfer is acceptable... */
				errcode = DID_OK;

				/* ...unless it was an INQUIRY of a disk, in
				 * which case report a timeout instead. */
				if (scb->scsi_cmd->cmnd[0] == INQUIRY) {
					ips_scmd_buf_read(scb->scsi_cmd,
							  &inquiryData, sizeof (inquiryData));
					if ((inquiryData.DeviceType & 0x1f) == TYPE_DISK) {
						errcode = DID_TIME_OUT;
						break;
					}
				}
			} else
				errcode = DID_ERROR;

			break;

		case IPS_ERR_RECOVERY:
			/* Recovered error: the command itself succeeded. */
			if (scb->bus)
				errcode = DID_OK;

			break;

		case IPS_ERR_HOST_RESET:
		case IPS_ERR_DEV_RESET:
			errcode = DID_RESET;
			break;

		case IPS_ERR_CKCOND:
			if (scb->bus) {
				/* Copy the sense data out of whichever DCDB
				 * layout the command used. */
				if ((scb->cmd.dcdb.op_code ==
				     IPS_CMD_EXTENDED_DCDB)
				    || (scb->cmd.dcdb.op_code ==
					IPS_CMD_EXTENDED_DCDB_SG)) {
					tapeDCDB =
					    (IPS_DCDB_TABLE_TAPE *) & scb->dcdb;
					memcpy(scb->scsi_cmd->sense_buffer,
					       tapeDCDB->sense_info,
					       SCSI_SENSE_BUFFERSIZE);
				} else {
					memcpy(scb->scsi_cmd->sense_buffer,
					       scb->dcdb.sense_info,
					       SCSI_SENSE_BUFFERSIZE);
				}
				/* SAM status CHECK CONDITION in low byte. */
				device_error = 2;
			}

			errcode = DID_OK;

			break;

		default:
			errcode = DID_ERROR;
			break;

		}
	}

	scb->scsi_cmd->result = device_error | (errcode << 16);

	return (1);
}
3377
3378
3379
3380
3381
3382
3383
3384
3385
3386
3387
3388
3389static int
3390ips_send_wait(ips_ha_t * ha, ips_scb_t * scb, int timeout, int intr)
3391{
3392 int ret;
3393
3394 METHOD_TRACE("ips_send_wait", 1);
3395
3396 if (intr != IPS_FFDC) {
3397 ha->waitflag = TRUE;
3398 ha->cmd_in_progress = scb->cdb[0];
3399 }
3400 scb->callback = ipsintr_blocking;
3401 ret = ips_send_cmd(ha, scb);
3402
3403 if ((ret == IPS_FAILURE) || (ret == IPS_SUCCESS_IMM))
3404 return (ret);
3405
3406 if (intr != IPS_FFDC)
3407 ret = ips_wait(ha, timeout, intr);
3408
3409 return (ret);
3410}
3411
3412
3413
3414
3415
3416
3417
3418
/*
 * Copy a driver buffer into a SCSI command's scatter-gather data
 * (driver -> command).  Interrupts are disabled around the copy
 * (NOTE(review): presumably so the sg copy helper's page mappings
 * stay atomic -- confirm against scsi_sg_copy_from_buffer docs).
 */
static void
ips_scmd_buf_write(struct scsi_cmnd *scmd, void *data, unsigned int count)
{
	unsigned long flags;

	local_irq_save(flags);
	scsi_sg_copy_from_buffer(scmd, data, count);
	local_irq_restore(flags);
}
3428
3429
3430
3431
3432
3433
3434
3435
/*
 * Copy a SCSI command's scatter-gather data into a driver buffer
 * (command -> driver).  Mirror of ips_scmd_buf_write(); the same
 * interrupt-disable note applies.
 */
static void
ips_scmd_buf_read(struct scsi_cmnd *scmd, void *data, unsigned int count)
{
	unsigned long flags;

	local_irq_save(flags);
	scsi_sg_copy_to_buffer(scmd, data, count);
	local_irq_restore(flags);
}
3445
3446
3447
3448
3449
3450
3451
3452
3453
3454
3455static int
3456ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
3457{
3458 int ret;
3459 char *sp;
3460 int device_error;
3461 IPS_DCDB_TABLE_TAPE *tapeDCDB;
3462 int TimeOut;
3463
3464 METHOD_TRACE("ips_send_cmd", 1);
3465
3466 ret = IPS_SUCCESS;
3467
3468 if (!scb->scsi_cmd) {
3469
3470
3471 if (scb->bus > 0) {
3472
3473
3474 if ((ha->waitflag == TRUE) &&
3475 (ha->cmd_in_progress == scb->cdb[0])) {
3476 ha->waitflag = FALSE;
3477 }
3478
3479 return (1);
3480 }
3481 } else if ((scb->bus == 0) && (!ips_is_passthru(scb->scsi_cmd))) {
3482
3483 ret = IPS_SUCCESS_IMM;
3484
3485 switch (scb->scsi_cmd->cmnd[0]) {
3486 case ALLOW_MEDIUM_REMOVAL:
3487 case REZERO_UNIT:
3488 case ERASE:
3489 case WRITE_FILEMARKS:
3490 case SPACE:
3491 scb->scsi_cmd->result = DID_ERROR << 16;
3492 break;
3493
3494 case START_STOP:
3495 scb->scsi_cmd->result = DID_OK << 16;
3496
3497 case TEST_UNIT_READY:
3498 case INQUIRY:
3499 if (scb->target_id == IPS_ADAPTER_ID) {
3500
3501
3502
3503
3504 if (scb->scsi_cmd->cmnd[0] == TEST_UNIT_READY)
3505 scb->scsi_cmd->result = DID_OK << 16;
3506
3507 if (scb->scsi_cmd->cmnd[0] == INQUIRY) {
3508 IPS_SCSI_INQ_DATA inquiry;
3509
3510 memset(&inquiry, 0,
3511 sizeof (IPS_SCSI_INQ_DATA));
3512
3513 inquiry.DeviceType =
3514 IPS_SCSI_INQ_TYPE_PROCESSOR;
3515 inquiry.DeviceTypeQualifier =
3516 IPS_SCSI_INQ_LU_CONNECTED;
3517 inquiry.Version = IPS_SCSI_INQ_REV2;
3518 inquiry.ResponseDataFormat =
3519 IPS_SCSI_INQ_RD_REV2;
3520 inquiry.AdditionalLength = 31;
3521 inquiry.Flags[0] =
3522 IPS_SCSI_INQ_Address16;
3523 inquiry.Flags[1] =
3524 IPS_SCSI_INQ_WBus16 |
3525 IPS_SCSI_INQ_Sync;
3526 strncpy(inquiry.VendorId, "IBM ",
3527 8);
3528 strncpy(inquiry.ProductId,
3529 "SERVERAID ", 16);
3530 strncpy(inquiry.ProductRevisionLevel,
3531 "1.00", 4);
3532
3533 ips_scmd_buf_write(scb->scsi_cmd,
3534 &inquiry,
3535 sizeof (inquiry));
3536
3537 scb->scsi_cmd->result = DID_OK << 16;
3538 }
3539 } else {
3540 scb->cmd.logical_info.op_code = IPS_CMD_GET_LD_INFO;
3541 scb->cmd.logical_info.command_id = IPS_COMMAND_ID(ha, scb);
3542 scb->cmd.logical_info.reserved = 0;
3543 scb->cmd.logical_info.reserved2 = 0;
3544 scb->data_len = sizeof (IPS_LD_INFO);
3545 scb->data_busaddr = ha->logical_drive_info_dma_addr;
3546 scb->flags = 0;
3547 scb->cmd.logical_info.buffer_addr = scb->data_busaddr;
3548 ret = IPS_SUCCESS;
3549 }
3550
3551 break;
3552
3553 case REQUEST_SENSE:
3554 ips_reqsen(ha, scb);
3555 scb->scsi_cmd->result = DID_OK << 16;
3556 break;
3557
3558 case READ_6:
3559 case WRITE_6:
3560 if (!scb->sg_len) {
3561 scb->cmd.basic_io.op_code =
3562 (scb->scsi_cmd->cmnd[0] ==
3563 READ_6) ? IPS_CMD_READ : IPS_CMD_WRITE;
3564 scb->cmd.basic_io.enhanced_sg = 0;
3565 scb->cmd.basic_io.sg_addr =
3566 cpu_to_le32(scb->data_busaddr);
3567 } else {
3568 scb->cmd.basic_io.op_code =
3569 (scb->scsi_cmd->cmnd[0] ==
3570 READ_6) ? IPS_CMD_READ_SG :
3571 IPS_CMD_WRITE_SG;
3572 scb->cmd.basic_io.enhanced_sg =
3573 IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0;
3574 scb->cmd.basic_io.sg_addr =
3575 cpu_to_le32(scb->sg_busaddr);
3576 }
3577
3578 scb->cmd.basic_io.segment_4G = 0;
3579 scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
3580 scb->cmd.basic_io.log_drv = scb->target_id;
3581 scb->cmd.basic_io.sg_count = scb->sg_len;
3582
3583 if (scb->cmd.basic_io.lba)
3584 le32_add_cpu(&scb->cmd.basic_io.lba,
3585 le16_to_cpu(scb->cmd.basic_io.
3586 sector_count));
3587 else
3588 scb->cmd.basic_io.lba =
3589 (((scb->scsi_cmd->
3590 cmnd[1] & 0x1f) << 16) | (scb->scsi_cmd->
3591 cmnd[2] << 8) |
3592 (scb->scsi_cmd->cmnd[3]));
3593
3594 scb->cmd.basic_io.sector_count =
3595 cpu_to_le16(scb->data_len / IPS_BLKSIZE);
3596
3597 if (le16_to_cpu(scb->cmd.basic_io.sector_count) == 0)
3598 scb->cmd.basic_io.sector_count =
3599 cpu_to_le16(256);
3600
3601 ret = IPS_SUCCESS;
3602 break;
3603
3604 case READ_10:
3605 case WRITE_10:
3606 if (!scb->sg_len) {
3607 scb->cmd.basic_io.op_code =
3608 (scb->scsi_cmd->cmnd[0] ==
3609 READ_10) ? IPS_CMD_READ : IPS_CMD_WRITE;
3610 scb->cmd.basic_io.enhanced_sg = 0;
3611 scb->cmd.basic_io.sg_addr =
3612 cpu_to_le32(scb->data_busaddr);
3613 } else {
3614 scb->cmd.basic_io.op_code =
3615 (scb->scsi_cmd->cmnd[0] ==
3616 READ_10) ? IPS_CMD_READ_SG :
3617 IPS_CMD_WRITE_SG;
3618 scb->cmd.basic_io.enhanced_sg =
3619 IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0;
3620 scb->cmd.basic_io.sg_addr =
3621 cpu_to_le32(scb->sg_busaddr);
3622 }
3623
3624 scb->cmd.basic_io.segment_4G = 0;
3625 scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
3626 scb->cmd.basic_io.log_drv = scb->target_id;
3627 scb->cmd.basic_io.sg_count = scb->sg_len;
3628
3629 if (scb->cmd.basic_io.lba)
3630 le32_add_cpu(&scb->cmd.basic_io.lba,
3631 le16_to_cpu(scb->cmd.basic_io.
3632 sector_count));
3633 else
3634 scb->cmd.basic_io.lba =
3635 ((scb->scsi_cmd->cmnd[2] << 24) | (scb->
3636 scsi_cmd->
3637 cmnd[3]
3638 << 16) |
3639 (scb->scsi_cmd->cmnd[4] << 8) | scb->
3640 scsi_cmd->cmnd[5]);
3641
3642 scb->cmd.basic_io.sector_count =
3643 cpu_to_le16(scb->data_len / IPS_BLKSIZE);
3644
3645 if (cpu_to_le16(scb->cmd.basic_io.sector_count) == 0) {
3646
3647
3648
3649
3650
3651 scb->scsi_cmd->result = DID_OK << 16;
3652 } else
3653 ret = IPS_SUCCESS;
3654
3655 break;
3656
3657 case RESERVE:
3658 case RELEASE:
3659 scb->scsi_cmd->result = DID_OK << 16;
3660 break;
3661
3662 case MODE_SENSE:
3663 scb->cmd.basic_io.op_code = IPS_CMD_ENQUIRY;
3664 scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
3665 scb->cmd.basic_io.segment_4G = 0;
3666 scb->cmd.basic_io.enhanced_sg = 0;
3667 scb->data_len = sizeof (*ha->enq);
3668 scb->cmd.basic_io.sg_addr = ha->enq_busaddr;
3669 ret = IPS_SUCCESS;
3670 break;
3671
3672 case READ_CAPACITY:
3673 scb->cmd.logical_info.op_code = IPS_CMD_GET_LD_INFO;
3674 scb->cmd.logical_info.command_id = IPS_COMMAND_ID(ha, scb);
3675 scb->cmd.logical_info.reserved = 0;
3676 scb->cmd.logical_info.reserved2 = 0;
3677 scb->cmd.logical_info.reserved3 = 0;
3678 scb->data_len = sizeof (IPS_LD_INFO);
3679 scb->data_busaddr = ha->logical_drive_info_dma_addr;
3680 scb->flags = 0;
3681 scb->cmd.logical_info.buffer_addr = scb->data_busaddr;
3682 ret = IPS_SUCCESS;
3683 break;
3684
3685 case SEND_DIAGNOSTIC:
3686 case REASSIGN_BLOCKS:
3687 case FORMAT_UNIT:
3688 case SEEK_10:
3689 case VERIFY:
3690 case READ_DEFECT_DATA:
3691 case READ_BUFFER:
3692 case WRITE_BUFFER:
3693 scb->scsi_cmd->result = DID_OK << 16;
3694 break;
3695
3696 default:
3697
3698
3699
3700 sp = (char *) scb->scsi_cmd->sense_buffer;
3701
3702 sp[0] = 0x70;
3703 sp[2] = ILLEGAL_REQUEST;
3704 sp[7] = 0x0A;
3705 sp[12] = 0x20;
3706 sp[13] = 0x00;
3707
3708 device_error = 2;
3709 scb->scsi_cmd->result = device_error | (DID_OK << 16);
3710 break;
3711 }
3712 }
3713
3714 if (ret == IPS_SUCCESS_IMM)
3715 return (ret);
3716
3717
3718 if (scb->bus > 0) {
3719
3720
3721
3722 if (ha->conf->dev[scb->bus - 1][scb->target_id].ucState == 0) {
3723 scb->scsi_cmd->result = DID_NO_CONNECT << 16;
3724 return (IPS_SUCCESS_IMM);
3725 }
3726
3727 ha->dcdb_active[scb->bus - 1] |= (1 << scb->target_id);
3728 scb->cmd.dcdb.command_id = IPS_COMMAND_ID(ha, scb);
3729 scb->cmd.dcdb.dcdb_address = cpu_to_le32(scb->scb_busaddr +
3730 (unsigned long) &scb->
3731 dcdb -
3732 (unsigned long) scb);
3733 scb->cmd.dcdb.reserved = 0;
3734 scb->cmd.dcdb.reserved2 = 0;
3735 scb->cmd.dcdb.reserved3 = 0;
3736 scb->cmd.dcdb.segment_4G = 0;
3737 scb->cmd.dcdb.enhanced_sg = 0;
3738
3739 TimeOut = scb->scsi_cmd->request->timeout;
3740
3741 if (ha->subsys->param[4] & 0x00100000) {
3742 if (!scb->sg_len) {
3743 scb->cmd.dcdb.op_code = IPS_CMD_EXTENDED_DCDB;
3744 } else {
3745 scb->cmd.dcdb.op_code =
3746 IPS_CMD_EXTENDED_DCDB_SG;
3747 scb->cmd.dcdb.enhanced_sg =
3748 IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0;
3749 }
3750
3751 tapeDCDB = (IPS_DCDB_TABLE_TAPE *) & scb->dcdb;
3752 tapeDCDB->device_address =
3753 ((scb->bus - 1) << 4) | scb->target_id;
3754 tapeDCDB->cmd_attribute |= IPS_DISCONNECT_ALLOWED;
3755 tapeDCDB->cmd_attribute &= ~IPS_TRANSFER64K;
3756
3757 if (TimeOut) {
3758 if (TimeOut < (10 * HZ))
3759 tapeDCDB->cmd_attribute |= IPS_TIMEOUT10;
3760 else if (TimeOut < (60 * HZ))
3761 tapeDCDB->cmd_attribute |= IPS_TIMEOUT60;
3762 else if (TimeOut < (1200 * HZ))
3763 tapeDCDB->cmd_attribute |= IPS_TIMEOUT20M;
3764 }
3765
3766 tapeDCDB->cdb_length = scb->scsi_cmd->cmd_len;
3767 tapeDCDB->reserved_for_LUN = 0;
3768 tapeDCDB->transfer_length = scb->data_len;
3769 if (scb->cmd.dcdb.op_code == IPS_CMD_EXTENDED_DCDB_SG)
3770 tapeDCDB->buffer_pointer =
3771 cpu_to_le32(scb->sg_busaddr);
3772 else
3773 tapeDCDB->buffer_pointer =
3774 cpu_to_le32(scb->data_busaddr);
3775 tapeDCDB->sg_count = scb->sg_len;
3776 tapeDCDB->sense_length = sizeof (tapeDCDB->sense_info);
3777 tapeDCDB->scsi_status = 0;
3778 tapeDCDB->reserved = 0;
3779 memcpy(tapeDCDB->scsi_cdb, scb->scsi_cmd->cmnd,
3780 scb->scsi_cmd->cmd_len);
3781 } else {
3782 if (!scb->sg_len) {
3783 scb->cmd.dcdb.op_code = IPS_CMD_DCDB;
3784 } else {
3785 scb->cmd.dcdb.op_code = IPS_CMD_DCDB_SG;
3786 scb->cmd.dcdb.enhanced_sg =
3787 IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0;
3788 }
3789
3790 scb->dcdb.device_address =
3791 ((scb->bus - 1) << 4) | scb->target_id;
3792 scb->dcdb.cmd_attribute |= IPS_DISCONNECT_ALLOWED;
3793
3794 if (TimeOut) {
3795 if (TimeOut < (10 * HZ))
3796 scb->dcdb.cmd_attribute |= IPS_TIMEOUT10;
3797 else if (TimeOut < (60 * HZ))
3798 scb->dcdb.cmd_attribute |= IPS_TIMEOUT60;
3799 else if (TimeOut < (1200 * HZ))
3800 scb->dcdb.cmd_attribute |= IPS_TIMEOUT20M;
3801 }
3802
3803 scb->dcdb.transfer_length = scb->data_len;
3804 if (scb->dcdb.cmd_attribute & IPS_TRANSFER64K)
3805 scb->dcdb.transfer_length = 0;
3806 if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB_SG)
3807 scb->dcdb.buffer_pointer =
3808 cpu_to_le32(scb->sg_busaddr);
3809 else
3810 scb->dcdb.buffer_pointer =
3811 cpu_to_le32(scb->data_busaddr);
3812 scb->dcdb.cdb_length = scb->scsi_cmd->cmd_len;
3813 scb->dcdb.sense_length = sizeof (scb->dcdb.sense_info);
3814 scb->dcdb.sg_count = scb->sg_len;
3815 scb->dcdb.reserved = 0;
3816 memcpy(scb->dcdb.scsi_cdb, scb->scsi_cmd->cmnd,
3817 scb->scsi_cmd->cmd_len);
3818 scb->dcdb.scsi_status = 0;
3819 scb->dcdb.reserved2[0] = 0;
3820 scb->dcdb.reserved2[1] = 0;
3821 scb->dcdb.reserved2[2] = 0;
3822 }
3823 }
3824
3825 return ((*ha->func.issue) (ha, scb));
3826}
3827
3828
3829
3830
3831
3832
3833
3834
3835
3836
/*
 * Decode the completion status for the SCB identified by the adapter
 * status word.  Removes the SCB from the active list, and for
 * successful/recovered logical-bus commands performs the driver-side
 * post-processing (emulated INQUIRY, MODE SENSE, READ CAPACITY, ...).
 * Error statuses are translated via ips_map_status().  Passthru and
 * internal (no scsi_cmd) completions need no decoding here.
 */
static void
ips_chkstatus(ips_ha_t * ha, IPS_STATUS * pstatus)
{
	ips_scb_t *scb;
	ips_stat_t *sp;
	uint8_t basic_status;
	uint8_t ext_status;
	int errcode;
	IPS_SCSI_INQ_DATA inquiryData;

	METHOD_TRACE("ips_chkstatus", 1);

	/* The status word carries the command id of the finished SCB. */
	scb = &ha->scbs[pstatus->fields.command_id];
	scb->basic_status = basic_status =
	    pstatus->fields.basic_status & IPS_BASIC_STATUS_MASK;
	scb->extended_status = ext_status = pstatus->fields.extended_status;

	sp = &ha->sp;
	sp->residue_len = 0;
	sp->scb_addr = (void *) scb;

	/* Command is no longer in flight. */
	ips_removeq_scb(&ha->scb_activelist, scb);

	/* Internal command: nothing more to decode. */
	if (!scb->scsi_cmd)

		return;

	DEBUG_VAR(2, "(%s%d) ips_chkstatus: cmd 0x%X id %d (%d %d %d)",
		  ips_name,
		  ha->host_num,
		  scb->cdb[0],
		  scb->cmd.basic_io.command_id,
		  scb->bus, scb->target_id, scb->lun);

	/* Passthru commands are handled by their own completion path. */
	if ((scb->scsi_cmd) && (ips_is_passthru(scb->scsi_cmd)))

		return;

	errcode = DID_OK;

	if (((basic_status & IPS_GSC_STATUS_MASK) == IPS_CMD_SUCCESS) ||
	    ((basic_status & IPS_GSC_STATUS_MASK) == IPS_CMD_RECOVERED_ERROR)) {

		if (scb->bus == 0) {
			if ((basic_status & IPS_GSC_STATUS_MASK) ==
			    IPS_CMD_RECOVERED_ERROR) {
				DEBUG_VAR(1,
					  "(%s%d) Recovered Logical Drive Error OpCode: %x, BSB: %x, ESB: %x",
					  ips_name, ha->host_num,
					  scb->cmd.basic_io.op_code,
					  basic_status, ext_status);
			}

			/* Logical-bus command succeeded: finish the
			 * driver-side emulation for each opcode. */
			switch (scb->scsi_cmd->cmnd[0]) {
			case ALLOW_MEDIUM_REMOVAL:
			case REZERO_UNIT:
			case ERASE:
			case WRITE_FILEMARKS:
			case SPACE:
				errcode = DID_ERROR;
				break;

			case START_STOP:
				break;

			case TEST_UNIT_READY:
				/* Drive not online: report a timeout. */
				if (!ips_online(ha, scb)) {
					errcode = DID_TIME_OUT;
				}
				break;

			case INQUIRY:
				if (ips_online(ha, scb)) {
					ips_inquiry(ha, scb);
				} else {
					errcode = DID_TIME_OUT;
				}
				break;

			case REQUEST_SENSE:
				ips_reqsen(ha, scb);
				break;

			case READ_6:
			case WRITE_6:
			case READ_10:
			case WRITE_10:
			case RESERVE:
			case RELEASE:
				/* Nothing further to do on success. */
				break;

			case MODE_SENSE:
				if (!ips_online(ha, scb)
				    || !ips_msense(ha, scb)) {
					errcode = DID_ERROR;
				}
				break;

			case READ_CAPACITY:
				if (ips_online(ha, scb))
					ips_rdcap(ha, scb);
				else {
					errcode = DID_TIME_OUT;
				}
				break;

			case SEND_DIAGNOSTIC:
			case REASSIGN_BLOCKS:
				break;

			case FORMAT_UNIT:
				errcode = DID_ERROR;
				break;

			case SEEK_10:
			case VERIFY:
			case READ_DEFECT_DATA:
			case READ_BUFFER:
			case WRITE_BUFFER:
				break;

			default:
				errcode = DID_ERROR;
			}

			scb->scsi_cmd->result = errcode << 16;
		} else {
			/* Physical bus: a successful INQUIRY of a disk is
			 * reported as a timeout (disks behind the adapter
			 * are exposed as logical drives instead). */
			if (scb->scsi_cmd->cmnd[0] == INQUIRY) {
				ips_scmd_buf_read(scb->scsi_cmd,
						  &inquiryData, sizeof (inquiryData));
				if ((inquiryData.DeviceType & 0x1f) == TYPE_DISK)
					scb->scsi_cmd->result = DID_TIME_OUT << 16;
			}
		}
	} else {
		if (scb->bus == 0) {
			DEBUG_VAR(1,
				  "(%s%d) Unrecovered Logical Drive Error OpCode: %x, BSB: %x, ESB: %x",
				  ips_name, ha->host_num,
				  scb->cmd.basic_io.op_code, basic_status,
				  ext_status);
		}

		/* Map the adapter error status to a SCSI result. */
		ips_map_status(ha, scb, sp);
	}
}
3985
3986
3987
3988
3989
3990
3991
3992
3993
3994
3995static int
3996ips_online(ips_ha_t * ha, ips_scb_t * scb)
3997{
3998 METHOD_TRACE("ips_online", 1);
3999
4000 if (scb->target_id >= IPS_MAX_LD)
4001 return (0);
4002
4003 if ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1) {
4004 memset(ha->logical_drive_info, 0, sizeof (IPS_LD_INFO));
4005 return (0);
4006 }
4007
4008 if (ha->logical_drive_info->drive_info[scb->target_id].state !=
4009 IPS_LD_OFFLINE
4010 && ha->logical_drive_info->drive_info[scb->target_id].state !=
4011 IPS_LD_FREE
4012 && ha->logical_drive_info->drive_info[scb->target_id].state !=
4013 IPS_LD_CRS
4014 && ha->logical_drive_info->drive_info[scb->target_id].state !=
4015 IPS_LD_SYS)
4016 return (1);
4017 else
4018 return (0);
4019}
4020
4021
4022
4023
4024
4025
4026
4027
4028
4029
4030static int
4031ips_inquiry(ips_ha_t * ha, ips_scb_t * scb)
4032{
4033 IPS_SCSI_INQ_DATA inquiry;
4034
4035 METHOD_TRACE("ips_inquiry", 1);
4036
4037 memset(&inquiry, 0, sizeof (IPS_SCSI_INQ_DATA));
4038
4039 inquiry.DeviceType = IPS_SCSI_INQ_TYPE_DASD;
4040 inquiry.DeviceTypeQualifier = IPS_SCSI_INQ_LU_CONNECTED;
4041 inquiry.Version = IPS_SCSI_INQ_REV2;
4042 inquiry.ResponseDataFormat = IPS_SCSI_INQ_RD_REV2;
4043 inquiry.AdditionalLength = 31;
4044 inquiry.Flags[0] = IPS_SCSI_INQ_Address16;
4045 inquiry.Flags[1] =
4046 IPS_SCSI_INQ_WBus16 | IPS_SCSI_INQ_Sync | IPS_SCSI_INQ_CmdQue;
4047 strncpy(inquiry.VendorId, "IBM ", 8);
4048 strncpy(inquiry.ProductId, "SERVERAID ", 16);
4049 strncpy(inquiry.ProductRevisionLevel, "1.00", 4);
4050
4051 ips_scmd_buf_write(scb->scsi_cmd, &inquiry, sizeof (inquiry));
4052
4053 return (1);
4054}
4055
4056
4057
4058
4059
4060
4061
4062
4063
4064
4065static int
4066ips_rdcap(ips_ha_t * ha, ips_scb_t * scb)
4067{
4068 IPS_SCSI_CAPACITY cap;
4069
4070 METHOD_TRACE("ips_rdcap", 1);
4071
4072 if (scsi_bufflen(scb->scsi_cmd) < 8)
4073 return (0);
4074
4075 cap.lba =
4076 cpu_to_be32(le32_to_cpu
4077 (ha->logical_drive_info->
4078 drive_info[scb->target_id].sector_count) - 1);
4079 cap.len = cpu_to_be32((uint32_t) IPS_BLKSIZE);
4080
4081 ips_scmd_buf_write(scb->scsi_cmd, &cap, sizeof (cap));
4082
4083 return (1);
4084}
4085
4086
4087
4088
4089
4090
4091
4092
4093
4094
/*
 * Fill the command's data buffer with an emulated MODE SENSE response
 * for the requested page (0x03 format, 0x04 rigid disk geometry, or
 * 0x08 caching).  Geometry is synthesized from the drive size in the
 * adapter enquiry data; multi-byte fields are packed big-endian as
 * SCSI requires.  Returns 0 for an unsupported page, 1 on success.
 */
static int
ips_msense(ips_ha_t * ha, ips_scb_t * scb)
{
	uint16_t heads;
	uint16_t sectors;
	uint32_t cylinders;
	IPS_SCSI_MODE_PAGE_DATA mdata;

	METHOD_TRACE("ips_msense", 1);

	/* Choose a geometry: large drives (> 0x400000 sectors) without
	 * the 8GB-compat flag get the "normal" geometry, everything
	 * else the compact one. */
	if (le32_to_cpu(ha->enq->ulDriveSize[scb->target_id]) > 0x400000 &&
	    (ha->enq->ucMiscFlag & 0x8) == 0) {
		heads = IPS_NORM_HEADS;
		sectors = IPS_NORM_SECTORS;
	} else {
		heads = IPS_COMP_HEADS;
		sectors = IPS_COMP_SECTORS;
	}

	cylinders =
	    (le32_to_cpu(ha->enq->ulDriveSize[scb->target_id]) -
	     1) / (heads * sectors);

	memset(&mdata, 0, sizeof (IPS_SCSI_MODE_PAGE_DATA));

	mdata.hdr.BlockDescLength = 8;

	/* Low 6 bits of CDB byte 2 select the mode page. */
	switch (scb->scsi_cmd->cmnd[2] & 0x3f) {
	case 0x03:
		/* Page 3: format device parameters. */
		mdata.pdata.pg3.PageCode = 3;
		mdata.pdata.pg3.PageLength = sizeof (IPS_SCSI_MODE_PAGE3);
		mdata.hdr.DataLength =
		    3 + mdata.hdr.BlockDescLength + mdata.pdata.pg3.PageLength;
		mdata.pdata.pg3.TracksPerZone = 0;
		mdata.pdata.pg3.AltSectorsPerZone = 0;
		mdata.pdata.pg3.AltTracksPerZone = 0;
		mdata.pdata.pg3.AltTracksPerVolume = 0;
		mdata.pdata.pg3.SectorsPerTrack = cpu_to_be16(sectors);
		mdata.pdata.pg3.BytesPerSector = cpu_to_be16(IPS_BLKSIZE);
		mdata.pdata.pg3.Interleave = cpu_to_be16(1);
		mdata.pdata.pg3.TrackSkew = 0;
		mdata.pdata.pg3.CylinderSkew = 0;
		mdata.pdata.pg3.flags = IPS_SCSI_MP3_SoftSector;
		break;

	case 0x4:
		/* Page 4: rigid disk geometry.  The 24-bit cylinder
		 * count is split across a 16-bit high part and an
		 * 8-bit low byte. */
		mdata.pdata.pg4.PageCode = 4;
		mdata.pdata.pg4.PageLength = sizeof (IPS_SCSI_MODE_PAGE4);
		mdata.hdr.DataLength =
		    3 + mdata.hdr.BlockDescLength + mdata.pdata.pg4.PageLength;
		mdata.pdata.pg4.CylindersHigh =
		    cpu_to_be16((cylinders >> 8) & 0xFFFF);
		mdata.pdata.pg4.CylindersLow = (cylinders & 0xFF);
		mdata.pdata.pg4.Heads = heads;
		mdata.pdata.pg4.WritePrecompHigh = 0;
		mdata.pdata.pg4.WritePrecompLow = 0;
		mdata.pdata.pg4.ReducedWriteCurrentHigh = 0;
		mdata.pdata.pg4.ReducedWriteCurrentLow = 0;
		mdata.pdata.pg4.StepRate = cpu_to_be16(1);
		mdata.pdata.pg4.LandingZoneHigh = 0;
		mdata.pdata.pg4.LandingZoneLow = 0;
		mdata.pdata.pg4.flags = 0;
		mdata.pdata.pg4.RotationalOffset = 0;
		mdata.pdata.pg4.MediumRotationRate = 0;
		break;
	case 0x8:
		/* Page 8: caching parameters (all zero). */
		mdata.pdata.pg8.PageCode = 8;
		mdata.pdata.pg8.PageLength = sizeof (IPS_SCSI_MODE_PAGE8);
		mdata.hdr.DataLength =
		    3 + mdata.hdr.BlockDescLength + mdata.pdata.pg8.PageLength;
		/* All fields were zeroed by the memset above. */
		break;

	default:
		/* Unsupported page. */
		return (0);
	}

	ips_scmd_buf_write(scb->scsi_cmd, &mdata, sizeof (mdata));

	return (1);
}
4176
4177
4178
4179
4180
4181
4182
4183
4184
4185
4186static int
4187ips_reqsen(ips_ha_t * ha, ips_scb_t * scb)
4188{
4189 IPS_SCSI_REQSEN reqsen;
4190
4191 METHOD_TRACE("ips_reqsen", 1);
4192
4193 memset(&reqsen, 0, sizeof (IPS_SCSI_REQSEN));
4194
4195 reqsen.ResponseCode =
4196 IPS_SCSI_REQSEN_VALID | IPS_SCSI_REQSEN_CURRENT_ERR;
4197 reqsen.AdditionalLength = 10;
4198 reqsen.AdditionalSenseCode = IPS_SCSI_REQSEN_NO_SENSE;
4199 reqsen.AdditionalSenseCodeQual = IPS_SCSI_REQSEN_NO_SENSE;
4200
4201 ips_scmd_buf_write(scb->scsi_cmd, &reqsen, sizeof (reqsen));
4202
4203 return (1);
4204}
4205
4206
4207
4208
4209
4210
4211
4212
4213
4214
/*
 * Release all per-adapter resources: DMA-coherent buffers (enquiry,
 * adapter area, logical drive info, ioctl buffer), kmalloc'd config/
 * nvram/subsys structures, the SCB pool, and the memory-mapped I/O
 * region.  Every pointer is NULLed after freeing so a repeated call
 * is safe.  A NULL ha is a no-op.
 */
static void
ips_free(ips_ha_t * ha)
{

	METHOD_TRACE("ips_free", 1);

	if (ha) {
		if (ha->enq) {
			pci_free_consistent(ha->pcidev, sizeof(IPS_ENQ),
					    ha->enq, ha->enq_busaddr);
			ha->enq = NULL;
		}

		kfree(ha->conf);
		ha->conf = NULL;

		if (ha->adapt) {
			/* The DMA handle is stored inside the adapter
			 * area itself (hw_status_start), so it must be
			 * read before the area is freed. */
			pci_free_consistent(ha->pcidev,
					    sizeof (IPS_ADAPTER) +
					    sizeof (IPS_IO_CMD), ha->adapt,
					    ha->adapt->hw_status_start);
			ha->adapt = NULL;
		}

		if (ha->logical_drive_info) {
			pci_free_consistent(ha->pcidev,
					    sizeof (IPS_LD_INFO),
					    ha->logical_drive_info,
					    ha->logical_drive_info_dma_addr);
			ha->logical_drive_info = NULL;
		}

		kfree(ha->nvram);
		ha->nvram = NULL;

		kfree(ha->subsys);
		ha->subsys = NULL;

		if (ha->ioctl_data) {
			pci_free_consistent(ha->pcidev, ha->ioctl_len,
					    ha->ioctl_data, ha->ioctl_busaddr);
			ha->ioctl_data = NULL;
			ha->ioctl_datasize = 0;
			ha->ioctl_len = 0;
		}
		ips_deallocatescbs(ha, ha->max_cmds);

		/* Unmap the controller's MMIO window. */
		if (ha->mem_ptr) {
			iounmap(ha->ioremap_ptr);
			ha->ioremap_ptr = NULL;
			ha->mem_ptr = NULL;
		}

		ha->mem_addr = 0;

	}
}
4273
4274
4275
4276
4277
4278
4279
4280
4281
4282
/*
 * Free the SCB pool: first the shared scatter-gather list area, then the
 * SCB array itself.  Both DMA handles are read from the first SCB, so the
 * sg free must happen before the array holding those fields is released.
 * Always returns 1.
 */
static int
ips_deallocatescbs(ips_ha_t * ha, int cmds)
{
	if (ha->scbs) {
		pci_free_consistent(ha->pcidev,
				    IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * cmds,
				    ha->scbs->sg_list.list,
				    ha->scbs->sg_busaddr);
		pci_free_consistent(ha->pcidev, sizeof (ips_scb_t) * cmds,
				    ha->scbs, ha->scbs->scb_busaddr);
		ha->scbs = NULL;
	}
	return 1;
}
4297
4298
4299
4300
4301
4302
4303
4304
4305
4306
/*
 * Allocate the command-block (SCB) pool and one large scatter-gather
 * area, then carve both into per-command slices and thread all but the
 * last SCB onto the free list (the last SCB is reserved for internal
 * commands).  Returns 1 on success, 0 on allocation failure.
 */
static int
ips_allocatescbs(ips_ha_t * ha)
{
	ips_scb_t *scb_p;
	IPS_SG_LIST ips_sg;
	int i;
	dma_addr_t command_dma, sg_dma;

	METHOD_TRACE("ips_allocatescbs", 1);

	/* one contiguous DMA block holding every SCB */
	ha->scbs =
	    pci_alloc_consistent(ha->pcidev, ha->max_cmds * sizeof (ips_scb_t),
				 &command_dma);
	if (ha->scbs == NULL)
		return 0;
	/* one contiguous DMA block holding every command's SG list */
	ips_sg.list =
	    pci_alloc_consistent(ha->pcidev,
				 IPS_SGLIST_SIZE(ha) * IPS_MAX_SG *
				 ha->max_cmds, &sg_dma);
	if (ips_sg.list == NULL) {
		/* roll back the SCB allocation on partial failure */
		pci_free_consistent(ha->pcidev,
				    ha->max_cmds * sizeof (ips_scb_t), ha->scbs,
				    command_dma);
		return 0;
	}

	memset(ha->scbs, 0, ha->max_cmds * sizeof (ips_scb_t));

	for (i = 0; i < ha->max_cmds; i++) {
		scb_p = &ha->scbs[i];
		scb_p->scb_busaddr = command_dma + sizeof (ips_scb_t) * i;

		/* pick the SG descriptor layout the firmware expects */
		if (IPS_USE_ENH_SGLIST(ha)) {
			scb_p->sg_list.enh_list =
			    ips_sg.enh_list + i * IPS_MAX_SG;
			scb_p->sg_busaddr =
			    sg_dma + IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * i;
		} else {
			scb_p->sg_list.std_list =
			    ips_sg.std_list + i * IPS_MAX_SG;
			scb_p->sg_busaddr =
			    sg_dma + IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * i;
		}

		/* the last command block is kept out of the free list for
		 * the driver's own internal commands */
		if (i < ha->max_cmds - 1) {
			scb_p->q_next = ha->scb_freelist;
			ha->scb_freelist = scb_p;
		}
	}

	/* success */
	return (1);
}
4362
4363
4364
4365
4366
4367
4368
4369
4370
4371
/*
 * Reset an SCB to a clean state for reuse.  The SG pointer and the two
 * DMA addresses are saved across the memset and restored afterwards,
 * since they were fixed at pool-allocation time.  Also rearms the dummy
 * command block used by the adapter's status polling.
 */
static void
ips_init_scb(ips_ha_t * ha, ips_scb_t * scb)
{
	IPS_SG_LIST sg_list;
	uint32_t cmd_busaddr, sg_busaddr;
	METHOD_TRACE("ips_init_scb", 1);

	if (scb == NULL)
		return;

	/* save the fields that must survive the wipe below */
	sg_list.list = scb->sg_list.list;
	cmd_busaddr = scb->scb_busaddr;
	sg_busaddr = scb->sg_busaddr;

	memset(scb, 0, sizeof (ips_scb_t));
	memset(ha->dummy, 0, sizeof (IPS_IO_CMD));

	/* rearm the dummy command (id IPS_MAX_CMDS marks it internal) */
	ha->dummy->op_code = 0xFF;
	ha->dummy->ccsar = cpu_to_le32(ha->adapt->hw_status_start
				       + sizeof (IPS_ADAPTER));
	ha->dummy->command_id = IPS_MAX_CMDS;

	/* restore the preserved pool bookkeeping */
	scb->scb_busaddr = cmd_busaddr;
	scb->sg_busaddr = sg_busaddr;
	scb->sg_list.list = sg_list.list;

	/* point the command at the adapter's status area */
	scb->cmd.basic_io.cccr = cpu_to_le32((uint32_t) IPS_BIT_ILE);
	scb->cmd.basic_io.ccsar = cpu_to_le32(ha->adapt->hw_status_start
					      + sizeof (IPS_ADAPTER));
}
4405
4406
4407
4408
4409
4410
4411
4412
4413
4414
4415
4416
4417static ips_scb_t *
4418ips_getscb(ips_ha_t * ha)
4419{
4420 ips_scb_t *scb;
4421
4422 METHOD_TRACE("ips_getscb", 1);
4423
4424 if ((scb = ha->scb_freelist) == NULL) {
4425
4426 return (NULL);
4427 }
4428
4429 ha->scb_freelist = scb->q_next;
4430 scb->flags = 0;
4431 scb->q_next = NULL;
4432
4433 ips_init_scb(ha, scb);
4434
4435 return (scb);
4436}
4437
4438
4439
4440
4441
4442
4443
4444
4445
4446
4447
4448
/*
 * Return an SCB to the free list after undoing any DMA mapping that was
 * set up for the command's data transfer.  The reserved last SCB (used
 * for internal commands) is never placed back on the free list.
 */
static void
ips_freescb(ips_ha_t * ha, ips_scb_t * scb)
{

	METHOD_TRACE("ips_freescb", 1);
	if (scb->flags & IPS_SCB_MAP_SG)
		scsi_dma_unmap(scb->scsi_cmd);
	else if (scb->flags & IPS_SCB_MAP_SINGLE)
		pci_unmap_single(ha->pcidev, scb->data_busaddr, scb->data_len,
				 IPS_DMA_DIR(scb));

	/* keep the internal-command SCB (highest id) off the free list */
	if (IPS_COMMAND_ID(ha, scb) < (ha->max_cmds - 1)) {
		scb->q_next = ha->scb_freelist;
		ha->scb_freelist = scb;
	}
}
4466
4467
4468
4469
4470
4471
4472
4473
4474
4475
4476static int
4477ips_isinit_copperhead(ips_ha_t * ha)
4478{
4479 uint8_t scpr;
4480 uint8_t isr;
4481
4482 METHOD_TRACE("ips_isinit_copperhead", 1);
4483
4484 isr = inb(ha->io_addr + IPS_REG_HISR);
4485 scpr = inb(ha->io_addr + IPS_REG_SCPR);
4486
4487 if (((isr & IPS_BIT_EI) == 0) && ((scpr & IPS_BIT_EBM) == 0))
4488 return (0);
4489 else
4490 return (1);
4491}
4492
4493
4494
4495
4496
4497
4498
4499
4500
4501
4502static int
4503ips_isinit_copperhead_memio(ips_ha_t * ha)
4504{
4505 uint8_t isr = 0;
4506 uint8_t scpr;
4507
4508 METHOD_TRACE("ips_is_init_copperhead_memio", 1);
4509
4510 isr = readb(ha->mem_ptr + IPS_REG_HISR);
4511 scpr = readb(ha->mem_ptr + IPS_REG_SCPR);
4512
4513 if (((isr & IPS_BIT_EI) == 0) && ((scpr & IPS_BIT_EBM) == 0))
4514 return (0);
4515 else
4516 return (1);
4517}
4518
4519
4520
4521
4522
4523
4524
4525
4526
4527
4528static int
4529ips_isinit_morpheus(ips_ha_t * ha)
4530{
4531 uint32_t post;
4532 uint32_t bits;
4533
4534 METHOD_TRACE("ips_is_init_morpheus", 1);
4535
4536 if (ips_isintr_morpheus(ha))
4537 ips_flush_and_reset(ha);
4538
4539 post = readl(ha->mem_ptr + IPS_REG_I960_MSG0);
4540 bits = readl(ha->mem_ptr + IPS_REG_I2O_HIR);
4541
4542 if (post == 0)
4543 return (0);
4544 else if (bits & 0x3)
4545 return (0);
4546 else
4547 return (1);
4548}
4549
4550
4551
4552
4553
4554
4555
4556
4557
4558
4559
4560static void
4561ips_flush_and_reset(ips_ha_t *ha)
4562{
4563 ips_scb_t *scb;
4564 int ret;
4565 int time;
4566 int done;
4567 dma_addr_t command_dma;
4568
4569
4570 scb = pci_alloc_consistent(ha->pcidev, sizeof(ips_scb_t), &command_dma);
4571 if (scb) {
4572 memset(scb, 0, sizeof(ips_scb_t));
4573 ips_init_scb(ha, scb);
4574 scb->scb_busaddr = command_dma;
4575
4576 scb->timeout = ips_cmd_timeout;
4577 scb->cdb[0] = IPS_CMD_FLUSH;
4578
4579 scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH;
4580 scb->cmd.flush_cache.command_id = IPS_MAX_CMDS;
4581 scb->cmd.flush_cache.state = IPS_NORM_STATE;
4582 scb->cmd.flush_cache.reserved = 0;
4583 scb->cmd.flush_cache.reserved2 = 0;
4584 scb->cmd.flush_cache.reserved3 = 0;
4585 scb->cmd.flush_cache.reserved4 = 0;
4586
4587 ret = ips_send_cmd(ha, scb);
4588
4589 if (ret == IPS_SUCCESS) {
4590 time = 60 * IPS_ONE_SEC;
4591 done = 0;
4592
4593 while ((time > 0) && (!done)) {
4594 done = ips_poll_for_flush_complete(ha);
4595
4596 udelay(1000);
4597 time--;
4598 }
4599 }
4600 }
4601
4602
4603 (*ha->func.reset) (ha);
4604
4605 pci_free_consistent(ha->pcidev, sizeof(ips_scb_t), scb, command_dma);
4606 return;
4607}
4608
4609
4610
4611
4612
4613
4614
4615
4616
4617
4618
4619static int
4620ips_poll_for_flush_complete(ips_ha_t * ha)
4621{
4622 IPS_STATUS cstatus;
4623
4624 while (TRUE) {
4625 cstatus.value = (*ha->func.statupd) (ha);
4626
4627 if (cstatus.value == 0xffffffff)
4628 break;
4629
4630
4631 if (cstatus.fields.command_id == IPS_MAX_CMDS)
4632 return 1;
4633 }
4634
4635 return 0;
4636}
4637
4638
4639
4640
4641
4642
4643
4644
4645
4646static void
4647ips_enable_int_copperhead(ips_ha_t * ha)
4648{
4649 METHOD_TRACE("ips_enable_int_copperhead", 1);
4650
4651 outb(ha->io_addr + IPS_REG_HISR, IPS_BIT_EI);
4652 inb(ha->io_addr + IPS_REG_HISR);
4653}
4654
4655
4656
4657
4658
4659
4660
4661
4662
/*
 * Enable interrupts on a memory-mapped copperhead adapter.  The readback
 * forces the posted PCI write to complete before returning.
 */
static void
ips_enable_int_copperhead_memio(ips_ha_t * ha)
{
	METHOD_TRACE("ips_enable_int_copperhead_memio", 1);

	writeb(IPS_BIT_EI, ha->mem_ptr + IPS_REG_HISR);
	readb(ha->mem_ptr + IPS_REG_HISR);	/* flush PCI posting */
}
4671
4672
4673
4674
4675
4676
4677
4678
4679
4680static void
4681ips_enable_int_morpheus(ips_ha_t * ha)
4682{
4683 uint32_t Oimr;
4684
4685 METHOD_TRACE("ips_enable_int_morpheus", 1);
4686
4687 Oimr = readl(ha->mem_ptr + IPS_REG_I960_OIMR);
4688 Oimr &= ~0x08;
4689 writel(Oimr, ha->mem_ptr + IPS_REG_I960_OIMR);
4690 readl(ha->mem_ptr + IPS_REG_I960_OIMR);
4691}
4692
4693
4694
4695
4696
4697
4698
4699
4700
4701
/*
 * Bring up a copperhead (I/O-port) adapter after reset: collect the POST
 * and config bytes the firmware hands out one at a time, wait for the
 * command buffer to drain, then enable bus mastering and interrupts.
 * Returns 1 on success, 0 on any timeout or bad POST status.
 */
static int
ips_init_copperhead(ips_ha_t * ha)
{
	uint8_t Isr;
	uint8_t Cbsp;
	uint8_t PostByte[IPS_MAX_POST_BYTES];
	uint8_t ConfigByte[IPS_MAX_CONFIG_BYTES];
	int i, j;

	METHOD_TRACE("ips_init_copperhead", 1);

	/* collect the POST bytes; firmware raises GHI for each one */
	for (i = 0; i < IPS_MAX_POST_BYTES; i++) {
		for (j = 0; j < 45; j++) {
			Isr = inb(ha->io_addr + IPS_REG_HISR);
			if (Isr & IPS_BIT_GHI)
				break;

			/* wait for up to 45 seconds per byte */
			MDELAY(IPS_ONE_SEC);
		}

		if (j >= 45)
			/* error occurred */
			return (0);

		PostByte[i] = inb(ha->io_addr + IPS_REG_ISPR);
		outb(Isr, ha->io_addr + IPS_REG_HISR);	/* ack the byte */
	}

	if (PostByte[0] < IPS_GOOD_POST_STATUS) {
		IPS_PRINTK(KERN_WARNING, ha->pcidev,
			   "reset controller fails (post status %x %x).\n",
			   PostByte[0], PostByte[1]);

		return (0);
	}

	/* collect the config bytes the same way (longer timeout) */
	for (i = 0; i < IPS_MAX_CONFIG_BYTES; i++) {
		for (j = 0; j < 240; j++) {
			Isr = inb(ha->io_addr + IPS_REG_HISR);
			if (Isr & IPS_BIT_GHI)
				break;

			/* wait for up to 240 seconds per byte */
			MDELAY(IPS_ONE_SEC);
		}

		if (j >= 240)
			/* error occurred */
			return (0);

		ConfigByte[i] = inb(ha->io_addr + IPS_REG_ISPR);
		outb(Isr, ha->io_addr + IPS_REG_HISR);	/* ack the byte */
	}

	/* wait for the command buffer to become idle */
	for (i = 0; i < 240; i++) {
		Cbsp = inb(ha->io_addr + IPS_REG_CBSP);

		if ((Cbsp & IPS_BIT_OP) == 0)
			break;

		/* wait for up to 240 seconds */
		MDELAY(IPS_ONE_SEC);
	}

	if (i >= 240)
		/* error occurred */
		return (0);

	/* setup CCCR */
	outl(0x1010, ha->io_addr + IPS_REG_CCCR);

	/* enable bus mastering */
	outb(IPS_BIT_EBM, ha->io_addr + IPS_REG_SCPR);

	if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
		/* fix for anaconda64 */
		outl(0, ha->io_addr + IPS_REG_NDAE);

	/* enable interrupts */
	outb(IPS_BIT_EI, ha->io_addr + IPS_REG_HISR);

	return (1);
}
4786
4787
4788
4789
4790
4791
4792
4793
4794
4795
/*
 * Memory-mapped variant of ips_init_copperhead(): identical handshake
 * (POST bytes, config bytes, command-buffer drain, enable bus mastering
 * and interrupts) using readb/writeb instead of port I/O.
 * Returns 1 on success, 0 on any timeout or bad POST status.
 */
static int
ips_init_copperhead_memio(ips_ha_t * ha)
{
	uint8_t Isr = 0;
	uint8_t Cbsp;
	uint8_t PostByte[IPS_MAX_POST_BYTES];
	uint8_t ConfigByte[IPS_MAX_CONFIG_BYTES];
	int i, j;

	METHOD_TRACE("ips_init_copperhead_memio", 1);

	/* collect the POST bytes; firmware raises GHI for each one */
	for (i = 0; i < IPS_MAX_POST_BYTES; i++) {
		for (j = 0; j < 45; j++) {
			Isr = readb(ha->mem_ptr + IPS_REG_HISR);
			if (Isr & IPS_BIT_GHI)
				break;

			/* wait for up to 45 seconds per byte */
			MDELAY(IPS_ONE_SEC);
		}

		if (j >= 45)
			/* error occurred */
			return (0);

		PostByte[i] = readb(ha->mem_ptr + IPS_REG_ISPR);
		writeb(Isr, ha->mem_ptr + IPS_REG_HISR);	/* ack the byte */
	}

	if (PostByte[0] < IPS_GOOD_POST_STATUS) {
		IPS_PRINTK(KERN_WARNING, ha->pcidev,
			   "reset controller fails (post status %x %x).\n",
			   PostByte[0], PostByte[1]);

		return (0);
	}

	/* collect the config bytes the same way (longer timeout) */
	for (i = 0; i < IPS_MAX_CONFIG_BYTES; i++) {
		for (j = 0; j < 240; j++) {
			Isr = readb(ha->mem_ptr + IPS_REG_HISR);
			if (Isr & IPS_BIT_GHI)
				break;

			/* wait for up to 240 seconds per byte */
			MDELAY(IPS_ONE_SEC);
		}

		if (j >= 240)
			/* error occurred */
			return (0);

		ConfigByte[i] = readb(ha->mem_ptr + IPS_REG_ISPR);
		writeb(Isr, ha->mem_ptr + IPS_REG_HISR);	/* ack the byte */
	}

	/* wait for the command buffer to become idle */
	for (i = 0; i < 240; i++) {
		Cbsp = readb(ha->mem_ptr + IPS_REG_CBSP);

		if ((Cbsp & IPS_BIT_OP) == 0)
			break;

		/* wait for up to 240 seconds */
		MDELAY(IPS_ONE_SEC);
	}

	if (i >= 240)
		/* error occurred */
		return (0);

	/* setup CCCR */
	writel(0x1010, ha->mem_ptr + IPS_REG_CCCR);

	/* enable bus mastering */
	writeb(IPS_BIT_EBM, ha->mem_ptr + IPS_REG_SCPR);

	if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
		/* fix for anaconda64 */
		writel(0, ha->mem_ptr + IPS_REG_NDAE);

	/* enable interrupts */
	writeb(IPS_BIT_EI, ha->mem_ptr + IPS_REG_HISR);

	/* success */
	return (1);
}
4881
4882
4883
4884
4885
4886
4887
4888
4889
4890
/*
 * Bring up a morpheus (i960-based) adapter: wait for the POST code in
 * MSG0, handle the special battery-PIC flash state, validate POST,
 * collect the config word from MSG1, then unmask interrupts.
 * Returns 1 on success, 0 on timeout or bad POST status.
 */
static int
ips_init_morpheus(ips_ha_t * ha)
{
	uint32_t Post;
	uint32_t Config;
	uint32_t Isr;
	uint32_t Oimr;
	int i;

	METHOD_TRACE("ips_init_morpheus", 1);

	/* wait for the POST interrupt (up to 45 seconds) */
	for (i = 0; i < 45; i++) {
		Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR);

		if (Isr & IPS_BIT_I960_MSG0I)
			break;

		/* wait one second between polls */
		MDELAY(IPS_ONE_SEC);
	}

	if (i >= 45) {
		/* error occurred */
		IPS_PRINTK(KERN_WARNING, ha->pcidev,
			   "timeout waiting for post.\n");

		return (0);
	}

	Post = readl(ha->mem_ptr + IPS_REG_I960_MSG0);

	/* 0x4F00 means the firmware is flashing the battery PIC */
	if (Post == 0x4F00) {
		IPS_PRINTK(KERN_WARNING, ha->pcidev,
			   "Flashing Battery PIC, Please wait ...\n");

		/* ack the interrupt and wait for the flash to finish */
		Isr = (uint32_t) IPS_BIT_I960_MSG0I;
		writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR);

		/* allow up to 120 seconds for the PIC flash */
		for (i = 0; i < 120; i++) {
			Post = readl(ha->mem_ptr + IPS_REG_I960_MSG0);
			if (Post != 0x4F00)
				break;
			/* wait one second between polls */
			MDELAY(IPS_ONE_SEC);
		}

		if (i >= 120) {
			IPS_PRINTK(KERN_WARNING, ha->pcidev,
				   "timeout waiting for Battery PIC Flash\n");
			return (0);
		}

	}

	/* ack the POST interrupt */
	Isr = (uint32_t) IPS_BIT_I960_MSG0I;
	writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR);

	/* POST code is in the high byte on morpheus */
	if (Post < (IPS_GOOD_POST_STATUS << 8)) {
		IPS_PRINTK(KERN_WARNING, ha->pcidev,
			   "reset controller fails (post status %x).\n", Post);

		return (0);
	}

	/* wait for the config interrupt (up to 240 seconds) */
	for (i = 0; i < 240; i++) {
		Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR);

		if (Isr & IPS_BIT_I960_MSG1I)
			break;

		/* wait one second between polls */
		MDELAY(IPS_ONE_SEC);
	}

	if (i >= 240) {
		/* error occurred */
		IPS_PRINTK(KERN_WARNING, ha->pcidev,
			   "timeout waiting for config.\n");

		return (0);
	}

	Config = readl(ha->mem_ptr + IPS_REG_I960_MSG1);

	/* ack the config interrupt */
	Isr = (uint32_t) IPS_BIT_I960_MSG1I;
	writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR);

	/* unmask the outbound doorbell interrupt */
	Oimr = readl(ha->mem_ptr + IPS_REG_I960_OIMR);
	Oimr &= ~0x8;
	writel(Oimr, ha->mem_ptr + IPS_REG_I960_OIMR);

	/* POST/config combination that requires the extended soft
	 * logic (ESL) workaround */
	if (Post == 0xEF10) {
		if ((Config == 0x000F) || (Config == 0x0009))
			ha->requires_esl = 1;
	}

	return (1);
}
4998
4999
5000
5001
5002
5003
5004
5005
5006
5007
5008static int
5009ips_reset_copperhead(ips_ha_t * ha)
5010{
5011 int reset_counter;
5012
5013 METHOD_TRACE("ips_reset_copperhead", 1);
5014
5015 DEBUG_VAR(1, "(%s%d) ips_reset_copperhead: io addr: %x, irq: %d",
5016 ips_name, ha->host_num, ha->io_addr, ha->pcidev->irq);
5017
5018 reset_counter = 0;
5019
5020 while (reset_counter < 2) {
5021 reset_counter++;
5022
5023 outb(IPS_BIT_RST, ha->io_addr + IPS_REG_SCPR);
5024
5025
5026 MDELAY(IPS_ONE_SEC);
5027
5028 outb(0, ha->io_addr + IPS_REG_SCPR);
5029
5030
5031 MDELAY(IPS_ONE_SEC);
5032
5033 if ((*ha->func.init) (ha))
5034 break;
5035 else if (reset_counter >= 2) {
5036
5037 return (0);
5038 }
5039 }
5040
5041 return (1);
5042}
5043
5044
5045
5046
5047
5048
5049
5050
5051
5052
5053static int
5054ips_reset_copperhead_memio(ips_ha_t * ha)
5055{
5056 int reset_counter;
5057
5058 METHOD_TRACE("ips_reset_copperhead_memio", 1);
5059
5060 DEBUG_VAR(1, "(%s%d) ips_reset_copperhead_memio: mem addr: %x, irq: %d",
5061 ips_name, ha->host_num, ha->mem_addr, ha->pcidev->irq);
5062
5063 reset_counter = 0;
5064
5065 while (reset_counter < 2) {
5066 reset_counter++;
5067
5068 writeb(IPS_BIT_RST, ha->mem_ptr + IPS_REG_SCPR);
5069
5070
5071 MDELAY(IPS_ONE_SEC);
5072
5073 writeb(0, ha->mem_ptr + IPS_REG_SCPR);
5074
5075
5076 MDELAY(IPS_ONE_SEC);
5077
5078 if ((*ha->func.init) (ha))
5079 break;
5080 else if (reset_counter >= 2) {
5081
5082 return (0);
5083 }
5084 }
5085
5086 return (1);
5087}
5088
5089
5090
5091
5092
5093
5094
5095
5096
5097
5098static int
5099ips_reset_morpheus(ips_ha_t * ha)
5100{
5101 int reset_counter;
5102 uint8_t junk;
5103
5104 METHOD_TRACE("ips_reset_morpheus", 1);
5105
5106 DEBUG_VAR(1, "(%s%d) ips_reset_morpheus: mem addr: %x, irq: %d",
5107 ips_name, ha->host_num, ha->mem_addr, ha->pcidev->irq);
5108
5109 reset_counter = 0;
5110
5111 while (reset_counter < 2) {
5112 reset_counter++;
5113
5114 writel(0x80000000, ha->mem_ptr + IPS_REG_I960_IDR);
5115
5116
5117 MDELAY(5 * IPS_ONE_SEC);
5118
5119
5120 pci_read_config_byte(ha->pcidev, 4, &junk);
5121
5122 if ((*ha->func.init) (ha))
5123 break;
5124 else if (reset_counter >= 2) {
5125
5126 return (0);
5127 }
5128 }
5129
5130 return (1);
5131}
5132
5133
5134
5135
5136
5137
5138
5139
5140
5141
/*
 * Program the adapter's status-queue window (start/end/head/tail
 * registers) for a copperhead adapter using port I/O, and initialize the
 * driver-side mirror pointers.
 */
static void
ips_statinit(ips_ha_t * ha)
{
	uint32_t phys_status_start;

	METHOD_TRACE("ips_statinit", 1);

	/* driver-side (virtual) view of the status ring */
	ha->adapt->p_status_start = ha->adapt->status;
	ha->adapt->p_status_end = ha->adapt->status + IPS_MAX_CMDS;
	ha->adapt->p_status_tail = ha->adapt->status;

	/* hardware-side (bus address) view of the same ring */
	phys_status_start = ha->adapt->hw_status_start;
	outl(phys_status_start, ha->io_addr + IPS_REG_SQSR);
	outl(phys_status_start + IPS_STATUS_Q_SIZE,
	     ha->io_addr + IPS_REG_SQER);
	outl(phys_status_start + IPS_STATUS_SIZE,
	     ha->io_addr + IPS_REG_SQHR);
	outl(phys_status_start, ha->io_addr + IPS_REG_SQTR);

	ha->adapt->hw_status_tail = phys_status_start;
}
5163
5164
5165
5166
5167
5168
5169
5170
5171
5172
/*
 * Memory-mapped twin of ips_statinit(): program the status-queue window
 * registers via MMIO and initialize the driver-side mirror pointers.
 */
static void
ips_statinit_memio(ips_ha_t * ha)
{
	uint32_t phys_status_start;

	METHOD_TRACE("ips_statinit_memio", 1);

	/* driver-side (virtual) view of the status ring */
	ha->adapt->p_status_start = ha->adapt->status;
	ha->adapt->p_status_end = ha->adapt->status + IPS_MAX_CMDS;
	ha->adapt->p_status_tail = ha->adapt->status;

	/* hardware-side (bus address) view of the same ring */
	phys_status_start = ha->adapt->hw_status_start;
	writel(phys_status_start, ha->mem_ptr + IPS_REG_SQSR);
	writel(phys_status_start + IPS_STATUS_Q_SIZE,
	       ha->mem_ptr + IPS_REG_SQER);
	writel(phys_status_start + IPS_STATUS_SIZE, ha->mem_ptr + IPS_REG_SQHR);
	writel(phys_status_start, ha->mem_ptr + IPS_REG_SQTR);

	ha->adapt->hw_status_tail = phys_status_start;
}
5193
5194
5195
5196
5197
5198
5199
5200
5201
5202
/*
 * Advance the status-queue tail (wrapping at the end of the ring), tell
 * the adapter the new tail position, and return the status word at the
 * new tail.  Copperhead port-I/O variant.
 */
static uint32_t
ips_statupd_copperhead(ips_ha_t * ha)
{
	METHOD_TRACE("ips_statupd_copperhead", 1);

	/* advance both the virtual and bus-address tail, wrapping at
	 * the ring's end back to its start */
	if (ha->adapt->p_status_tail != ha->adapt->p_status_end) {
		ha->adapt->p_status_tail++;
		ha->adapt->hw_status_tail += sizeof (IPS_STATUS);
	} else {
		ha->adapt->p_status_tail = ha->adapt->p_status_start;
		ha->adapt->hw_status_tail = ha->adapt->hw_status_start;
	}

	/* publish the new tail so the adapter can reuse the slot */
	outl(ha->adapt->hw_status_tail,
	     ha->io_addr + IPS_REG_SQTR);

	return (ha->adapt->p_status_tail->value);
}
5221
5222
5223
5224
5225
5226
5227
5228
5229
5230
/*
 * Memory-mapped twin of ips_statupd_copperhead(): advance the status
 * tail (with wrap), publish it via MMIO, and return the status word at
 * the new tail.
 */
static uint32_t
ips_statupd_copperhead_memio(ips_ha_t * ha)
{
	METHOD_TRACE("ips_statupd_copperhead_memio", 1);

	/* advance both the virtual and bus-address tail, wrapping at
	 * the ring's end back to its start */
	if (ha->adapt->p_status_tail != ha->adapt->p_status_end) {
		ha->adapt->p_status_tail++;
		ha->adapt->hw_status_tail += sizeof (IPS_STATUS);
	} else {
		ha->adapt->p_status_tail = ha->adapt->p_status_start;
		ha->adapt->hw_status_tail = ha->adapt->hw_status_start;
	}

	/* publish the new tail so the adapter can reuse the slot */
	writel(ha->adapt->hw_status_tail, ha->mem_ptr + IPS_REG_SQTR);

	return (ha->adapt->p_status_tail->value);
}
5248
5249
5250
5251
5252
5253
5254
5255
5256
5257
5258static uint32_t
5259ips_statupd_morpheus(ips_ha_t * ha)
5260{
5261 uint32_t val;
5262
5263 METHOD_TRACE("ips_statupd_morpheus", 1);
5264
5265 val = readl(ha->mem_ptr + IPS_REG_I2O_OUTMSGQ);
5266
5267 return (val);
5268}
5269
5270
5271
5272
5273
5274
5275
5276
5277
5278
/*
 * Issue a command to a copperhead adapter via port I/O: wait (up to
 * IPS_SEM_TIMEOUT ms) for the command channel semaphore, then write the
 * SCB's bus address and the start bit.  Returns IPS_SUCCESS, or
 * IPS_FAILURE on semaphore timeout.
 */
static int
ips_issue_copperhead(ips_ha_t * ha, ips_scb_t * scb)
{
	uint32_t TimeOut;
	uint32_t val;

	METHOD_TRACE("ips_issue_copperhead", 1);

	if (scb->scsi_cmd) {
		DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)",
			  ips_name,
			  ha->host_num,
			  scb->cdb[0],
			  scb->cmd.basic_io.command_id,
			  scb->bus, scb->target_id, scb->lun);
	} else {
		DEBUG_VAR(2, KERN_NOTICE "(%s%d) ips_issue: logical cmd id %d",
			  ips_name, ha->host_num, scb->cmd.basic_io.command_id);
	}

	TimeOut = 0;

	/* spin until the command channel semaphore is released */
	while ((val =
		le32_to_cpu(inl(ha->io_addr + IPS_REG_CCCR))) & IPS_BIT_SEM) {
		udelay(1000);

		if (++TimeOut >= IPS_SEM_TIMEOUT) {
			/* if start/stop isn't set the channel is usable
			 * despite the stuck semaphore — proceed anyway */
			if (!(val & IPS_BIT_START_STOP))
				break;

			IPS_PRINTK(KERN_WARNING, ha->pcidev,
				   "ips_issue val [0x%x].\n", val);
			IPS_PRINTK(KERN_WARNING, ha->pcidev,
				   "ips_issue semaphore chk timeout.\n");

			return (IPS_FAILURE);
		}	/* end if */
	}		/* end while */

	/* post the command's bus address and kick it off */
	outl(scb->scb_busaddr, ha->io_addr + IPS_REG_CCSAR);
	outw(IPS_BIT_START_CMD, ha->io_addr + IPS_REG_CCCR);

	return (IPS_SUCCESS);
}
5323
5324
5325
5326
5327
5328
5329
5330
5331
5332
/*
 * Memory-mapped twin of ips_issue_copperhead(): wait for the command
 * channel semaphore, then post the SCB bus address and start bit via
 * MMIO.  Returns IPS_SUCCESS, or IPS_FAILURE on semaphore timeout.
 */
static int
ips_issue_copperhead_memio(ips_ha_t * ha, ips_scb_t * scb)
{
	uint32_t TimeOut;
	uint32_t val;

	METHOD_TRACE("ips_issue_copperhead_memio", 1);

	if (scb->scsi_cmd) {
		DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)",
			  ips_name,
			  ha->host_num,
			  scb->cdb[0],
			  scb->cmd.basic_io.command_id,
			  scb->bus, scb->target_id, scb->lun);
	} else {
		DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d",
			  ips_name, ha->host_num, scb->cmd.basic_io.command_id);
	}

	TimeOut = 0;

	/* spin until the command channel semaphore is released */
	while ((val = readl(ha->mem_ptr + IPS_REG_CCCR)) & IPS_BIT_SEM) {
		udelay(1000);

		if (++TimeOut >= IPS_SEM_TIMEOUT) {
			/* if start/stop isn't set the channel is usable
			 * despite the stuck semaphore — proceed anyway */
			if (!(val & IPS_BIT_START_STOP))
				break;

			IPS_PRINTK(KERN_WARNING, ha->pcidev,
				   "ips_issue val [0x%x].\n", val);
			IPS_PRINTK(KERN_WARNING, ha->pcidev,
				   "ips_issue semaphore chk timeout.\n");

			return (IPS_FAILURE);
		}	/* end if */
	}		/* end while */

	/* post the command's bus address and kick it off */
	writel(scb->scb_busaddr, ha->mem_ptr + IPS_REG_CCSAR);
	writel(IPS_BIT_START_CMD, ha->mem_ptr + IPS_REG_CCCR);

	return (IPS_SUCCESS);
}
5376
5377
5378
5379
5380
5381
5382
5383
5384
5385
/*
 * Issue a command to an I2O-style adapter via port I/O: a single write
 * of the SCB's bus address to the inbound message queue.  No semaphore
 * handshake is needed on this interface.  Always returns IPS_SUCCESS.
 */
static int
ips_issue_i2o(ips_ha_t * ha, ips_scb_t * scb)
{

	METHOD_TRACE("ips_issue_i2o", 1);

	if (scb->scsi_cmd) {
		DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)",
			  ips_name,
			  ha->host_num,
			  scb->cdb[0],
			  scb->cmd.basic_io.command_id,
			  scb->bus, scb->target_id, scb->lun);
	} else {
		DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d",
			  ips_name, ha->host_num, scb->cmd.basic_io.command_id);
	}

	/* post the command's bus address to the inbound queue */
	outl(scb->scb_busaddr, ha->io_addr + IPS_REG_I2O_INMSGQ);

	return (IPS_SUCCESS);
}
5408
5409
5410
5411
5412
5413
5414
5415
5416
5417
/*
 * Memory-mapped twin of ips_issue_i2o(): post the SCB's bus address to
 * the I2O inbound message queue via MMIO.  Always returns IPS_SUCCESS.
 */
static int
ips_issue_i2o_memio(ips_ha_t * ha, ips_scb_t * scb)
{

	METHOD_TRACE("ips_issue_i2o_memio", 1);

	if (scb->scsi_cmd) {
		DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)",
			  ips_name,
			  ha->host_num,
			  scb->cdb[0],
			  scb->cmd.basic_io.command_id,
			  scb->bus, scb->target_id, scb->lun);
	} else {
		DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d",
			  ips_name, ha->host_num, scb->cmd.basic_io.command_id);
	}

	/* post the command's bus address to the inbound queue */
	writel(scb->scb_busaddr, ha->mem_ptr + IPS_REG_I2O_INMSGQ);

	return (IPS_SUCCESS);
}
5440
5441
5442
5443
5444
5445
5446
5447
5448
5449
/*
 * Check whether a copperhead adapter is asserting an interrupt.
 * Returns 1 only for a status-queue completion (SCE); other interrupt
 * causes are acknowledged here and reported as 0.
 */
static int
ips_isintr_copperhead(ips_ha_t * ha)
{
	uint8_t Isr;

	METHOD_TRACE("ips_isintr_copperhead", 2);

	Isr = inb(ha->io_addr + IPS_REG_HISR);

	if (Isr == 0xFF)
		/* ??? hardware returning all-ones; treat as no interrupt */
		return (0);

	if (Isr & IPS_BIT_SCE)
		return (1);
	else if (Isr & (IPS_BIT_SQO | IPS_BIT_GHI)) {
		/* status queue overflow or GHI — clear the interrupt but
		 * don't report a completion */
		outb(Isr, ha->io_addr + IPS_REG_HISR);
	}

	return (0);
}
5473
5474
5475
5476
5477
5478
5479
5480
5481
5482
/*
 * Memory-mapped twin of ips_isintr_copperhead(): returns 1 only for a
 * status-queue completion; other interrupt causes are acknowledged and
 * reported as 0.
 */
static int
ips_isintr_copperhead_memio(ips_ha_t * ha)
{
	uint8_t Isr;

	METHOD_TRACE("ips_isintr_memio", 2);

	Isr = readb(ha->mem_ptr + IPS_REG_HISR);

	if (Isr == 0xFF)
		/* ??? hardware returning all-ones; treat as no interrupt */
		return (0);

	if (Isr & IPS_BIT_SCE)
		return (1);
	else if (Isr & (IPS_BIT_SQO | IPS_BIT_GHI)) {
		/* status queue overflow or GHI — clear the interrupt but
		 * don't report a completion */
		writeb(Isr, ha->mem_ptr + IPS_REG_HISR);
	}

	return (0);
}
5506
5507
5508
5509
5510
5511
5512
5513
5514
5515
5516static int
5517ips_isintr_morpheus(ips_ha_t * ha)
5518{
5519 uint32_t Isr;
5520
5521 METHOD_TRACE("ips_isintr_morpheus", 2);
5522
5523 Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR);
5524
5525 if (Isr & IPS_BIT_I2O_OPQI)
5526 return (1);
5527 else
5528 return (0);
5529}
5530
5531
5532
5533
5534
5535
5536
5537
5538
5539
/*
 * Busy-wait (1 ms granularity, `time` in seconds) for ha->waitflag to
 * clear.  In IPS_INTR_IORL mode the hardware interrupt handler is also
 * polled directly each iteration, since real interrupts may be blocked
 * by the caller's lock.  Returns IPS_SUCCESS when the flag clears,
 * IPS_FAILURE on timeout.
 */
static int
ips_wait(ips_ha_t * ha, int time, int intr)
{
	int ret;
	int done;

	METHOD_TRACE("ips_wait", 1);

	ret = IPS_FAILURE;
	done = FALSE;

	/* convert seconds to the millisecond loop granularity below */
	time *= IPS_ONE_SEC;

	while ((time > 0) && (!done)) {
		if (intr == IPS_INTR_ON) {
			if (ha->waitflag == FALSE) {
				/* controller signalled completion */
				ret = IPS_SUCCESS;
				done = TRUE;
				break;
			}
		} else if (intr == IPS_INTR_IORL) {
			if (ha->waitflag == FALSE) {
				/* controller signalled completion; the
				 * interrupt handler already ran under
				 * the IO request lock */
				ret = IPS_SUCCESS;
				done = TRUE;
				break;
			}

			/* interrupts may not be getting through while the
			 * caller holds the lock, so poll the adapter's
			 * interrupt handler by hand */
			(*ha->func.intr) (ha);
		}

		/* one-millisecond polling tick */
		udelay(1000);
		time--;
	}

	return (ret);
}
5588
5589
5590
5591
5592
5593
5594
5595
5596
5597
/*
 * Read NVRAM page 5, validate/repair its signature, record the driver
 * and BIOS versions plus OS type into it, and write it back.  Also
 * caches the adapter's slot number.  Returns 1 on success, 0 if the
 * page could not be read or written.
 */
static int
ips_write_driver_status(ips_ha_t * ha, int intr)
{
	METHOD_TRACE("ips_write_driver_status", 1);

	if (!ips_readwrite_page5(ha, FALSE, intr)) {
		IPS_PRINTK(KERN_WARNING, ha->pcidev,
			   "unable to read NVRAM page 5.\n");

		return (0);
	}

	/* check to make sure the page has a valid signature, and
	 * repair it in place if not */
	if (le32_to_cpu(ha->nvram->signature) != IPS_NVRAM_P5_SIG) {
		DEBUG_VAR(1,
			  "(%s%d) NVRAM page 5 has an invalid signature: %X.",
			  ips_name, ha->host_num, ha->nvram->signature);
		ha->nvram->signature = IPS_NVRAM_P5_SIG;
	}

	DEBUG_VAR(2,
		  "(%s%d) Ad Type: %d, Ad Slot: %d, BIOS: %c%c%c%c %c%c%c%c.",
		  ips_name, ha->host_num, le16_to_cpu(ha->nvram->adapter_type),
		  ha->nvram->adapter_slot, ha->nvram->bios_high[0],
		  ha->nvram->bios_high[1], ha->nvram->bios_high[2],
		  ha->nvram->bios_high[3], ha->nvram->bios_low[0],
		  ha->nvram->bios_low[1], ha->nvram->bios_low[2],
		  ha->nvram->bios_low[3]);

	ips_get_bios_version(ha, intr);

	/* change values (as needed) before writing the page back */
	ha->nvram->operating_system = IPS_OS_LINUX;
	ha->nvram->adapter_type = ha->ad_type;
	/* these NVRAM fields are fixed-width 4-byte codes, not
	 * NUL-terminated strings */
	strncpy((char *) ha->nvram->driver_high, IPS_VERSION_HIGH, 4);
	strncpy((char *) ha->nvram->driver_low, IPS_VERSION_LOW, 4);
	strncpy((char *) ha->nvram->bios_high, ha->bios_version, 4);
	strncpy((char *) ha->nvram->bios_low, ha->bios_version + 4, 4);

	ha->nvram->versioning = 0;

	/* now update the page */
	if (!ips_readwrite_page5(ha, TRUE, intr)) {
		IPS_PRINTK(KERN_WARNING, ha->pcidev,
			   "unable to write NVRAM page 5.\n");

		return (0);
	}

	/* IF NVRAM page 5 is now valid, use it to determine the slot */
	ha->slot_num = ha->nvram->adapter_slot;

	return (1);
}
5653
5654
5655
5656
5657
5658
5659
5660
5661
5662
/*
 * Send an ENQUIRY command using the reserved internal SCB, filling
 * ha->enq (DMA'd directly into the pre-allocated buffer).  Returns 1 on
 * success, 0 on send failure or a bad completion status.
 */
static int
ips_read_adapter_status(ips_ha_t * ha, int intr)
{
	ips_scb_t *scb;
	int ret;

	METHOD_TRACE("ips_read_adapter_status", 1);

	/* the last SCB is reserved for internal driver commands */
	scb = &ha->scbs[ha->max_cmds - 1];

	ips_init_scb(ha, scb);

	scb->timeout = ips_cmd_timeout;
	scb->cdb[0] = IPS_CMD_ENQUIRY;

	scb->cmd.basic_io.op_code = IPS_CMD_ENQUIRY;
	scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
	scb->cmd.basic_io.sg_count = 0;
	scb->cmd.basic_io.lba = 0;
	scb->cmd.basic_io.sector_count = 0;
	scb->cmd.basic_io.log_drv = 0;
	scb->data_len = sizeof (*ha->enq);
	/* DMA the response straight into the pre-allocated enq buffer */
	scb->cmd.basic_io.sg_addr = ha->enq_busaddr;

	/* send command */
	if (((ret =
	      ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE)
	    || (ret == IPS_SUCCESS_IMM)
	    || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1))
		return (0);

	return (1);
}
5696
5697
5698
5699
5700
5701
5702
5703
5704
5705
/*
 * Send a GET_SUBSYS command using the reserved internal SCB.  The
 * response lands in the shared ioctl DMA buffer and is copied into
 * ha->subsys.  Returns 1 on success, 0 on send failure or a bad
 * completion status.
 */
static int
ips_read_subsystem_parameters(ips_ha_t * ha, int intr)
{
	ips_scb_t *scb;
	int ret;

	METHOD_TRACE("ips_read_subsystem_parameters", 1);

	/* the last SCB is reserved for internal driver commands */
	scb = &ha->scbs[ha->max_cmds - 1];

	ips_init_scb(ha, scb);

	scb->timeout = ips_cmd_timeout;
	scb->cdb[0] = IPS_CMD_GET_SUBSYS;

	scb->cmd.basic_io.op_code = IPS_CMD_GET_SUBSYS;
	scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
	scb->cmd.basic_io.sg_count = 0;
	scb->cmd.basic_io.lba = 0;
	scb->cmd.basic_io.sector_count = 0;
	scb->cmd.basic_io.log_drv = 0;
	scb->data_len = sizeof (*ha->subsys);
	/* response arrives in the shared ioctl DMA buffer */
	scb->cmd.basic_io.sg_addr = ha->ioctl_busaddr;

	/* send command */
	if (((ret =
	      ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE)
	    || (ret == IPS_SUCCESS_IMM)
	    || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1))
		return (0);

	memcpy(ha->subsys, ha->ioctl_data, sizeof(*ha->subsys));
	return (1);
}
5740
5741
5742
5743
5744
5745
5746
5747
5748
5749
/*
 * Read the adapter configuration (READ_CONF) into ha->conf via the
 * shared ioctl DMA buffer.  On failure the config is zeroed and the
 * initiator IDs defaulted to 7; a "completed with error" status is
 * still treated as usable (returns 1).  Returns 0 on hard failure.
 */
static int
ips_read_config(ips_ha_t * ha, int intr)
{
	ips_scb_t *scb;
	int i;
	int ret;

	METHOD_TRACE("ips_read_config", 1);

	/* set defaults for initiator IDs (one per channel) */
	for (i = 0; i < 4; i++)
		ha->conf->init_id[i] = 7;

	/* the last SCB is reserved for internal driver commands */
	scb = &ha->scbs[ha->max_cmds - 1];

	ips_init_scb(ha, scb);

	scb->timeout = ips_cmd_timeout;
	scb->cdb[0] = IPS_CMD_READ_CONF;

	scb->cmd.basic_io.op_code = IPS_CMD_READ_CONF;
	scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
	scb->data_len = sizeof (*ha->conf);
	/* response arrives in the shared ioctl DMA buffer */
	scb->cmd.basic_io.sg_addr = ha->ioctl_busaddr;

	/* send command */
	if (((ret =
	      ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE)
	    || (ret == IPS_SUCCESS_IMM)
	    || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) {

		/* reset it and fall back to safe defaults */
		memset(ha->conf, 0, sizeof (IPS_CONF));

		/* reset initiator IDs */
		for (i = 0; i < 4; i++)
			ha->conf->init_id[i] = 7;

		/* "completed with error" still yields usable data */
		if ((scb->basic_status & IPS_GSC_STATUS_MASK) ==
		    IPS_CMD_CMPLT_WERROR)
			return (1);

		return (0);
	}

	memcpy(ha->conf, ha->ioctl_data, sizeof(*ha->conf));
	return (1);
}
5798
5799
5800
5801
5802
5803
5804
5805
5806
5807
/*
 * Read (write == FALSE) or write (write == TRUE) NVRAM page 5 through
 * the shared ioctl DMA buffer, staging ha->nvram in/out as appropriate.
 * On failure the in-memory NVRAM copy is zeroed and 0 is returned;
 * returns 1 on success.
 */
static int
ips_readwrite_page5(ips_ha_t * ha, int write, int intr)
{
	ips_scb_t *scb;
	int ret;

	METHOD_TRACE("ips_readwrite_page5", 1);

	/* the last SCB is reserved for internal driver commands */
	scb = &ha->scbs[ha->max_cmds - 1];

	ips_init_scb(ha, scb);

	scb->timeout = ips_cmd_timeout;
	scb->cdb[0] = IPS_CMD_RW_NVRAM_PAGE;

	scb->cmd.nvram.op_code = IPS_CMD_RW_NVRAM_PAGE;
	scb->cmd.nvram.command_id = IPS_COMMAND_ID(ha, scb);
	scb->cmd.nvram.page = 5;
	scb->cmd.nvram.write = write;
	scb->cmd.nvram.reserved = 0;
	scb->cmd.nvram.reserved2 = 0;
	scb->data_len = sizeof (*ha->nvram);
	scb->cmd.nvram.buffer_addr = ha->ioctl_busaddr;
	/* for a write, stage the NVRAM image into the DMA buffer first */
	if (write)
		memcpy(ha->ioctl_data, ha->nvram, sizeof(*ha->nvram));

	/* issue the command */
	if (((ret =
	      ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE)
	    || (ret == IPS_SUCCESS_IMM)
	    || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) {

		/* on failure, invalidate the in-memory NVRAM copy */
		memset(ha->nvram, 0, sizeof (IPS_NVRAM_P5));

		return (0);
	}
	/* for a read, copy the fetched page out of the DMA buffer */
	if (!write)
		memcpy(ha->nvram, ha->ioctl_data, sizeof(*ha->nvram));
	return (1);
}
5848
5849
5850
5851
5852
5853
5854
5855
5856
5857
/*
 * Clear residual adapter state: first a CONFIG_SYNC (power-on clear),
 * then an ERROR_TABLE command to clear the stripe lock.  Both use the
 * reserved internal SCB with the longer reset timeout.  Returns 1 on
 * success, 0 if either command fails.
 */
static int
ips_clear_adapter(ips_ha_t * ha, int intr)
{
	ips_scb_t *scb;
	int ret;

	METHOD_TRACE("ips_clear_adapter", 1);

	/* the last SCB is reserved for internal driver commands */
	scb = &ha->scbs[ha->max_cmds - 1];

	ips_init_scb(ha, scb);

	scb->timeout = ips_reset_timeout;
	scb->cdb[0] = IPS_CMD_CONFIG_SYNC;

	scb->cmd.config_sync.op_code = IPS_CMD_CONFIG_SYNC;
	scb->cmd.config_sync.command_id = IPS_COMMAND_ID(ha, scb);
	scb->cmd.config_sync.channel = 0;
	scb->cmd.config_sync.source_target = IPS_POCL;
	scb->cmd.config_sync.reserved = 0;
	scb->cmd.config_sync.reserved2 = 0;
	scb->cmd.config_sync.reserved3 = 0;

	/* issue command */
	if (((ret =
	      ips_send_wait(ha, scb, ips_reset_timeout, intr)) == IPS_FAILURE)
	    || (ret == IPS_SUCCESS_IMM)
	    || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1))
		return (0);

	/* send unlock stripe command */
	ips_init_scb(ha, scb);

	scb->cdb[0] = IPS_CMD_ERROR_TABLE;
	scb->timeout = ips_reset_timeout;

	scb->cmd.unlock_stripe.op_code = IPS_CMD_ERROR_TABLE;
	scb->cmd.unlock_stripe.command_id = IPS_COMMAND_ID(ha, scb);
	scb->cmd.unlock_stripe.log_drv = 0;
	scb->cmd.unlock_stripe.control = IPS_CSL;
	scb->cmd.unlock_stripe.reserved = 0;
	scb->cmd.unlock_stripe.reserved2 = 0;
	scb->cmd.unlock_stripe.reserved3 = 0;

	/* issue command */
	if (((ret =
	      ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE)
	    || (ret == IPS_SUCCESS_IMM)
	    || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1))
		return (0);

	return (1);
}
5911
5912
5913
5914
5915
5916
5917
5918
5919
5920
5921static void
5922ips_ffdc_reset(ips_ha_t * ha, int intr)
5923{
5924 ips_scb_t *scb;
5925
5926 METHOD_TRACE("ips_ffdc_reset", 1);
5927
5928 scb = &ha->scbs[ha->max_cmds - 1];
5929
5930 ips_init_scb(ha, scb);
5931
5932 scb->timeout = ips_cmd_timeout;
5933 scb->cdb[0] = IPS_CMD_FFDC;
5934 scb->cmd.ffdc.op_code = IPS_CMD_FFDC;
5935 scb->cmd.ffdc.command_id = IPS_COMMAND_ID(ha, scb);
5936 scb->cmd.ffdc.reset_count = ha->reset_count;
5937 scb->cmd.ffdc.reset_type = 0x80;
5938
5939
5940 ips_fix_ffdc_time(ha, scb, ha->last_ffdc);
5941
5942
5943 ips_send_wait(ha, scb, ips_cmd_timeout, intr);
5944}
5945
5946
5947
5948
5949
5950
5951
5952
5953
5954
5955static void
5956ips_ffdc_time(ips_ha_t * ha)
5957{
5958 ips_scb_t *scb;
5959
5960 METHOD_TRACE("ips_ffdc_time", 1);
5961
5962 DEBUG_VAR(1, "(%s%d) Sending time update.", ips_name, ha->host_num);
5963
5964 scb = &ha->scbs[ha->max_cmds - 1];
5965
5966 ips_init_scb(ha, scb);
5967
5968 scb->timeout = ips_cmd_timeout;
5969 scb->cdb[0] = IPS_CMD_FFDC;
5970 scb->cmd.ffdc.op_code = IPS_CMD_FFDC;
5971 scb->cmd.ffdc.command_id = IPS_COMMAND_ID(ha, scb);
5972 scb->cmd.ffdc.reset_count = 0;
5973 scb->cmd.ffdc.reset_type = 0;
5974
5975
5976 ips_fix_ffdc_time(ha, scb, ha->last_ffdc);
5977
5978
5979 ips_send_wait(ha, scb, ips_cmd_timeout, IPS_FFDC);
5980}
5981
5982
5983
5984
5985
5986
5987
5988
5989
5990static void
5991ips_fix_ffdc_time(ips_ha_t * ha, ips_scb_t * scb, time_t current_time)
5992{
5993 long days;
5994 long rem;
5995 int i;
5996 int year;
5997 int yleap;
5998 int year_lengths[2] = { IPS_DAYS_NORMAL_YEAR, IPS_DAYS_LEAP_YEAR };
5999 int month_lengths[12][2] = { {31, 31},
6000 {28, 29},
6001 {31, 31},
6002 {30, 30},
6003 {31, 31},
6004 {30, 30},
6005 {31, 31},
6006 {31, 31},
6007 {30, 30},
6008 {31, 31},
6009 {30, 30},
6010 {31, 31}
6011 };
6012
6013 METHOD_TRACE("ips_fix_ffdc_time", 1);
6014
6015 days = current_time / IPS_SECS_DAY;
6016 rem = current_time % IPS_SECS_DAY;
6017
6018 scb->cmd.ffdc.hour = (rem / IPS_SECS_HOUR);
6019 rem = rem % IPS_SECS_HOUR;
6020 scb->cmd.ffdc.minute = (rem / IPS_SECS_MIN);
6021 scb->cmd.ffdc.second = (rem % IPS_SECS_MIN);
6022
6023 year = IPS_EPOCH_YEAR;
6024 while (days < 0 || days >= year_lengths[yleap = IPS_IS_LEAP_YEAR(year)]) {
6025 int newy;
6026
6027 newy = year + (days / IPS_DAYS_NORMAL_YEAR);
6028 if (days < 0)
6029 --newy;
6030 days -= (newy - year) * IPS_DAYS_NORMAL_YEAR +
6031 IPS_NUM_LEAP_YEARS_THROUGH(newy - 1) -
6032 IPS_NUM_LEAP_YEARS_THROUGH(year - 1);
6033 year = newy;
6034 }
6035
6036 scb->cmd.ffdc.yearH = year / 100;
6037 scb->cmd.ffdc.yearL = year % 100;
6038
6039 for (i = 0; days >= month_lengths[i][yleap]; ++i)
6040 days -= month_lengths[i][yleap];
6041
6042 scb->cmd.ffdc.month = i + 1;
6043 scb->cmd.ffdc.day = days + 1;
6044}
6045
6046
6047
6048
6049
6050
6051
6052
6053
6054
6055
6056
6057
/*
 * Erase the flash BIOS on the adapter via port I/O.
 *
 * Issues an Intel 28F-style flash command sequence through the flash
 * address pointer (FLAP) and flash data port (FLDP) registers.
 * Returns 0 on success, 1 on timeout or flash error.
 */
static int
ips_erase_bios(ips_ha_t * ha)
{
 int timeout;
 uint8_t status = 0;

 METHOD_TRACE("ips_erase_bios", 1);

 status = 0;

 /* Point the flash address pointer at the start of the part.
  * Trombone64 revisions need a short delay after every flash access. */
 outl(0, ha->io_addr + IPS_REG_FLAP);
 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
  udelay(25);

 /* 0x50: clear the flash status register. */
 outb(0x50, ha->io_addr + IPS_REG_FLDP);
 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
  udelay(25);

 /* 0x20: erase setup. */
 outb(0x20, ha->io_addr + IPS_REG_FLDP);
 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
  udelay(25);

 /* 0xD0: erase confirm. */
 outb(0xD0, ha->io_addr + IPS_REG_FLDP);
 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
  udelay(25);

 /* 0x70: read status register. */
 outb(0x70, ha->io_addr + IPS_REG_FLDP);
 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
  udelay(25);

 /* Poll for ready (status bit 7), ~80 seconds at 1 ms per loop. */
 timeout = 80000;

 while (timeout > 0) {
  if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
   outl(0, ha->io_addr + IPS_REG_FLAP);
   udelay(25);
  }

  status = inb(ha->io_addr + IPS_REG_FLDP);

  if (status & 0x80)
   break;

  MDELAY(1);
  timeout--;
 }

 /* Erase never completed: suspend it and bail out. */
 if (timeout <= 0) {

  /* 0xB0: erase suspend. */
  outb(0xB0, ha->io_addr + IPS_REG_FLDP);
  if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
   udelay(25);

  /* Wait (up to 10 s) for the suspend to take effect. */
  timeout = 10000;
  while (timeout > 0) {
   if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
    outl(0, ha->io_addr + IPS_REG_FLAP);
    udelay(25);
   }

   status = inb(ha->io_addr + IPS_REG_FLDP);

   if (status & 0xC0)
    break;

   MDELAY(1);
   timeout--;
  }

  return (1);
 }

 /* Status bit 3: programming-voltage (VPP) error — presumably VPP
  * out of range during the erase; TODO confirm against flash spec. */
 if (status & 0x08)

  return (1);

 /* Status bits 4-5: erase/command-sequence error. */
 if (status & 0x30)

  return (1);

 /* Erase OK: 0x50 clears status, 0xFF returns to read-array mode. */
 outb(0x50, ha->io_addr + IPS_REG_FLDP);
 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
  udelay(25);

 outb(0xFF, ha->io_addr + IPS_REG_FLDP);
 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
  udelay(25);

 return (0);
}
6161
6162
6163
6164
6165
6166
6167
6168
6169
/*
 * Erase the flash BIOS on the adapter via memory-mapped I/O.
 *
 * Same Intel 28F-style command sequence as ips_erase_bios(), but using
 * readb/writeb on the mapped register window instead of port I/O.
 * Returns 0 on success, 1 on timeout or flash error.
 */
static int
ips_erase_bios_memio(ips_ha_t * ha)
{
 int timeout;
 uint8_t status;

 METHOD_TRACE("ips_erase_bios_memio", 1);

 status = 0;

 /* Point the flash address pointer at the start of the part.
  * Trombone64 revisions need a short delay after every flash access. */
 writel(0, ha->mem_ptr + IPS_REG_FLAP);
 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
  udelay(25);

 /* 0x50: clear the flash status register. */
 writeb(0x50, ha->mem_ptr + IPS_REG_FLDP);
 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
  udelay(25);

 /* 0x20: erase setup. */
 writeb(0x20, ha->mem_ptr + IPS_REG_FLDP);
 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
  udelay(25);

 /* 0xD0: erase confirm. */
 writeb(0xD0, ha->mem_ptr + IPS_REG_FLDP);
 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
  udelay(25);

 /* 0x70: read status register. */
 writeb(0x70, ha->mem_ptr + IPS_REG_FLDP);
 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
  udelay(25);

 /* Poll for ready (status bit 7), ~80 seconds at 1 ms per loop. */
 timeout = 80000;

 while (timeout > 0) {
  if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
   writel(0, ha->mem_ptr + IPS_REG_FLAP);
   udelay(25);
  }

  status = readb(ha->mem_ptr + IPS_REG_FLDP);

  if (status & 0x80)
   break;

  MDELAY(1);
  timeout--;
 }

 /* Erase never completed: suspend it and bail out. */
 if (timeout <= 0) {

  /* 0xB0: erase suspend. */
  writeb(0xB0, ha->mem_ptr + IPS_REG_FLDP);
  if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
   udelay(25);

  /* Wait (up to 10 s) for the suspend to take effect. */
  timeout = 10000;
  while (timeout > 0) {
   if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
    writel(0, ha->mem_ptr + IPS_REG_FLAP);
    udelay(25);
   }

   status = readb(ha->mem_ptr + IPS_REG_FLDP);

   if (status & 0xC0)
    break;

   MDELAY(1);
   timeout--;
  }

  return (1);
 }

 /* Status bit 3: programming-voltage (VPP) error — presumably VPP
  * out of range during the erase; TODO confirm against flash spec. */
 if (status & 0x08)

  return (1);

 /* Status bits 4-5: erase/command-sequence error. */
 if (status & 0x30)

  return (1);

 /* Erase OK: 0x50 clears status, 0xFF returns to read-array mode. */
 writeb(0x50, ha->mem_ptr + IPS_REG_FLDP);
 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
  udelay(25);

 writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
  udelay(25);

 return (0);
}
6273
6274
6275
6276
6277
6278
6279
6280
6281
/*
 * Program `buffersize` bytes from `buffer` into the flash BIOS,
 * starting at flash address `offset`, via port I/O.
 *
 * One byte per iteration: set address, 0x40 (byte program), data byte,
 * then poll the status register for completion.
 * Returns 0 on success, 1 on timeout or program error.
 */
static int
ips_program_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize,
   uint32_t offset)
{
 int i;
 int timeout;
 uint8_t status = 0;

 METHOD_TRACE("ips_program_bios", 1);

 status = 0;

 for (i = 0; i < buffersize; i++) {

  /* Select the flash address for this byte. */
  outl(i + offset, ha->io_addr + IPS_REG_FLAP);
  if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
   udelay(25);

  /* 0x40: byte program command. */
  outb(0x40, ha->io_addr + IPS_REG_FLDP);
  if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
   udelay(25);

  /* Write the data byte to be programmed. */
  outb(buffer[i], ha->io_addr + IPS_REG_FLDP);
  if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
   udelay(25);

  /* Poll for ready (status bit 7), up to ~1 s per byte. */
  timeout = 1000;
  while (timeout > 0) {
   if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
    outl(0, ha->io_addr + IPS_REG_FLAP);
    udelay(25);
   }

   status = inb(ha->io_addr + IPS_REG_FLDP);

   if (status & 0x80)
    break;

   MDELAY(1);
   timeout--;
  }

  if (timeout == 0) {
   /* Timed out: return the part to read-array mode (0xFF). */
   outl(0, ha->io_addr + IPS_REG_FLAP);
   if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
    udelay(25);

   outb(0xFF, ha->io_addr + IPS_REG_FLDP);
   if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
    udelay(25);

   return (1);
  }

  /* Status bits 3-4 set: program/VPP error — restore read-array
   * mode and fail. */
  if (status & 0x18) {

   outl(0, ha->io_addr + IPS_REG_FLAP);
   if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
    udelay(25);

   outb(0xFF, ha->io_addr + IPS_REG_FLDP);
   if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
    udelay(25);

   return (1);
  }
 }

 /* All bytes programmed: return the part to read-array mode. */
 outl(0, ha->io_addr + IPS_REG_FLAP);
 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
  udelay(25);

 outb(0xFF, ha->io_addr + IPS_REG_FLDP);
 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
  udelay(25);

 return (0);
}
6364
6365
6366
6367
6368
6369
6370
6371
6372
/*
 * Program `buffersize` bytes from `buffer` into the flash BIOS,
 * starting at flash address `offset`, via memory-mapped I/O.
 *
 * Identical command flow to ips_program_bios(), using readb/writeb.
 * Returns 0 on success, 1 on timeout or program error.
 */
static int
ips_program_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize,
    uint32_t offset)
{
 int i;
 int timeout;
 uint8_t status = 0;

 METHOD_TRACE("ips_program_bios_memio", 1);

 status = 0;

 for (i = 0; i < buffersize; i++) {

  /* Select the flash address for this byte. */
  writel(i + offset, ha->mem_ptr + IPS_REG_FLAP);
  if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
   udelay(25);

  /* 0x40: byte program command. */
  writeb(0x40, ha->mem_ptr + IPS_REG_FLDP);
  if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
   udelay(25);

  /* Write the data byte to be programmed. */
  writeb(buffer[i], ha->mem_ptr + IPS_REG_FLDP);
  if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
   udelay(25);

  /* Poll for ready (status bit 7), up to ~1 s per byte. */
  timeout = 1000;
  while (timeout > 0) {
   if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
    writel(0, ha->mem_ptr + IPS_REG_FLAP);
    udelay(25);
   }

   status = readb(ha->mem_ptr + IPS_REG_FLDP);

   if (status & 0x80)
    break;

   MDELAY(1);
   timeout--;
  }

  if (timeout == 0) {
   /* Timed out: return the part to read-array mode (0xFF). */
   writel(0, ha->mem_ptr + IPS_REG_FLAP);
   if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
    udelay(25);

   writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
   if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
    udelay(25);

   return (1);
  }

  /* Status bits 3-4 set: program/VPP error — restore read-array
   * mode and fail. */
  if (status & 0x18) {

   writel(0, ha->mem_ptr + IPS_REG_FLAP);
   if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
    udelay(25);

   writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
   if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
    udelay(25);

   return (1);
  }
 }

 /* All bytes programmed: return the part to read-array mode. */
 writel(0, ha->mem_ptr + IPS_REG_FLAP);
 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
  udelay(25);

 writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
  udelay(25);

 return (0);
}
6455
6456
6457
6458
6459
6460
6461
6462
6463
/*
 * Verify the programmed flash BIOS via port I/O.
 *
 * Checks the 0x55 0xAA BIOS signature at flash offsets 0 and 1, then
 * sums bytes [2, buffersize) read back from flash at `offset`; the
 * running 8-bit checksum (seeded with 0xff) must end at 0.
 * `buffer` is unused — the comparison is checksum-based, not bytewise.
 * Returns 0 on success, 1 on any mismatch.
 */
static int
ips_verify_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize,
  uint32_t offset)
{
 uint8_t checksum;
 int i;

 METHOD_TRACE("ips_verify_bios", 1);

 /* First signature byte must be 0x55. */
 outl(0, ha->io_addr + IPS_REG_FLAP);
 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
  udelay(25);

 if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55)
  return (1);

 /* Second signature byte must be 0xAA. */
 outl(1, ha->io_addr + IPS_REG_FLAP);
 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
  udelay(25);
 if (inb(ha->io_addr + IPS_REG_FLDP) != 0xAA)
  return (1);

 /* Accumulate the 8-bit checksum over the remaining image bytes. */
 checksum = 0xff;
 for (i = 2; i < buffersize; i++) {

  outl(i + offset, ha->io_addr + IPS_REG_FLAP);
  if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
   udelay(25);

  checksum = (uint8_t) checksum + inb(ha->io_addr + IPS_REG_FLDP);
 }

 if (checksum != 0)
  /* Checksum mismatch — image is corrupt. */
  return (1);
 else
  /* Image verified. */
  return (0);
}
6504
6505
6506
6507
6508
6509
6510
6511
6512
/*
 * Verify the programmed flash BIOS via memory-mapped I/O.
 *
 * Same check as ips_verify_bios() (0x55 0xAA signature followed by an
 * 8-bit checksum seeded with 0xff that must end at 0), using
 * readb/writel on the mapped register window. `buffer` is unused.
 * Returns 0 on success, 1 on any mismatch.
 */
static int
ips_verify_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize,
   uint32_t offset)
{
 uint8_t checksum;
 int i;

 METHOD_TRACE("ips_verify_bios_memio", 1);

 /* First signature byte must be 0x55. */
 writel(0, ha->mem_ptr + IPS_REG_FLAP);
 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
  udelay(25);

 if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0x55)
  return (1);

 /* Second signature byte must be 0xAA. */
 writel(1, ha->mem_ptr + IPS_REG_FLAP);
 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
  udelay(25);
 if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0xAA)
  return (1);

 /* Accumulate the 8-bit checksum over the remaining image bytes. */
 checksum = 0xff;
 for (i = 2; i < buffersize; i++) {

  writel(i + offset, ha->mem_ptr + IPS_REG_FLAP);
  if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
   udelay(25);

  checksum =
      (uint8_t) checksum + readb(ha->mem_ptr + IPS_REG_FLDP);
 }

 if (checksum != 0)
  /* Checksum mismatch — image is corrupt. */
  return (1);
 else
  /* Image verified. */
  return (0);
}
6554
6555
6556
6557
6558
6559
6560
6561
6562static int
6563ips_abort_init(ips_ha_t * ha, int index)
6564{
6565 ha->active = 0;
6566 ips_free(ha);
6567 ips_ha[index] = NULL;
6568 ips_sh[index] = NULL;
6569 return -1;
6570}
6571
6572
6573
6574
6575
6576
6577
6578
6579static void
6580ips_shift_controllers(int lowindex, int highindex)
6581{
6582 ips_ha_t *ha_sav = ips_ha[highindex];
6583 struct Scsi_Host *sh_sav = ips_sh[highindex];
6584 int i;
6585
6586 for (i = highindex; i > lowindex; i--) {
6587 ips_ha[i] = ips_ha[i - 1];
6588 ips_sh[i] = ips_sh[i - 1];
6589 ips_ha[i]->host_num = i;
6590 }
6591 ha_sav->host_num = lowindex;
6592 ips_ha[lowindex] = ha_sav;
6593 ips_sh[lowindex] = sh_sav;
6594}
6595
6596
6597
6598
6599
6600
6601
6602
6603static void
6604ips_order_controllers(void)
6605{
6606 int i, j, tmp, position = 0;
6607 IPS_NVRAM_P5 *nvram;
6608 if (!ips_ha[0])
6609 return;
6610 nvram = ips_ha[0]->nvram;
6611
6612 if (nvram->adapter_order[0]) {
6613 for (i = 1; i <= nvram->adapter_order[0]; i++) {
6614 for (j = position; j < ips_num_controllers; j++) {
6615 switch (ips_ha[j]->ad_type) {
6616 case IPS_ADTYPE_SERVERAID6M:
6617 case IPS_ADTYPE_SERVERAID7M:
6618 if (nvram->adapter_order[i] == 'M') {
6619 ips_shift_controllers(position,
6620 j);
6621 position++;
6622 }
6623 break;
6624 case IPS_ADTYPE_SERVERAID4L:
6625 case IPS_ADTYPE_SERVERAID4M:
6626 case IPS_ADTYPE_SERVERAID4MX:
6627 case IPS_ADTYPE_SERVERAID4LX:
6628 if (nvram->adapter_order[i] == 'N') {
6629 ips_shift_controllers(position,
6630 j);
6631 position++;
6632 }
6633 break;
6634 case IPS_ADTYPE_SERVERAID6I:
6635 case IPS_ADTYPE_SERVERAID5I2:
6636 case IPS_ADTYPE_SERVERAID5I1:
6637 case IPS_ADTYPE_SERVERAID7k:
6638 if (nvram->adapter_order[i] == 'S') {
6639 ips_shift_controllers(position,
6640 j);
6641 position++;
6642 }
6643 break;
6644 case IPS_ADTYPE_SERVERAID:
6645 case IPS_ADTYPE_SERVERAID2:
6646 case IPS_ADTYPE_NAVAJO:
6647 case IPS_ADTYPE_KIOWA:
6648 case IPS_ADTYPE_SERVERAID3L:
6649 case IPS_ADTYPE_SERVERAID3:
6650 case IPS_ADTYPE_SERVERAID4H:
6651 if (nvram->adapter_order[i] == 'A') {
6652 ips_shift_controllers(position,
6653 j);
6654 position++;
6655 }
6656 break;
6657 default:
6658 break;
6659 }
6660 }
6661 }
6662
6663 return;
6664 }
6665
6666 tmp = 0;
6667 for (i = position; i < ips_num_controllers; i++) {
6668 if (ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID5I2 ||
6669 ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID5I1) {
6670 ips_shift_controllers(position, i);
6671 position++;
6672 tmp = 1;
6673 }
6674 }
6675
6676 if (!tmp)
6677 return;
6678 for (i = position; i < ips_num_controllers; i++) {
6679 if (ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4L ||
6680 ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4M ||
6681 ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4LX ||
6682 ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4MX) {
6683 ips_shift_controllers(position, i);
6684 position++;
6685 }
6686 }
6687
6688 return;
6689}
6690
6691
6692
6693
6694
6695
6696
6697
/*
 * Replace the bootstrap ha structure in ips_ha[index] with one embedded
 * in a freshly allocated Scsi_Host and register it with the SCSI
 * midlayer. Returns 0 on success, -1 on failure.
 */
static int
ips_register_scsi(int index)
{
 struct Scsi_Host *sh;
 ips_ha_t *ha, *oldha = ips_ha[index];
 sh = scsi_host_alloc(&ips_driver_template, sizeof (ips_ha_t));
 if (!sh) {
  IPS_PRINTK(KERN_WARNING, oldha->pcidev,
      "Unable to register controller with SCSI subsystem\n");
  return -1;
 }
 ha = IPS_HA(sh);
 /* Carry the fully-initialized state over into the embedded ha. */
 memcpy(ha, oldha, sizeof (ips_ha_t));
 /* The IRQ must be re-requested with the new ha as dev_id, so the
  * old registration is dropped first.
  * NOTE(review): if request_irq() fails below, the controller is
  * left with no interrupt handler and oldha is never freed —
  * confirm this error path is acceptable. */
 free_irq(oldha->pcidev->irq, oldha);

 if (request_irq(ha->pcidev->irq, do_ipsintr, IRQF_SHARED, ips_name, ha)) {
  IPS_PRINTK(KERN_WARNING, ha->pcidev,
      "Unable to install interrupt handler\n");
  goto err_out_sh;
 }

 kfree(oldha);

 /* Populate the host template limits, then override queue depth and
  * topology with this controller's actual capabilities. */
 sh->unique_id = (ha->io_addr) ? ha->io_addr : ha->mem_addr;
 sh->sg_tablesize = sh->hostt->sg_tablesize;
 sh->can_queue = sh->hostt->can_queue;
 sh->cmd_per_lun = sh->hostt->cmd_per_lun;
 sh->use_clustering = sh->hostt->use_clustering;
 sh->max_sectors = 128;

 sh->max_id = ha->ntargets;
 sh->max_lun = ha->nlun;
 sh->max_channel = ha->nbus - 1;
 sh->can_queue = ha->max_cmds - 1;

 if (scsi_add_host(sh, &ha->pcidev->dev))
  goto err_out;

 ips_sh[index] = sh;
 ips_ha[index] = ha;

 scsi_scan_host(sh);

 return 0;

err_out:
 free_irq(ha->pcidev->irq, ha);
err_out_sh:
 scsi_host_put(sh);
 return -1;
}
6750
6751
6752
6753
6754
6755
6756
6757static void
6758ips_remove_device(struct pci_dev *pci_dev)
6759{
6760 struct Scsi_Host *sh = pci_get_drvdata(pci_dev);
6761
6762 pci_set_drvdata(pci_dev, NULL);
6763
6764 ips_release(sh);
6765
6766 pci_release_regions(pci_dev);
6767 pci_disable_device(pci_dev);
6768}
6769
6770
6771
6772
6773
6774
6775
6776
6777static int __init
6778ips_module_init(void)
6779{
6780#if !defined(__i386__) && !defined(__ia64__) && !defined(__x86_64__)
6781 printk(KERN_ERR "ips: This driver has only been tested on the x86/ia64/x86_64 platforms\n");
6782 add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
6783#endif
6784
6785 if (pci_register_driver(&ips_pci_driver) < 0)
6786 return -ENODEV;
6787 ips_driver_template.module = THIS_MODULE;
6788 ips_order_controllers();
6789 if (!ips_detect(&ips_driver_template)) {
6790 pci_unregister_driver(&ips_pci_driver);
6791 return -ENODEV;
6792 }
6793 register_reboot_notifier(&ips_notifier);
6794 return 0;
6795}
6796
6797
6798
6799
6800
6801
6802
6803
/*
 * Module exit point: mirror ips_module_init() by dropping the PCI
 * driver registration and the reboot notifier.
 */
static void __exit
ips_module_exit(void)
{
 pci_unregister_driver(&ips_pci_driver);
 unregister_reboot_notifier(&ips_notifier);
}

/* Module entry/exit hooks. */
module_init(ips_module_init);
module_exit(ips_module_exit);
6813
6814
6815
6816
6817
6818
6819
6820
6821
6822
6823static int
6824ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent)
6825{
6826 int index = -1;
6827 int rc;
6828
6829 METHOD_TRACE("ips_insert_device", 1);
6830 rc = pci_enable_device(pci_dev);
6831 if (rc)
6832 return rc;
6833
6834 rc = pci_request_regions(pci_dev, "ips");
6835 if (rc)
6836 goto err_out;
6837
6838 rc = ips_init_phase1(pci_dev, &index);
6839 if (rc == SUCCESS)
6840 rc = ips_init_phase2(index);
6841
6842 if (ips_hotplug)
6843 if (ips_register_scsi(index)) {
6844 ips_free(ips_ha[index]);
6845 rc = -1;
6846 }
6847
6848 if (rc == SUCCESS)
6849 ips_num_controllers++;
6850
6851 ips_next_controller = ips_num_controllers;
6852
6853 if (rc < 0) {
6854 rc = -ENODEV;
6855 goto err_out_regions;
6856 }
6857
6858 pci_set_drvdata(pci_dev, ips_sh[index]);
6859 return 0;
6860
6861err_out_regions:
6862 pci_release_regions(pci_dev);
6863err_out:
6864 pci_disable_device(pci_dev);
6865 return rc;
6866}
6867
6868
6869
6870
6871
6872
6873
6874
6875
6876
/*
 * First-phase controller bring-up: find a free global slot, map the
 * device's register window, allocate the per-controller ha structure
 * and its DMA/ioctl buffers, and run the hardware init function.
 * Stores the chosen slot in *indexPtr and returns SUCCESS, or -1 on
 * failure (after ips_abort_init() has torn down what was built).
 */
static int
ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
{
 ips_ha_t *ha;
 uint32_t io_addr;
 uint32_t mem_addr;
 uint32_t io_len;
 uint32_t mem_len;
 uint8_t bus;
 uint8_t func;
 int j;
 int index;
 dma_addr_t dma_address;
 char __iomem *ioremap_ptr;
 char __iomem *mem_ptr;
 uint32_t IsDead;

 METHOD_TRACE("ips_init_phase1", 1);
 /* Find the first free slot in the global controller table. */
 index = IPS_MAX_ADAPTERS;
 for (j = 0; j < IPS_MAX_ADAPTERS; j++) {
  if (ips_ha[j] == NULL) {
   index = j;
   break;
  }
 }

 if (index >= IPS_MAX_ADAPTERS)
  return -1;

 bus = pci_dev->bus->number;
 func = pci_dev->devfn;

 /* Scan the first two BARs: one I/O-port window, one memory window. */
 mem_addr = 0;
 io_addr = 0;
 mem_len = 0;
 io_len = 0;

 for (j = 0; j < 2; j++) {
  if (!pci_resource_start(pci_dev, j))
   break;

  if (pci_resource_flags(pci_dev, j) & IORESOURCE_IO) {
   io_addr = pci_resource_start(pci_dev, j);
   io_len = pci_resource_len(pci_dev, j);
  } else {
   mem_addr = pci_resource_start(pci_dev, j);
   mem_len = pci_resource_len(pci_dev, j);
  }
 }

 /* Map one page around the memory BAR; the registers fit within it.
  * The mapping must start page-aligned, so keep the offset. */
 if (mem_addr) {
  uint32_t base;
  uint32_t offs;

  base = mem_addr & PAGE_MASK;
  offs = mem_addr - base;
  ioremap_ptr = ioremap(base, PAGE_SIZE);
  if (!ioremap_ptr)
   return -1;
  mem_ptr = ioremap_ptr + offs;
 } else {
  ioremap_ptr = NULL;
  mem_ptr = NULL;
 }

 /* Bootstrap ha structure; ips_register_scsi() later replaces it
  * with one embedded in the Scsi_Host. */
 ha = kzalloc(sizeof (ips_ha_t), GFP_KERNEL);
 if (ha == NULL) {
  /* NOTE(review): this path leaks ioremap_ptr — confirm whether
   * an iounmap() is needed before returning. */
  IPS_PRINTK(KERN_WARNING, pci_dev,
      "Unable to allocate temporary ha struct\n");
  return -1;
 }

 ips_sh[index] = NULL;
 ips_ha[index] = ha;
 ha->active = 1;

 ha->io_addr = io_addr;
 ha->io_len = io_len;
 ha->mem_addr = mem_addr;
 ha->mem_len = mem_len;
 ha->mem_ptr = mem_ptr;
 ha->ioremap_ptr = ioremap_ptr;
 ha->host_num = (uint32_t) index;
 ha->slot_num = PCI_SLOT(pci_dev->devfn);
 ha->pcidev = pci_dev;

 /* Prefer 64-bit DMA on adapters with the enhanced SG list; fall
  * back to a 32-bit mask otherwise. */
 if (IPS_ENABLE_DMA64 && IPS_HAS_ENH_SGLIST(ha) &&
     !pci_set_dma_mask(ha->pcidev, DMA_BIT_MASK(64))) {
  (ha)->flags |= IPS_HA_ENH_SG;
 } else {
  if (pci_set_dma_mask(ha->pcidev, DMA_BIT_MASK(32)) != 0) {
   printk(KERN_WARNING "Unable to set DMA Mask\n");
   return ips_abort_init(ha, index);
  }
 }
 /* One shared flash buffer (128 pages) for CD-boot BIOS flashing. */
 if(ips_cd_boot && !ips_FlashData){
  ips_FlashData = pci_alloc_consistent(pci_dev, PAGE_SIZE << 7,
          &ips_flashbusaddr);
 }

 /* DMA-coherent buffers the adapter writes into directly. */
 ha->enq = pci_alloc_consistent(pci_dev, sizeof (IPS_ENQ),
           &ha->enq_busaddr);
 if (!ha->enq) {
  IPS_PRINTK(KERN_WARNING, pci_dev,
      "Unable to allocate host inquiry structure\n");
  return ips_abort_init(ha, index);
 }

 /* One allocation holds the adapter status area plus a dummy
  * command block placed immediately after it. */
 ha->adapt = pci_alloc_consistent(pci_dev, sizeof (IPS_ADAPTER) +
     sizeof (IPS_IO_CMD), &dma_address);
 if (!ha->adapt) {
  IPS_PRINTK(KERN_WARNING, pci_dev,
      "Unable to allocate host adapt & dummy structures\n");
  return ips_abort_init(ha, index);
 }
 ha->adapt->hw_status_start = dma_address;
 ha->dummy = (void *) (ha->adapt + 1);

 ha->logical_drive_info = pci_alloc_consistent(pci_dev, sizeof (IPS_LD_INFO), &dma_address);
 if (!ha->logical_drive_info) {
  IPS_PRINTK(KERN_WARNING, pci_dev,
      "Unable to allocate logical drive info structure\n");
  return ips_abort_init(ha, index);
 }
 ha->logical_drive_info_dma_addr = dma_address;

 /* Plain kernel-memory copies of configuration state. */
 ha->conf = kmalloc(sizeof (IPS_CONF), GFP_KERNEL);

 if (!ha->conf) {
  IPS_PRINTK(KERN_WARNING, pci_dev,
      "Unable to allocate host conf structure\n");
  return ips_abort_init(ha, index);
 }

 ha->nvram = kmalloc(sizeof (IPS_NVRAM_P5), GFP_KERNEL);

 if (!ha->nvram) {
  IPS_PRINTK(KERN_WARNING, pci_dev,
      "Unable to allocate host NVRAM structure\n");
  return ips_abort_init(ha, index);
 }

 ha->subsys = kmalloc(sizeof (IPS_SUBSYS), GFP_KERNEL);

 if (!ha->subsys) {
  IPS_PRINTK(KERN_WARNING, pci_dev,
      "Unable to allocate host subsystem structure\n");
  return ips_abort_init(ha, index);
 }

 /* The ioctl bounce buffer must be at least one page. */
 if (ips_ioctlsize < PAGE_SIZE)
  ips_ioctlsize = PAGE_SIZE;

 ha->ioctl_data = pci_alloc_consistent(pci_dev, ips_ioctlsize,
           &ha->ioctl_busaddr);
 ha->ioctl_len = ips_ioctlsize;
 if (!ha->ioctl_data) {
  IPS_PRINTK(KERN_WARNING, pci_dev,
      "Unable to allocate IOCTL data\n");
  return ips_abort_init(ha, index);
 }

 /* Select the adapter-family-specific function table. */
 ips_setup_funclist(ha);

 if ((IPS_IS_MORPHEUS(ha)) || (IPS_IS_MARCO(ha))) {
  /* Firmware posts 0xDEADBEEF in MSG1 when it has crashed;
   * force a reset to recover it before initialization. */
  IsDead = readl(ha->mem_ptr + IPS_REG_I960_MSG1);
  if (IsDead == 0xDEADBEEF) {
   ips_reset_morpheus(ha);
  }
 }

 /* Initialize the controller hardware unless it already is. */
 if (!(*ha->func.isinit) (ha)) {
  if (!(*ha->func.init) (ha)) {
   IPS_PRINTK(KERN_WARNING, pci_dev,
       "Unable to initialize controller\n");
   return ips_abort_init(ha, index);
  }
 }

 *indexPtr = index;
 return SUCCESS;
}
7086
7087
7088
7089
7090
7091
7092
7093
7094
7095
7096static int
7097ips_init_phase2(int index)
7098{
7099 ips_ha_t *ha;
7100
7101 ha = ips_ha[index];
7102
7103 METHOD_TRACE("ips_init_phase2", 1);
7104 if (!ha->active) {
7105 ips_ha[index] = NULL;
7106 return -1;
7107 }
7108
7109
7110 if (request_irq(ha->pcidev->irq, do_ipsintr, IRQF_SHARED, ips_name, ha)) {
7111 IPS_PRINTK(KERN_WARNING, ha->pcidev,
7112 "Unable to install interrupt handler\n");
7113 return ips_abort_init(ha, index);
7114 }
7115
7116
7117
7118
7119 ha->max_cmds = 1;
7120 if (!ips_allocatescbs(ha)) {
7121 IPS_PRINTK(KERN_WARNING, ha->pcidev,
7122 "Unable to allocate a CCB\n");
7123 free_irq(ha->pcidev->irq, ha);
7124 return ips_abort_init(ha, index);
7125 }
7126
7127 if (!ips_hainit(ha)) {
7128 IPS_PRINTK(KERN_WARNING, ha->pcidev,
7129 "Unable to initialize controller\n");
7130 free_irq(ha->pcidev->irq, ha);
7131 return ips_abort_init(ha, index);
7132 }
7133
7134 ips_deallocatescbs(ha, 1);
7135
7136
7137 if (!ips_allocatescbs(ha)) {
7138 IPS_PRINTK(KERN_WARNING, ha->pcidev,
7139 "Unable to allocate CCBs\n");
7140 free_irq(ha->pcidev->irq, ha);
7141 return ips_abort_init(ha, index);
7142 }
7143
7144 return SUCCESS;
7145}
7146
/* Module metadata exposed via modinfo. */
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("IBM ServeRAID Adapter Driver " IPS_VER_STRING);
MODULE_VERSION(IPS_VER_STRING);
7150
7151
7152
7153
7154
7155
7156
7157
7158
7159
7160
7161
7162
7163
7164
7165
7166
7167
7168
7169
7170