1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164#include <asm/io.h>
165#include <asm/byteorder.h>
166#include <asm/page.h>
167#include <linux/stddef.h>
168#include <linux/string.h>
169#include <linux/errno.h>
170#include <linux/kernel.h>
171#include <linux/ioport.h>
172#include <linux/slab.h>
173#include <linux/delay.h>
174#include <linux/pci.h>
175#include <linux/proc_fs.h>
176#include <linux/reboot.h>
177#include <linux/interrupt.h>
178
179#include <linux/blkdev.h>
180#include <linux/types.h>
181#include <linux/dma-mapping.h>
182
183#include <scsi/sg.h>
184#include "scsi.h"
185#include <scsi/scsi_host.h>
186
187#include "ips.h"
188
189#include <linux/module.h>
190
191#include <linux/stat.h>
192
193#include <linux/spinlock.h>
194#include <linux/init.h>
195
196#include <linux/smp.h>
197
198#ifdef MODULE
199static char *ips = NULL;
200module_param(ips, charp, 0);
201#endif
202
203
204
205
206#define IPS_VERSION_HIGH IPS_VER_MAJOR_STRING "." IPS_VER_MINOR_STRING
207#define IPS_VERSION_LOW "." IPS_VER_BUILD_STRING " "
208
209#define IPS_DMA_DIR(scb) ((!scb->scsi_cmd || ips_is_passthru(scb->scsi_cmd) || \
210 DMA_NONE == scb->scsi_cmd->sc_data_direction) ? \
211 DMA_BIDIRECTIONAL : \
212 scb->scsi_cmd->sc_data_direction)
213
214#ifdef IPS_DEBUG
215#define METHOD_TRACE(s, i) if (ips_debug >= (i+10)) printk(KERN_NOTICE s "\n");
216#define DEBUG(i, s) if (ips_debug >= i) printk(KERN_NOTICE s "\n");
217#define DEBUG_VAR(i, s, v...) if (ips_debug >= i) printk(KERN_NOTICE s "\n", v);
218#else
219#define METHOD_TRACE(s, i)
220#define DEBUG(i, s)
221#define DEBUG_VAR(i, s, v...)
222#endif
223
224
225
226
227static int ips_eh_abort(struct scsi_cmnd *);
228static int ips_eh_reset(struct scsi_cmnd *);
229static int ips_queue(struct Scsi_Host *, struct scsi_cmnd *);
230static const char *ips_info(struct Scsi_Host *);
231static irqreturn_t do_ipsintr(int, void *);
232static int ips_hainit(ips_ha_t *);
233static int ips_map_status(ips_ha_t *, ips_scb_t *, ips_stat_t *);
234static int ips_send_wait(ips_ha_t *, ips_scb_t *, int, int);
235static int ips_send_cmd(ips_ha_t *, ips_scb_t *);
236static int ips_online(ips_ha_t *, ips_scb_t *);
237static int ips_inquiry(ips_ha_t *, ips_scb_t *);
238static int ips_rdcap(ips_ha_t *, ips_scb_t *);
239static int ips_msense(ips_ha_t *, ips_scb_t *);
240static int ips_reqsen(ips_ha_t *, ips_scb_t *);
241static int ips_deallocatescbs(ips_ha_t *, int);
242static int ips_allocatescbs(ips_ha_t *);
243static int ips_reset_copperhead(ips_ha_t *);
244static int ips_reset_copperhead_memio(ips_ha_t *);
245static int ips_reset_morpheus(ips_ha_t *);
246static int ips_issue_copperhead(ips_ha_t *, ips_scb_t *);
247static int ips_issue_copperhead_memio(ips_ha_t *, ips_scb_t *);
248static int ips_issue_i2o(ips_ha_t *, ips_scb_t *);
249static int ips_issue_i2o_memio(ips_ha_t *, ips_scb_t *);
250static int ips_isintr_copperhead(ips_ha_t *);
251static int ips_isintr_copperhead_memio(ips_ha_t *);
252static int ips_isintr_morpheus(ips_ha_t *);
253static int ips_wait(ips_ha_t *, int, int);
254static int ips_write_driver_status(ips_ha_t *, int);
255static int ips_read_adapter_status(ips_ha_t *, int);
256static int ips_read_subsystem_parameters(ips_ha_t *, int);
257static int ips_read_config(ips_ha_t *, int);
258static int ips_clear_adapter(ips_ha_t *, int);
259static int ips_readwrite_page5(ips_ha_t *, int, int);
260static int ips_init_copperhead(ips_ha_t *);
261static int ips_init_copperhead_memio(ips_ha_t *);
262static int ips_init_morpheus(ips_ha_t *);
263static int ips_isinit_copperhead(ips_ha_t *);
264static int ips_isinit_copperhead_memio(ips_ha_t *);
265static int ips_isinit_morpheus(ips_ha_t *);
266static int ips_erase_bios(ips_ha_t *);
267static int ips_program_bios(ips_ha_t *, char *, uint32_t, uint32_t);
268static int ips_verify_bios(ips_ha_t *, char *, uint32_t, uint32_t);
269static int ips_erase_bios_memio(ips_ha_t *);
270static int ips_program_bios_memio(ips_ha_t *, char *, uint32_t, uint32_t);
271static int ips_verify_bios_memio(ips_ha_t *, char *, uint32_t, uint32_t);
272static int ips_flash_copperhead(ips_ha_t *, ips_passthru_t *, ips_scb_t *);
273static int ips_flash_bios(ips_ha_t *, ips_passthru_t *, ips_scb_t *);
274static int ips_flash_firmware(ips_ha_t *, ips_passthru_t *, ips_scb_t *);
275static void ips_free_flash_copperhead(ips_ha_t * ha);
276static void ips_get_bios_version(ips_ha_t *, int);
277static void ips_identify_controller(ips_ha_t *);
278static void ips_chkstatus(ips_ha_t *, IPS_STATUS *);
279static void ips_enable_int_copperhead(ips_ha_t *);
280static void ips_enable_int_copperhead_memio(ips_ha_t *);
281static void ips_enable_int_morpheus(ips_ha_t *);
282static int ips_intr_copperhead(ips_ha_t *);
283static int ips_intr_morpheus(ips_ha_t *);
284static void ips_next(ips_ha_t *, int);
285static void ipsintr_blocking(ips_ha_t *, struct ips_scb *);
286static void ipsintr_done(ips_ha_t *, struct ips_scb *);
287static void ips_done(ips_ha_t *, ips_scb_t *);
288static void ips_free(ips_ha_t *);
289static void ips_init_scb(ips_ha_t *, ips_scb_t *);
290static void ips_freescb(ips_ha_t *, ips_scb_t *);
291static void ips_setup_funclist(ips_ha_t *);
292static void ips_statinit(ips_ha_t *);
293static void ips_statinit_memio(ips_ha_t *);
294static void ips_fix_ffdc_time(ips_ha_t *, ips_scb_t *, time64_t);
295static void ips_ffdc_reset(ips_ha_t *, int);
296static void ips_ffdc_time(ips_ha_t *);
297static uint32_t ips_statupd_copperhead(ips_ha_t *);
298static uint32_t ips_statupd_copperhead_memio(ips_ha_t *);
299static uint32_t ips_statupd_morpheus(ips_ha_t *);
300static ips_scb_t *ips_getscb(ips_ha_t *);
301static void ips_putq_scb_head(ips_scb_queue_t *, ips_scb_t *);
302static void ips_putq_wait_tail(ips_wait_queue_entry_t *, struct scsi_cmnd *);
303static void ips_putq_copp_tail(ips_copp_queue_t *,
304 ips_copp_wait_item_t *);
305static ips_scb_t *ips_removeq_scb_head(ips_scb_queue_t *);
306static ips_scb_t *ips_removeq_scb(ips_scb_queue_t *, ips_scb_t *);
307static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_entry_t *);
308static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_entry_t *,
309 struct scsi_cmnd *);
310static ips_copp_wait_item_t *ips_removeq_copp(ips_copp_queue_t *,
311 ips_copp_wait_item_t *);
312static ips_copp_wait_item_t *ips_removeq_copp_head(ips_copp_queue_t *);
313
314static int ips_is_passthru(struct scsi_cmnd *);
315static int ips_make_passthru(ips_ha_t *, struct scsi_cmnd *, ips_scb_t *, int);
316static int ips_usrcmd(ips_ha_t *, ips_passthru_t *, ips_scb_t *);
317static void ips_cleanup_passthru(ips_ha_t *, ips_scb_t *);
318static void ips_scmd_buf_write(struct scsi_cmnd * scmd, void *data,
319 unsigned int count);
320static void ips_scmd_buf_read(struct scsi_cmnd * scmd, void *data,
321 unsigned int count);
322
323static int ips_write_info(struct Scsi_Host *, char *, int);
324static int ips_show_info(struct seq_file *, struct Scsi_Host *);
325static int ips_host_info(ips_ha_t *, struct seq_file *);
326static int ips_abort_init(ips_ha_t * ha, int index);
327static int ips_init_phase2(int index);
328
329static int ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr);
330static int ips_register_scsi(int index);
331
332static int ips_poll_for_flush_complete(ips_ha_t * ha);
333static void ips_flush_and_reset(ips_ha_t *ha);
334
335
336
337
338static const char ips_name[] = "ips";
339static struct Scsi_Host *ips_sh[IPS_MAX_ADAPTERS];
340static ips_ha_t *ips_ha[IPS_MAX_ADAPTERS];
341static unsigned int ips_next_controller;
342static unsigned int ips_num_controllers;
343static unsigned int ips_released_controllers;
344static int ips_hotplug;
345static int ips_cmd_timeout = 60;
346static int ips_reset_timeout = 60 * 5;
347static int ips_force_memio = 1;
348static int ips_force_i2o = 1;
349static int ips_ioctlsize = IPS_IOCTL_SIZE;
350static int ips_cd_boot;
351static char *ips_FlashData = NULL;
352static dma_addr_t ips_flashbusaddr;
353static long ips_FlashDataInUse;
354static uint32_t MaxLiteCmds = 32;
355static struct scsi_host_template ips_driver_template = {
356 .info = ips_info,
357 .queuecommand = ips_queue,
358 .eh_abort_handler = ips_eh_abort,
359 .eh_host_reset_handler = ips_eh_reset,
360 .proc_name = "ips",
361 .show_info = ips_show_info,
362 .write_info = ips_write_info,
363 .slave_configure = ips_slave_configure,
364 .bios_param = ips_biosparam,
365 .this_id = -1,
366 .sg_tablesize = IPS_MAX_SG,
367 .cmd_per_lun = 3,
368 .no_write_same = 1,
369};
370
371
372
373static struct pci_device_id ips_pci_table[] = {
374 { 0x1014, 0x002E, PCI_ANY_ID, PCI_ANY_ID, 0, 0 },
375 { 0x1014, 0x01BD, PCI_ANY_ID, PCI_ANY_ID, 0, 0 },
376 { 0x9005, 0x0250, PCI_ANY_ID, PCI_ANY_ID, 0, 0 },
377 { 0, }
378};
379
380MODULE_DEVICE_TABLE( pci, ips_pci_table );
381
382static char ips_hot_plug_name[] = "ips";
383
384static int ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent);
385static void ips_remove_device(struct pci_dev *pci_dev);
386
387static struct pci_driver ips_pci_driver = {
388 .name = ips_hot_plug_name,
389 .id_table = ips_pci_table,
390 .probe = ips_insert_device,
391 .remove = ips_remove_device,
392};
393
394
395
396
397
398static int ips_halt(struct notifier_block *nb, ulong event, void *buf);
399
400#define MAX_ADAPTER_NAME 15
401
402static char ips_adapter_name[][30] = {
403 "ServeRAID",
404 "ServeRAID II",
405 "ServeRAID on motherboard",
406 "ServeRAID on motherboard",
407 "ServeRAID 3H",
408 "ServeRAID 3L",
409 "ServeRAID 4H",
410 "ServeRAID 4M",
411 "ServeRAID 4L",
412 "ServeRAID 4Mx",
413 "ServeRAID 4Lx",
414 "ServeRAID 5i",
415 "ServeRAID 5i",
416 "ServeRAID 6M",
417 "ServeRAID 6i",
418 "ServeRAID 7t",
419 "ServeRAID 7k",
420 "ServeRAID 7M"
421};
422
423static struct notifier_block ips_notifier = {
424 ips_halt, NULL, 0
425};
426
427
428
429
430static char ips_command_direction[] = {
431 IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT,
432 IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK,
433 IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
434 IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_OUT,
435 IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_OUT,
436 IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_OUT,
437 IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_IN,
438 IPS_DATA_UNK, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK,
439 IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_UNK,
440 IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT,
441 IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_NONE,
442 IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT,
443 IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT,
444 IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_NONE,
445 IPS_DATA_UNK, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_UNK,
446 IPS_DATA_NONE, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK,
447 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
448 IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
449 IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
450 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
451 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
452 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
453 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
454 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
455 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
456 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
457 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
458 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
459 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
460 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
461 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
462 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
463 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
464 IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_NONE,
465 IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_OUT,
466 IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_NONE,
467 IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_IN,
468 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
469 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
470 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
471 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
472 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
473 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
474 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
475 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
476 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
477 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_OUT,
478 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
479 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
480 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
481 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK
482};
483
484
485
486
487
488
489
490
491
492
493
494static int
495ips_setup(char *ips_str)
496{
497
498 int i;
499 char *key;
500 char *value;
501 static const IPS_OPTION options[] = {
502 {"noi2o", &ips_force_i2o, 0},
503 {"nommap", &ips_force_memio, 0},
504 {"ioctlsize", &ips_ioctlsize, IPS_IOCTL_SIZE},
505 {"cdboot", &ips_cd_boot, 0},
506 {"maxcmds", &MaxLiteCmds, 32},
507 };
508
509
510
511 while ((key = strsep(&ips_str, ",."))) {
512 if (!*key)
513 continue;
514 value = strchr(key, ':');
515 if (value)
516 *value++ = '\0';
517
518
519
520
521 for (i = 0; i < ARRAY_SIZE(options); i++) {
522 if (strncasecmp
523 (key, options[i].option_name,
524 strlen(options[i].option_name)) == 0) {
525 if (value)
526 *options[i].option_flag =
527 simple_strtoul(value, NULL, 0);
528 else
529 *options[i].option_flag =
530 options[i].option_value;
531 break;
532 }
533 }
534 }
535
536 return (1);
537}
538
539__setup("ips=", ips_setup);
540
541
542
543
544
545
546
547
548
549
550
551
552static int
553ips_detect(struct scsi_host_template * SHT)
554{
555 int i;
556
557 METHOD_TRACE("ips_detect", 1);
558
559#ifdef MODULE
560 if (ips)
561 ips_setup(ips);
562#endif
563
564 for (i = 0; i < ips_num_controllers; i++) {
565 if (ips_register_scsi(i))
566 ips_free(ips_ha[i]);
567 ips_released_controllers++;
568 }
569 ips_hotplug = 1;
570 return (ips_num_controllers);
571}
572
573
574
575
576
577static void
578ips_setup_funclist(ips_ha_t * ha)
579{
580
581
582
583
584 if (IPS_IS_MORPHEUS(ha) || IPS_IS_MARCO(ha)) {
585
586 ha->func.isintr = ips_isintr_morpheus;
587 ha->func.isinit = ips_isinit_morpheus;
588 ha->func.issue = ips_issue_i2o_memio;
589 ha->func.init = ips_init_morpheus;
590 ha->func.statupd = ips_statupd_morpheus;
591 ha->func.reset = ips_reset_morpheus;
592 ha->func.intr = ips_intr_morpheus;
593 ha->func.enableint = ips_enable_int_morpheus;
594 } else if (IPS_USE_MEMIO(ha)) {
595
596 ha->func.isintr = ips_isintr_copperhead_memio;
597 ha->func.isinit = ips_isinit_copperhead_memio;
598 ha->func.init = ips_init_copperhead_memio;
599 ha->func.statupd = ips_statupd_copperhead_memio;
600 ha->func.statinit = ips_statinit_memio;
601 ha->func.reset = ips_reset_copperhead_memio;
602 ha->func.intr = ips_intr_copperhead;
603 ha->func.erasebios = ips_erase_bios_memio;
604 ha->func.programbios = ips_program_bios_memio;
605 ha->func.verifybios = ips_verify_bios_memio;
606 ha->func.enableint = ips_enable_int_copperhead_memio;
607 if (IPS_USE_I2O_DELIVER(ha))
608 ha->func.issue = ips_issue_i2o_memio;
609 else
610 ha->func.issue = ips_issue_copperhead_memio;
611 } else {
612
613 ha->func.isintr = ips_isintr_copperhead;
614 ha->func.isinit = ips_isinit_copperhead;
615 ha->func.init = ips_init_copperhead;
616 ha->func.statupd = ips_statupd_copperhead;
617 ha->func.statinit = ips_statinit;
618 ha->func.reset = ips_reset_copperhead;
619 ha->func.intr = ips_intr_copperhead;
620 ha->func.erasebios = ips_erase_bios;
621 ha->func.programbios = ips_program_bios;
622 ha->func.verifybios = ips_verify_bios;
623 ha->func.enableint = ips_enable_int_copperhead;
624
625 if (IPS_USE_I2O_DELIVER(ha))
626 ha->func.issue = ips_issue_i2o;
627 else
628 ha->func.issue = ips_issue_copperhead;
629 }
630}
631
632
633
634
635
636
637
638
639
640
641static int
642ips_release(struct Scsi_Host *sh)
643{
644 ips_scb_t *scb;
645 ips_ha_t *ha;
646 int i;
647
648 METHOD_TRACE("ips_release", 1);
649
650 scsi_remove_host(sh);
651
652 for (i = 0; i < IPS_MAX_ADAPTERS && ips_sh[i] != sh; i++) ;
653
654 if (i == IPS_MAX_ADAPTERS) {
655 printk(KERN_WARNING
656 "(%s) release, invalid Scsi_Host pointer.\n", ips_name);
657 BUG();
658 return (FALSE);
659 }
660
661 ha = IPS_HA(sh);
662
663 if (!ha)
664 return (FALSE);
665
666
667 scb = &ha->scbs[ha->max_cmds - 1];
668
669 ips_init_scb(ha, scb);
670
671 scb->timeout = ips_cmd_timeout;
672 scb->cdb[0] = IPS_CMD_FLUSH;
673
674 scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH;
675 scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb);
676 scb->cmd.flush_cache.state = IPS_NORM_STATE;
677 scb->cmd.flush_cache.reserved = 0;
678 scb->cmd.flush_cache.reserved2 = 0;
679 scb->cmd.flush_cache.reserved3 = 0;
680 scb->cmd.flush_cache.reserved4 = 0;
681
682 IPS_PRINTK(KERN_WARNING, ha->pcidev, "Flushing Cache.\n");
683
684
685 if (ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_ON) == IPS_FAILURE)
686 IPS_PRINTK(KERN_WARNING, ha->pcidev, "Incomplete Flush.\n");
687
688 IPS_PRINTK(KERN_WARNING, ha->pcidev, "Flushing Complete.\n");
689
690 ips_sh[i] = NULL;
691 ips_ha[i] = NULL;
692
693
694 ips_free(ha);
695
696
697 free_irq(ha->pcidev->irq, ha);
698
699 scsi_host_put(sh);
700
701 ips_released_controllers++;
702
703 return (FALSE);
704}
705
706
707
708
709
710
711
712
713
714
715static int
716ips_halt(struct notifier_block *nb, ulong event, void *buf)
717{
718 ips_scb_t *scb;
719 ips_ha_t *ha;
720 int i;
721
722 if ((event != SYS_RESTART) && (event != SYS_HALT) &&
723 (event != SYS_POWER_OFF))
724 return (NOTIFY_DONE);
725
726 for (i = 0; i < ips_next_controller; i++) {
727 ha = (ips_ha_t *) ips_ha[i];
728
729 if (!ha)
730 continue;
731
732 if (!ha->active)
733 continue;
734
735
736 scb = &ha->scbs[ha->max_cmds - 1];
737
738 ips_init_scb(ha, scb);
739
740 scb->timeout = ips_cmd_timeout;
741 scb->cdb[0] = IPS_CMD_FLUSH;
742
743 scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH;
744 scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb);
745 scb->cmd.flush_cache.state = IPS_NORM_STATE;
746 scb->cmd.flush_cache.reserved = 0;
747 scb->cmd.flush_cache.reserved2 = 0;
748 scb->cmd.flush_cache.reserved3 = 0;
749 scb->cmd.flush_cache.reserved4 = 0;
750
751 IPS_PRINTK(KERN_WARNING, ha->pcidev, "Flushing Cache.\n");
752
753
754 if (ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_ON) ==
755 IPS_FAILURE)
756 IPS_PRINTK(KERN_WARNING, ha->pcidev,
757 "Incomplete Flush.\n");
758 else
759 IPS_PRINTK(KERN_WARNING, ha->pcidev,
760 "Flushing Complete.\n");
761 }
762
763 return (NOTIFY_OK);
764}
765
766
767
768
769
770
771
772
773
774
775int ips_eh_abort(struct scsi_cmnd *SC)
776{
777 ips_ha_t *ha;
778 ips_copp_wait_item_t *item;
779 int ret;
780 struct Scsi_Host *host;
781
782 METHOD_TRACE("ips_eh_abort", 1);
783
784 if (!SC)
785 return (FAILED);
786
787 host = SC->device->host;
788 ha = (ips_ha_t *) SC->device->host->hostdata;
789
790 if (!ha)
791 return (FAILED);
792
793 if (!ha->active)
794 return (FAILED);
795
796 spin_lock(host->host_lock);
797
798
799 item = ha->copp_waitlist.head;
800 while ((item) && (item->scsi_cmd != SC))
801 item = item->next;
802
803 if (item) {
804
805 ips_removeq_copp(&ha->copp_waitlist, item);
806 ret = (SUCCESS);
807
808
809 } else if (ips_removeq_wait(&ha->scb_waitlist, SC)) {
810
811 ret = (SUCCESS);
812 } else {
813
814 ret = (FAILED);
815 }
816
817 spin_unlock(host->host_lock);
818 return ret;
819}
820
821
822
823
824
825
826
827
828
829
830
831
832static int __ips_eh_reset(struct scsi_cmnd *SC)
833{
834 int ret;
835 int i;
836 ips_ha_t *ha;
837 ips_scb_t *scb;
838 ips_copp_wait_item_t *item;
839
840 METHOD_TRACE("ips_eh_reset", 1);
841
842#ifdef NO_IPS_RESET
843 return (FAILED);
844#else
845
846 if (!SC) {
847 DEBUG(1, "Reset called with NULL scsi command");
848
849 return (FAILED);
850 }
851
852 ha = (ips_ha_t *) SC->device->host->hostdata;
853
854 if (!ha) {
855 DEBUG(1, "Reset called with NULL ha struct");
856
857 return (FAILED);
858 }
859
860 if (!ha->active)
861 return (FAILED);
862
863
864 item = ha->copp_waitlist.head;
865 while ((item) && (item->scsi_cmd != SC))
866 item = item->next;
867
868 if (item) {
869
870 ips_removeq_copp(&ha->copp_waitlist, item);
871 return (SUCCESS);
872 }
873
874
875 if (ips_removeq_wait(&ha->scb_waitlist, SC)) {
876
877 return (SUCCESS);
878 }
879
880
881
882
883
884
885
886
887
888
889
890 if (ha->ioctl_reset == 0) {
891 scb = &ha->scbs[ha->max_cmds - 1];
892
893 ips_init_scb(ha, scb);
894
895 scb->timeout = ips_cmd_timeout;
896 scb->cdb[0] = IPS_CMD_FLUSH;
897
898 scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH;
899 scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb);
900 scb->cmd.flush_cache.state = IPS_NORM_STATE;
901 scb->cmd.flush_cache.reserved = 0;
902 scb->cmd.flush_cache.reserved2 = 0;
903 scb->cmd.flush_cache.reserved3 = 0;
904 scb->cmd.flush_cache.reserved4 = 0;
905
906
907 ret = ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_IORL);
908 if (ret == IPS_SUCCESS) {
909 IPS_PRINTK(KERN_NOTICE, ha->pcidev,
910 "Reset Request - Flushed Cache\n");
911 return (SUCCESS);
912 }
913 }
914
915
916
917
918 ha->ioctl_reset = 0;
919
920
921
922
923
924 IPS_PRINTK(KERN_NOTICE, ha->pcidev, "Resetting controller.\n");
925 ret = (*ha->func.reset) (ha);
926
927 if (!ret) {
928 struct scsi_cmnd *scsi_cmd;
929
930 IPS_PRINTK(KERN_NOTICE, ha->pcidev,
931 "Controller reset failed - controller now offline.\n");
932
933
934 DEBUG_VAR(1, "(%s%d) Failing active commands",
935 ips_name, ha->host_num);
936
937 while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) {
938 scb->scsi_cmd->result = DID_ERROR << 16;
939 scb->scsi_cmd->scsi_done(scb->scsi_cmd);
940 ips_freescb(ha, scb);
941 }
942
943
944 DEBUG_VAR(1, "(%s%d) Failing pending commands",
945 ips_name, ha->host_num);
946
947 while ((scsi_cmd = ips_removeq_wait_head(&ha->scb_waitlist))) {
948 scsi_cmd->result = DID_ERROR;
949 scsi_cmd->scsi_done(scsi_cmd);
950 }
951
952 ha->active = FALSE;
953 return (FAILED);
954 }
955
956 if (!ips_clear_adapter(ha, IPS_INTR_IORL)) {
957 struct scsi_cmnd *scsi_cmd;
958
959 IPS_PRINTK(KERN_NOTICE, ha->pcidev,
960 "Controller reset failed - controller now offline.\n");
961
962
963 DEBUG_VAR(1, "(%s%d) Failing active commands",
964 ips_name, ha->host_num);
965
966 while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) {
967 scb->scsi_cmd->result = DID_ERROR << 16;
968 scb->scsi_cmd->scsi_done(scb->scsi_cmd);
969 ips_freescb(ha, scb);
970 }
971
972
973 DEBUG_VAR(1, "(%s%d) Failing pending commands",
974 ips_name, ha->host_num);
975
976 while ((scsi_cmd = ips_removeq_wait_head(&ha->scb_waitlist))) {
977 scsi_cmd->result = DID_ERROR << 16;
978 scsi_cmd->scsi_done(scsi_cmd);
979 }
980
981 ha->active = FALSE;
982 return (FAILED);
983 }
984
985
986 if (le32_to_cpu(ha->subsys->param[3]) & 0x300000) {
987 ha->last_ffdc = ktime_get_real_seconds();
988 ha->reset_count++;
989 ips_ffdc_reset(ha, IPS_INTR_IORL);
990 }
991
992
993 DEBUG_VAR(1, "(%s%d) Failing active commands", ips_name, ha->host_num);
994
995 while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) {
996 scb->scsi_cmd->result = DID_RESET << 16;
997 scb->scsi_cmd->scsi_done(scb->scsi_cmd);
998 ips_freescb(ha, scb);
999 }
1000
1001
1002 for (i = 1; i < ha->nbus; i++)
1003 ha->dcdb_active[i - 1] = 0;
1004
1005
1006 ha->num_ioctl = 0;
1007
1008 ips_next(ha, IPS_INTR_IORL);
1009
1010 return (SUCCESS);
1011#endif
1012
1013}
1014
1015static int ips_eh_reset(struct scsi_cmnd *SC)
1016{
1017 int rc;
1018
1019 spin_lock_irq(SC->device->host->host_lock);
1020 rc = __ips_eh_reset(SC);
1021 spin_unlock_irq(SC->device->host->host_lock);
1022
1023 return rc;
1024}
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038static int ips_queue_lck(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *))
1039{
1040 ips_ha_t *ha;
1041 ips_passthru_t *pt;
1042
1043 METHOD_TRACE("ips_queue", 1);
1044
1045 ha = (ips_ha_t *) SC->device->host->hostdata;
1046
1047 if (!ha)
1048 goto out_error;
1049
1050 if (!ha->active)
1051 goto out_error;
1052
1053 if (ips_is_passthru(SC)) {
1054 if (ha->copp_waitlist.count == IPS_MAX_IOCTL_QUEUE) {
1055 SC->result = DID_BUS_BUSY << 16;
1056 done(SC);
1057
1058 return (0);
1059 }
1060 } else if (ha->scb_waitlist.count == IPS_MAX_QUEUE) {
1061 SC->result = DID_BUS_BUSY << 16;
1062 done(SC);
1063
1064 return (0);
1065 }
1066
1067 SC->scsi_done = done;
1068
1069 DEBUG_VAR(2, "(%s%d): ips_queue: cmd 0x%X (%d %d %d)",
1070 ips_name,
1071 ha->host_num,
1072 SC->cmnd[0],
1073 SC->device->channel, SC->device->id, SC->device->lun);
1074
1075
1076 if ((scmd_channel(SC) > 0)
1077 && (scmd_id(SC) == ha->ha_id[scmd_channel(SC)])) {
1078 SC->result = DID_NO_CONNECT << 16;
1079 done(SC);
1080
1081 return (0);
1082 }
1083
1084 if (ips_is_passthru(SC)) {
1085
1086 ips_copp_wait_item_t *scratch;
1087
1088
1089
1090
1091 pt = (ips_passthru_t *) scsi_sglist(SC);
1092 if ((pt->CoppCP.cmd.reset.op_code == IPS_CMD_RESET_CHANNEL) &&
1093 (pt->CoppCP.cmd.reset.adapter_flag == 1)) {
1094 if (ha->scb_activelist.count != 0) {
1095 SC->result = DID_BUS_BUSY << 16;
1096 done(SC);
1097 return (0);
1098 }
1099 ha->ioctl_reset = 1;
1100 __ips_eh_reset(SC);
1101 SC->result = DID_OK << 16;
1102 SC->scsi_done(SC);
1103 return (0);
1104 }
1105
1106
1107 scratch = kmalloc(sizeof (ips_copp_wait_item_t), GFP_ATOMIC);
1108
1109 if (!scratch) {
1110 SC->result = DID_ERROR << 16;
1111 done(SC);
1112
1113 return (0);
1114 }
1115
1116 scratch->scsi_cmd = SC;
1117 scratch->next = NULL;
1118
1119 ips_putq_copp_tail(&ha->copp_waitlist, scratch);
1120 } else {
1121 ips_putq_wait_tail(&ha->scb_waitlist, SC);
1122 }
1123
1124 ips_next(ha, IPS_INTR_IORL);
1125
1126 return (0);
1127out_error:
1128 SC->result = DID_ERROR << 16;
1129 done(SC);
1130
1131 return (0);
1132}
1133
1134static DEF_SCSI_QCMD(ips_queue)
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1146 sector_t capacity, int geom[])
1147{
1148 ips_ha_t *ha = (ips_ha_t *) sdev->host->hostdata;
1149 int heads;
1150 int sectors;
1151 int cylinders;
1152
1153 METHOD_TRACE("ips_biosparam", 1);
1154
1155 if (!ha)
1156
1157 return (0);
1158
1159 if (!ha->active)
1160 return (0);
1161
1162 if (!ips_read_adapter_status(ha, IPS_INTR_ON))
1163
1164 return (0);
1165
1166 if ((capacity > 0x400000) && ((ha->enq->ucMiscFlag & 0x8) == 0)) {
1167 heads = IPS_NORM_HEADS;
1168 sectors = IPS_NORM_SECTORS;
1169 } else {
1170 heads = IPS_COMP_HEADS;
1171 sectors = IPS_COMP_SECTORS;
1172 }
1173
1174 cylinders = (unsigned long) capacity / (heads * sectors);
1175
1176 DEBUG_VAR(2, "Geometry: heads: %d, sectors: %d, cylinders: %d",
1177 heads, sectors, cylinders);
1178
1179 geom[0] = heads;
1180 geom[1] = sectors;
1181 geom[2] = cylinders;
1182
1183 return (0);
1184}
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195static int
1196ips_slave_configure(struct scsi_device * SDptr)
1197{
1198 ips_ha_t *ha;
1199 int min;
1200
1201 ha = IPS_HA(SDptr->host);
1202 if (SDptr->tagged_supported && SDptr->type == TYPE_DISK) {
1203 min = ha->max_cmds / 2;
1204 if (ha->enq->ucLogDriveCount <= 2)
1205 min = ha->max_cmds - 1;
1206 scsi_change_queue_depth(SDptr, min);
1207 }
1208
1209 SDptr->skip_ms_page_8 = 1;
1210 SDptr->skip_ms_page_3f = 1;
1211 return 0;
1212}
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223static irqreturn_t
1224do_ipsintr(int irq, void *dev_id)
1225{
1226 ips_ha_t *ha;
1227 struct Scsi_Host *host;
1228 int irqstatus;
1229
1230 METHOD_TRACE("do_ipsintr", 2);
1231
1232 ha = (ips_ha_t *) dev_id;
1233 if (!ha)
1234 return IRQ_NONE;
1235 host = ips_sh[ha->host_num];
1236
1237 if (!host) {
1238 (*ha->func.intr) (ha);
1239 return IRQ_HANDLED;
1240 }
1241
1242 spin_lock(host->host_lock);
1243
1244 if (!ha->active) {
1245 spin_unlock(host->host_lock);
1246 return IRQ_HANDLED;
1247 }
1248
1249 irqstatus = (*ha->func.intr) (ha);
1250
1251 spin_unlock(host->host_lock);
1252
1253
1254 ips_next(ha, IPS_INTR_ON);
1255 return IRQ_RETVAL(irqstatus);
1256}
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269int
1270ips_intr_copperhead(ips_ha_t * ha)
1271{
1272 ips_stat_t *sp;
1273 ips_scb_t *scb;
1274 IPS_STATUS cstatus;
1275 int intrstatus;
1276
1277 METHOD_TRACE("ips_intr", 2);
1278
1279 if (!ha)
1280 return 0;
1281
1282 if (!ha->active)
1283 return 0;
1284
1285 intrstatus = (*ha->func.isintr) (ha);
1286
1287 if (!intrstatus) {
1288
1289
1290
1291
1292 return 0;
1293 }
1294
1295 while (TRUE) {
1296 sp = &ha->sp;
1297
1298 intrstatus = (*ha->func.isintr) (ha);
1299
1300 if (!intrstatus)
1301 break;
1302 else
1303 cstatus.value = (*ha->func.statupd) (ha);
1304
1305 if (cstatus.fields.command_id > (IPS_MAX_CMDS - 1)) {
1306
1307 continue;
1308 }
1309
1310 ips_chkstatus(ha, &cstatus);
1311 scb = (ips_scb_t *) sp->scb_addr;
1312
1313
1314
1315
1316
1317 (*scb->callback) (ha, scb);
1318 }
1319 return 1;
1320}
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333int
1334ips_intr_morpheus(ips_ha_t * ha)
1335{
1336 ips_stat_t *sp;
1337 ips_scb_t *scb;
1338 IPS_STATUS cstatus;
1339 int intrstatus;
1340
1341 METHOD_TRACE("ips_intr_morpheus", 2);
1342
1343 if (!ha)
1344 return 0;
1345
1346 if (!ha->active)
1347 return 0;
1348
1349 intrstatus = (*ha->func.isintr) (ha);
1350
1351 if (!intrstatus) {
1352
1353
1354
1355
1356 return 0;
1357 }
1358
1359 while (TRUE) {
1360 sp = &ha->sp;
1361
1362 intrstatus = (*ha->func.isintr) (ha);
1363
1364 if (!intrstatus)
1365 break;
1366 else
1367 cstatus.value = (*ha->func.statupd) (ha);
1368
1369 if (cstatus.value == 0xffffffff)
1370
1371 break;
1372
1373 if (cstatus.fields.command_id > (IPS_MAX_CMDS - 1)) {
1374 IPS_PRINTK(KERN_WARNING, ha->pcidev,
1375 "Spurious interrupt; no ccb.\n");
1376
1377 continue;
1378 }
1379
1380 ips_chkstatus(ha, &cstatus);
1381 scb = (ips_scb_t *) sp->scb_addr;
1382
1383
1384
1385
1386
1387 (*scb->callback) (ha, scb);
1388 }
1389 return 1;
1390}
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401static const char *
1402ips_info(struct Scsi_Host *SH)
1403{
1404 static char buffer[256];
1405 char *bp;
1406 ips_ha_t *ha;
1407
1408 METHOD_TRACE("ips_info", 1);
1409
1410 ha = IPS_HA(SH);
1411
1412 if (!ha)
1413 return (NULL);
1414
1415 bp = &buffer[0];
1416 memset(bp, 0, sizeof (buffer));
1417
1418 sprintf(bp, "%s%s%s Build %d", "IBM PCI ServeRAID ",
1419 IPS_VERSION_HIGH, IPS_VERSION_LOW, IPS_BUILD_IDENT);
1420
1421 if (ha->ad_type > 0 && ha->ad_type <= MAX_ADAPTER_NAME) {
1422 strcat(bp, " <");
1423 strcat(bp, ips_adapter_name[ha->ad_type - 1]);
1424 strcat(bp, ">");
1425 }
1426
1427 return (bp);
1428}
1429
1430static int
1431ips_write_info(struct Scsi_Host *host, char *buffer, int length)
1432{
1433 int i;
1434 ips_ha_t *ha = NULL;
1435
1436
1437 for (i = 0; i < ips_next_controller; i++) {
1438 if (ips_sh[i]) {
1439 if (ips_sh[i] == host) {
1440 ha = (ips_ha_t *) ips_sh[i]->hostdata;
1441 break;
1442 }
1443 }
1444 }
1445
1446 if (!ha)
1447 return (-EINVAL);
1448
1449 return 0;
1450}
1451
1452static int
1453ips_show_info(struct seq_file *m, struct Scsi_Host *host)
1454{
1455 int i;
1456 ips_ha_t *ha = NULL;
1457
1458
1459 for (i = 0; i < ips_next_controller; i++) {
1460 if (ips_sh[i]) {
1461 if (ips_sh[i] == host) {
1462 ha = (ips_ha_t *) ips_sh[i]->hostdata;
1463 break;
1464 }
1465 }
1466 }
1467
1468 if (!ha)
1469 return (-EINVAL);
1470
1471 return ips_host_info(ha, m);
1472}
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487static int ips_is_passthru(struct scsi_cmnd *SC)
1488{
1489 unsigned long flags;
1490
1491 METHOD_TRACE("ips_is_passthru", 1);
1492
1493 if (!SC)
1494 return (0);
1495
1496 if ((SC->cmnd[0] == IPS_IOCTL_COMMAND) &&
1497 (SC->device->channel == 0) &&
1498 (SC->device->id == IPS_ADAPTER_ID) &&
1499 (SC->device->lun == 0) && scsi_sglist(SC)) {
1500 struct scatterlist *sg = scsi_sglist(SC);
1501 char *buffer;
1502
1503
1504
1505 local_irq_save(flags);
1506 buffer = kmap_atomic(sg_page(sg)) + sg->offset;
1507 if (buffer && buffer[0] == 'C' && buffer[1] == 'O' &&
1508 buffer[2] == 'P' && buffer[3] == 'P') {
1509 kunmap_atomic(buffer - sg->offset);
1510 local_irq_restore(flags);
1511 return 1;
1512 }
1513 kunmap_atomic(buffer - sg->offset);
1514 local_irq_restore(flags);
1515 }
1516 return 0;
1517}
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527static int
1528ips_alloc_passthru_buffer(ips_ha_t * ha, int length)
1529{
1530 void *bigger_buf;
1531 dma_addr_t dma_busaddr;
1532
1533 if (ha->ioctl_data && length <= ha->ioctl_len)
1534 return 0;
1535
1536 bigger_buf = dma_alloc_coherent(&ha->pcidev->dev, length, &dma_busaddr,
1537 GFP_KERNEL);
1538 if (bigger_buf) {
1539
1540 dma_free_coherent(&ha->pcidev->dev, ha->ioctl_len,
1541 ha->ioctl_data, ha->ioctl_busaddr);
1542
1543 ha->ioctl_data = (char *) bigger_buf;
1544 ha->ioctl_len = length;
1545 ha->ioctl_busaddr = dma_busaddr;
1546 } else {
1547 return -1;
1548 }
1549 return 0;
1550}
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561static int
1562ips_make_passthru(ips_ha_t *ha, struct scsi_cmnd *SC, ips_scb_t *scb, int intr)
1563{
1564 ips_passthru_t *pt;
1565 int length = 0;
1566 int i, ret;
1567 struct scatterlist *sg = scsi_sglist(SC);
1568
1569 METHOD_TRACE("ips_make_passthru", 1);
1570
1571 scsi_for_each_sg(SC, sg, scsi_sg_count(SC), i)
1572 length += sg->length;
1573
1574 if (length < sizeof (ips_passthru_t)) {
1575
1576 DEBUG_VAR(1, "(%s%d) Passthru structure wrong size",
1577 ips_name, ha->host_num);
1578 return (IPS_FAILURE);
1579 }
1580 if (ips_alloc_passthru_buffer(ha, length)) {
1581
1582
1583 if (ha->ioctl_data) {
1584 pt = (ips_passthru_t *) ha->ioctl_data;
1585 ips_scmd_buf_read(SC, pt, sizeof (ips_passthru_t));
1586 pt->BasicStatus = 0x0B;
1587 pt->ExtendedStatus = 0x00;
1588 ips_scmd_buf_write(SC, pt, sizeof (ips_passthru_t));
1589 }
1590 return IPS_FAILURE;
1591 }
1592 ha->ioctl_datasize = length;
1593
1594 ips_scmd_buf_read(SC, ha->ioctl_data, ha->ioctl_datasize);
1595 pt = (ips_passthru_t *) ha->ioctl_data;
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607 switch (pt->CoppCmd) {
1608 case IPS_NUMCTRLS:
1609 memcpy(ha->ioctl_data + sizeof (ips_passthru_t),
1610 &ips_num_controllers, sizeof (int));
1611 ips_scmd_buf_write(SC, ha->ioctl_data,
1612 sizeof (ips_passthru_t) + sizeof (int));
1613 SC->result = DID_OK << 16;
1614
1615 return (IPS_SUCCESS_IMM);
1616
1617 case IPS_COPPUSRCMD:
1618 case IPS_COPPIOCCMD:
1619 if (SC->cmnd[0] == IPS_IOCTL_COMMAND) {
1620 if (length < (sizeof (ips_passthru_t) + pt->CmdBSize)) {
1621
1622 DEBUG_VAR(1,
1623 "(%s%d) Passthru structure wrong size",
1624 ips_name, ha->host_num);
1625
1626 return (IPS_FAILURE);
1627 }
1628
1629 if (ha->pcidev->device == IPS_DEVICEID_COPPERHEAD &&
1630 pt->CoppCP.cmd.flashfw.op_code ==
1631 IPS_CMD_RW_BIOSFW) {
1632 ret = ips_flash_copperhead(ha, pt, scb);
1633 ips_scmd_buf_write(SC, ha->ioctl_data,
1634 sizeof (ips_passthru_t));
1635 return ret;
1636 }
1637 if (ips_usrcmd(ha, pt, scb))
1638 return (IPS_SUCCESS);
1639 else
1640 return (IPS_FAILURE);
1641 }
1642
1643 break;
1644
1645 }
1646
1647 return (IPS_FAILURE);
1648}
1649
1650
1651
1652
1653
1654
1655static int
1656ips_flash_copperhead(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
1657{
1658 int datasize;
1659
1660
1661
1662 if (IPS_IS_TROMBONE(ha) && pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE) {
1663 if (ips_usrcmd(ha, pt, scb))
1664 return IPS_SUCCESS;
1665 else
1666 return IPS_FAILURE;
1667 }
1668 pt->BasicStatus = 0x0B;
1669 pt->ExtendedStatus = 0;
1670 scb->scsi_cmd->result = DID_OK << 16;
1671
1672
1673 if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE &&
1674 pt->CoppCP.cmd.flashfw.direction == IPS_ERASE_BIOS) {
1675 pt->BasicStatus = 0;
1676 return ips_flash_bios(ha, pt, scb);
1677 } else if (pt->CoppCP.cmd.flashfw.packet_num == 0) {
1678 if (ips_FlashData && !test_and_set_bit(0, &ips_FlashDataInUse)){
1679 ha->flash_data = ips_FlashData;
1680 ha->flash_busaddr = ips_flashbusaddr;
1681 ha->flash_len = PAGE_SIZE << 7;
1682 ha->flash_datasize = 0;
1683 } else if (!ha->flash_data) {
1684 datasize = pt->CoppCP.cmd.flashfw.total_packets *
1685 pt->CoppCP.cmd.flashfw.count;
1686 ha->flash_data = dma_alloc_coherent(&ha->pcidev->dev,
1687 datasize, &ha->flash_busaddr, GFP_KERNEL);
1688 if (!ha->flash_data){
1689 printk(KERN_WARNING "Unable to allocate a flash buffer\n");
1690 return IPS_FAILURE;
1691 }
1692 ha->flash_datasize = 0;
1693 ha->flash_len = datasize;
1694 } else
1695 return IPS_FAILURE;
1696 } else {
1697 if (pt->CoppCP.cmd.flashfw.count + ha->flash_datasize >
1698 ha->flash_len) {
1699 ips_free_flash_copperhead(ha);
1700 IPS_PRINTK(KERN_WARNING, ha->pcidev,
1701 "failed size sanity check\n");
1702 return IPS_FAILURE;
1703 }
1704 }
1705 if (!ha->flash_data)
1706 return IPS_FAILURE;
1707 pt->BasicStatus = 0;
1708 memcpy(&ha->flash_data[ha->flash_datasize], pt + 1,
1709 pt->CoppCP.cmd.flashfw.count);
1710 ha->flash_datasize += pt->CoppCP.cmd.flashfw.count;
1711 if (pt->CoppCP.cmd.flashfw.packet_num ==
1712 pt->CoppCP.cmd.flashfw.total_packets - 1) {
1713 if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE)
1714 return ips_flash_bios(ha, pt, scb);
1715 else if (pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE)
1716 return ips_flash_firmware(ha, pt, scb);
1717 }
1718 return IPS_SUCCESS_IMM;
1719}
1720
1721
1722
1723
1724
1725
1726static int
1727ips_flash_bios(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
1728{
1729
1730 if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE &&
1731 pt->CoppCP.cmd.flashfw.direction == IPS_WRITE_BIOS) {
1732 if ((!ha->func.programbios) || (!ha->func.erasebios) ||
1733 (!ha->func.verifybios))
1734 goto error;
1735 if ((*ha->func.erasebios) (ha)) {
1736 DEBUG_VAR(1,
1737 "(%s%d) flash bios failed - unable to erase flash",
1738 ips_name, ha->host_num);
1739 goto error;
1740 } else
1741 if ((*ha->func.programbios) (ha,
1742 ha->flash_data +
1743 IPS_BIOS_HEADER,
1744 ha->flash_datasize -
1745 IPS_BIOS_HEADER, 0)) {
1746 DEBUG_VAR(1,
1747 "(%s%d) flash bios failed - unable to flash",
1748 ips_name, ha->host_num);
1749 goto error;
1750 } else
1751 if ((*ha->func.verifybios) (ha,
1752 ha->flash_data +
1753 IPS_BIOS_HEADER,
1754 ha->flash_datasize -
1755 IPS_BIOS_HEADER, 0)) {
1756 DEBUG_VAR(1,
1757 "(%s%d) flash bios failed - unable to verify flash",
1758 ips_name, ha->host_num);
1759 goto error;
1760 }
1761 ips_free_flash_copperhead(ha);
1762 return IPS_SUCCESS_IMM;
1763 } else if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE &&
1764 pt->CoppCP.cmd.flashfw.direction == IPS_ERASE_BIOS) {
1765 if (!ha->func.erasebios)
1766 goto error;
1767 if ((*ha->func.erasebios) (ha)) {
1768 DEBUG_VAR(1,
1769 "(%s%d) flash bios failed - unable to erase flash",
1770 ips_name, ha->host_num);
1771 goto error;
1772 }
1773 return IPS_SUCCESS_IMM;
1774 }
1775 error:
1776 pt->BasicStatus = 0x0B;
1777 pt->ExtendedStatus = 0x00;
1778 ips_free_flash_copperhead(ha);
1779 return IPS_FAILURE;
1780}
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790static int
1791ips_fill_scb_sg_single(ips_ha_t * ha, dma_addr_t busaddr,
1792 ips_scb_t * scb, int indx, unsigned int e_len)
1793{
1794
1795 int ret_val = 0;
1796
1797 if ((scb->data_len + e_len) > ha->max_xfer) {
1798 e_len = ha->max_xfer - scb->data_len;
1799 scb->breakup = indx;
1800 ++scb->sg_break;
1801 ret_val = -1;
1802 } else {
1803 scb->breakup = 0;
1804 scb->sg_break = 0;
1805 }
1806 if (IPS_USE_ENH_SGLIST(ha)) {
1807 scb->sg_list.enh_list[indx].address_lo =
1808 cpu_to_le32(lower_32_bits(busaddr));
1809 scb->sg_list.enh_list[indx].address_hi =
1810 cpu_to_le32(upper_32_bits(busaddr));
1811 scb->sg_list.enh_list[indx].length = cpu_to_le32(e_len);
1812 } else {
1813 scb->sg_list.std_list[indx].address =
1814 cpu_to_le32(lower_32_bits(busaddr));
1815 scb->sg_list.std_list[indx].length = cpu_to_le32(e_len);
1816 }
1817
1818 ++scb->sg_len;
1819 scb->data_len += e_len;
1820 return ret_val;
1821}
1822
1823
1824
1825
1826
1827
1828static int
1829ips_flash_firmware(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
1830{
1831 IPS_SG_LIST sg_list;
1832 uint32_t cmd_busaddr;
1833
1834 if (pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE &&
1835 pt->CoppCP.cmd.flashfw.direction == IPS_WRITE_FW) {
1836 memset(&pt->CoppCP.cmd, 0, sizeof (IPS_HOST_COMMAND));
1837 pt->CoppCP.cmd.flashfw.op_code = IPS_CMD_DOWNLOAD;
1838 pt->CoppCP.cmd.flashfw.count = cpu_to_le32(ha->flash_datasize);
1839 } else {
1840 pt->BasicStatus = 0x0B;
1841 pt->ExtendedStatus = 0x00;
1842 ips_free_flash_copperhead(ha);
1843 return IPS_FAILURE;
1844 }
1845
1846 sg_list.list = scb->sg_list.list;
1847 cmd_busaddr = scb->scb_busaddr;
1848
1849 memcpy(&scb->cmd, &pt->CoppCP.cmd, sizeof (IPS_IOCTL_CMD));
1850
1851 scb->sg_list.list = sg_list.list;
1852 scb->scb_busaddr = cmd_busaddr;
1853 scb->bus = scb->scsi_cmd->device->channel;
1854 scb->target_id = scb->scsi_cmd->device->id;
1855 scb->lun = scb->scsi_cmd->device->lun;
1856 scb->sg_len = 0;
1857 scb->data_len = 0;
1858 scb->flags = 0;
1859 scb->op_code = 0;
1860 scb->callback = ipsintr_done;
1861 scb->timeout = ips_cmd_timeout;
1862
1863 scb->data_len = ha->flash_datasize;
1864 scb->data_busaddr =
1865 dma_map_single(&ha->pcidev->dev, ha->flash_data, scb->data_len,
1866 IPS_DMA_DIR(scb));
1867 scb->flags |= IPS_SCB_MAP_SINGLE;
1868 scb->cmd.flashfw.command_id = IPS_COMMAND_ID(ha, scb);
1869 scb->cmd.flashfw.buffer_addr = cpu_to_le32(scb->data_busaddr);
1870 if (pt->TimeOut)
1871 scb->timeout = pt->TimeOut;
1872 scb->scsi_cmd->result = DID_OK << 16;
1873 return IPS_SUCCESS;
1874}
1875
1876
1877
1878
1879
1880
1881static void
1882ips_free_flash_copperhead(ips_ha_t * ha)
1883{
1884 if (ha->flash_data == ips_FlashData)
1885 test_and_clear_bit(0, &ips_FlashDataInUse);
1886 else if (ha->flash_data)
1887 dma_free_coherent(&ha->pcidev->dev, ha->flash_len,
1888 ha->flash_data, ha->flash_busaddr);
1889 ha->flash_data = NULL;
1890}
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901static int
1902ips_usrcmd(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
1903{
1904 IPS_SG_LIST sg_list;
1905 uint32_t cmd_busaddr;
1906
1907 METHOD_TRACE("ips_usrcmd", 1);
1908
1909 if ((!scb) || (!pt) || (!ha))
1910 return (0);
1911
1912
1913 sg_list.list = scb->sg_list.list;
1914 cmd_busaddr = scb->scb_busaddr;
1915
1916 memcpy(&scb->cmd, &pt->CoppCP.cmd, sizeof (IPS_IOCTL_CMD));
1917 memcpy(&scb->dcdb, &pt->CoppCP.dcdb, sizeof (IPS_DCDB_TABLE));
1918
1919
1920 scb->sg_list.list = sg_list.list;
1921 scb->scb_busaddr = cmd_busaddr;
1922 scb->bus = scb->scsi_cmd->device->channel;
1923 scb->target_id = scb->scsi_cmd->device->id;
1924 scb->lun = scb->scsi_cmd->device->lun;
1925 scb->sg_len = 0;
1926 scb->data_len = 0;
1927 scb->flags = 0;
1928 scb->op_code = 0;
1929 scb->callback = ipsintr_done;
1930 scb->timeout = ips_cmd_timeout;
1931 scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
1932
1933
1934 if ((scb->cmd.basic_io.op_code == IPS_CMD_READ_SG) ||
1935 (scb->cmd.basic_io.op_code == IPS_CMD_WRITE_SG) ||
1936 (scb->cmd.basic_io.op_code == IPS_CMD_DCDB_SG))
1937 return (0);
1938
1939 if (pt->CmdBSize) {
1940 scb->data_len = pt->CmdBSize;
1941 scb->data_busaddr = ha->ioctl_busaddr + sizeof (ips_passthru_t);
1942 } else {
1943 scb->data_busaddr = 0L;
1944 }
1945
1946 if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB)
1947 scb->cmd.dcdb.dcdb_address = cpu_to_le32(scb->scb_busaddr +
1948 (unsigned long) &scb->
1949 dcdb -
1950 (unsigned long) scb);
1951
1952 if (pt->CmdBSize) {
1953 if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB)
1954 scb->dcdb.buffer_pointer =
1955 cpu_to_le32(scb->data_busaddr);
1956 else
1957 scb->cmd.basic_io.sg_addr =
1958 cpu_to_le32(scb->data_busaddr);
1959 }
1960
1961
1962 if (pt->TimeOut) {
1963 scb->timeout = pt->TimeOut;
1964
1965 if (pt->TimeOut <= 10)
1966 scb->dcdb.cmd_attribute |= IPS_TIMEOUT10;
1967 else if (pt->TimeOut <= 60)
1968 scb->dcdb.cmd_attribute |= IPS_TIMEOUT60;
1969 else
1970 scb->dcdb.cmd_attribute |= IPS_TIMEOUT20M;
1971 }
1972
1973
1974 scb->scsi_cmd->result = DID_OK << 16;
1975
1976
1977 return (1);
1978}
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989static void
1990ips_cleanup_passthru(ips_ha_t * ha, ips_scb_t * scb)
1991{
1992 ips_passthru_t *pt;
1993
1994 METHOD_TRACE("ips_cleanup_passthru", 1);
1995
1996 if ((!scb) || (!scb->scsi_cmd) || (!scsi_sglist(scb->scsi_cmd))) {
1997 DEBUG_VAR(1, "(%s%d) couldn't cleanup after passthru",
1998 ips_name, ha->host_num);
1999
2000 return;
2001 }
2002 pt = (ips_passthru_t *) ha->ioctl_data;
2003
2004
2005 if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB)
2006 memcpy(&pt->CoppCP.dcdb, &scb->dcdb, sizeof (IPS_DCDB_TABLE));
2007
2008 pt->BasicStatus = scb->basic_status;
2009 pt->ExtendedStatus = scb->extended_status;
2010 pt->AdapterType = ha->ad_type;
2011
2012 if (ha->pcidev->device == IPS_DEVICEID_COPPERHEAD &&
2013 (scb->cmd.flashfw.op_code == IPS_CMD_DOWNLOAD ||
2014 scb->cmd.flashfw.op_code == IPS_CMD_RW_BIOSFW))
2015 ips_free_flash_copperhead(ha);
2016
2017 ips_scmd_buf_write(scb->scsi_cmd, ha->ioctl_data, ha->ioctl_datasize);
2018}
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029static int
2030ips_host_info(ips_ha_t *ha, struct seq_file *m)
2031{
2032 METHOD_TRACE("ips_host_info", 1);
2033
2034 seq_puts(m, "\nIBM ServeRAID General Information:\n\n");
2035
2036 if ((le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) &&
2037 (le16_to_cpu(ha->nvram->adapter_type) != 0))
2038 seq_printf(m, "\tController Type : %s\n",
2039 ips_adapter_name[ha->ad_type - 1]);
2040 else
2041 seq_puts(m, "\tController Type : Unknown\n");
2042
2043 if (ha->io_addr)
2044 seq_printf(m,
2045 "\tIO region : 0x%x (%d bytes)\n",
2046 ha->io_addr, ha->io_len);
2047
2048 if (ha->mem_addr) {
2049 seq_printf(m,
2050 "\tMemory region : 0x%x (%d bytes)\n",
2051 ha->mem_addr, ha->mem_len);
2052 seq_printf(m,
2053 "\tShared memory address : 0x%lx\n",
2054 (unsigned long)ha->mem_ptr);
2055 }
2056
2057 seq_printf(m, "\tIRQ number : %d\n", ha->pcidev->irq);
2058
2059
2060
2061
2062 if (le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) {
2063 if (ha->nvram->bios_low[3] == 0) {
2064 seq_printf(m,
2065 "\tBIOS Version : %c%c%c%c%c%c%c\n",
2066 ha->nvram->bios_high[0], ha->nvram->bios_high[1],
2067 ha->nvram->bios_high[2], ha->nvram->bios_high[3],
2068 ha->nvram->bios_low[0], ha->nvram->bios_low[1],
2069 ha->nvram->bios_low[2]);
2070
2071 } else {
2072 seq_printf(m,
2073 "\tBIOS Version : %c%c%c%c%c%c%c%c\n",
2074 ha->nvram->bios_high[0], ha->nvram->bios_high[1],
2075 ha->nvram->bios_high[2], ha->nvram->bios_high[3],
2076 ha->nvram->bios_low[0], ha->nvram->bios_low[1],
2077 ha->nvram->bios_low[2], ha->nvram->bios_low[3]);
2078 }
2079
2080 }
2081
2082 if (ha->enq->CodeBlkVersion[7] == 0) {
2083 seq_printf(m,
2084 "\tFirmware Version : %c%c%c%c%c%c%c\n",
2085 ha->enq->CodeBlkVersion[0], ha->enq->CodeBlkVersion[1],
2086 ha->enq->CodeBlkVersion[2], ha->enq->CodeBlkVersion[3],
2087 ha->enq->CodeBlkVersion[4], ha->enq->CodeBlkVersion[5],
2088 ha->enq->CodeBlkVersion[6]);
2089 } else {
2090 seq_printf(m,
2091 "\tFirmware Version : %c%c%c%c%c%c%c%c\n",
2092 ha->enq->CodeBlkVersion[0], ha->enq->CodeBlkVersion[1],
2093 ha->enq->CodeBlkVersion[2], ha->enq->CodeBlkVersion[3],
2094 ha->enq->CodeBlkVersion[4], ha->enq->CodeBlkVersion[5],
2095 ha->enq->CodeBlkVersion[6], ha->enq->CodeBlkVersion[7]);
2096 }
2097
2098 if (ha->enq->BootBlkVersion[7] == 0) {
2099 seq_printf(m,
2100 "\tBoot Block Version : %c%c%c%c%c%c%c\n",
2101 ha->enq->BootBlkVersion[0], ha->enq->BootBlkVersion[1],
2102 ha->enq->BootBlkVersion[2], ha->enq->BootBlkVersion[3],
2103 ha->enq->BootBlkVersion[4], ha->enq->BootBlkVersion[5],
2104 ha->enq->BootBlkVersion[6]);
2105 } else {
2106 seq_printf(m,
2107 "\tBoot Block Version : %c%c%c%c%c%c%c%c\n",
2108 ha->enq->BootBlkVersion[0], ha->enq->BootBlkVersion[1],
2109 ha->enq->BootBlkVersion[2], ha->enq->BootBlkVersion[3],
2110 ha->enq->BootBlkVersion[4], ha->enq->BootBlkVersion[5],
2111 ha->enq->BootBlkVersion[6], ha->enq->BootBlkVersion[7]);
2112 }
2113
2114 seq_printf(m, "\tDriver Version : %s%s\n",
2115 IPS_VERSION_HIGH, IPS_VERSION_LOW);
2116
2117 seq_printf(m, "\tDriver Build : %d\n",
2118 IPS_BUILD_IDENT);
2119
2120 seq_printf(m, "\tMax Physical Devices : %d\n",
2121 ha->enq->ucMaxPhysicalDevices);
2122 seq_printf(m, "\tMax Active Commands : %d\n",
2123 ha->max_cmds);
2124 seq_printf(m, "\tCurrent Queued Commands : %d\n",
2125 ha->scb_waitlist.count);
2126 seq_printf(m, "\tCurrent Active Commands : %d\n",
2127 ha->scb_activelist.count - ha->num_ioctl);
2128 seq_printf(m, "\tCurrent Queued PT Commands : %d\n",
2129 ha->copp_waitlist.count);
2130 seq_printf(m, "\tCurrent Active PT Commands : %d\n",
2131 ha->num_ioctl);
2132
2133 seq_putc(m, '\n');
2134
2135 return 0;
2136}
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147static void
2148ips_identify_controller(ips_ha_t * ha)
2149{
2150 METHOD_TRACE("ips_identify_controller", 1);
2151
2152 switch (ha->pcidev->device) {
2153 case IPS_DEVICEID_COPPERHEAD:
2154 if (ha->pcidev->revision <= IPS_REVID_SERVERAID) {
2155 ha->ad_type = IPS_ADTYPE_SERVERAID;
2156 } else if (ha->pcidev->revision == IPS_REVID_SERVERAID2) {
2157 ha->ad_type = IPS_ADTYPE_SERVERAID2;
2158 } else if (ha->pcidev->revision == IPS_REVID_NAVAJO) {
2159 ha->ad_type = IPS_ADTYPE_NAVAJO;
2160 } else if ((ha->pcidev->revision == IPS_REVID_SERVERAID2)
2161 && (ha->slot_num == 0)) {
2162 ha->ad_type = IPS_ADTYPE_KIOWA;
2163 } else if ((ha->pcidev->revision >= IPS_REVID_CLARINETP1) &&
2164 (ha->pcidev->revision <= IPS_REVID_CLARINETP3)) {
2165 if (ha->enq->ucMaxPhysicalDevices == 15)
2166 ha->ad_type = IPS_ADTYPE_SERVERAID3L;
2167 else
2168 ha->ad_type = IPS_ADTYPE_SERVERAID3;
2169 } else if ((ha->pcidev->revision >= IPS_REVID_TROMBONE32) &&
2170 (ha->pcidev->revision <= IPS_REVID_TROMBONE64)) {
2171 ha->ad_type = IPS_ADTYPE_SERVERAID4H;
2172 }
2173 break;
2174
2175 case IPS_DEVICEID_MORPHEUS:
2176 switch (ha->pcidev->subsystem_device) {
2177 case IPS_SUBDEVICEID_4L:
2178 ha->ad_type = IPS_ADTYPE_SERVERAID4L;
2179 break;
2180
2181 case IPS_SUBDEVICEID_4M:
2182 ha->ad_type = IPS_ADTYPE_SERVERAID4M;
2183 break;
2184
2185 case IPS_SUBDEVICEID_4MX:
2186 ha->ad_type = IPS_ADTYPE_SERVERAID4MX;
2187 break;
2188
2189 case IPS_SUBDEVICEID_4LX:
2190 ha->ad_type = IPS_ADTYPE_SERVERAID4LX;
2191 break;
2192
2193 case IPS_SUBDEVICEID_5I2:
2194 ha->ad_type = IPS_ADTYPE_SERVERAID5I2;
2195 break;
2196
2197 case IPS_SUBDEVICEID_5I1:
2198 ha->ad_type = IPS_ADTYPE_SERVERAID5I1;
2199 break;
2200 }
2201
2202 break;
2203
2204 case IPS_DEVICEID_MARCO:
2205 switch (ha->pcidev->subsystem_device) {
2206 case IPS_SUBDEVICEID_6M:
2207 ha->ad_type = IPS_ADTYPE_SERVERAID6M;
2208 break;
2209 case IPS_SUBDEVICEID_6I:
2210 ha->ad_type = IPS_ADTYPE_SERVERAID6I;
2211 break;
2212 case IPS_SUBDEVICEID_7k:
2213 ha->ad_type = IPS_ADTYPE_SERVERAID7k;
2214 break;
2215 case IPS_SUBDEVICEID_7M:
2216 ha->ad_type = IPS_ADTYPE_SERVERAID7M;
2217 break;
2218 }
2219 break;
2220 }
2221}
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232static void
2233ips_get_bios_version(ips_ha_t * ha, int intr)
2234{
2235 ips_scb_t *scb;
2236 int ret;
2237 uint8_t major;
2238 uint8_t minor;
2239 uint8_t subminor;
2240 uint8_t *buffer;
2241
2242 METHOD_TRACE("ips_get_bios_version", 1);
2243
2244 major = 0;
2245 minor = 0;
2246
2247 memcpy(ha->bios_version, " ?", 8);
2248
2249 if (ha->pcidev->device == IPS_DEVICEID_COPPERHEAD) {
2250 if (IPS_USE_MEMIO(ha)) {
2251
2252
2253
2254 writel(0, ha->mem_ptr + IPS_REG_FLAP);
2255 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2256 udelay(25);
2257
2258 if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0x55)
2259 return;
2260
2261 writel(1, ha->mem_ptr + IPS_REG_FLAP);
2262 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2263 udelay(25);
2264
2265 if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0xAA)
2266 return;
2267
2268
2269 writel(0x1FF, ha->mem_ptr + IPS_REG_FLAP);
2270 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2271 udelay(25);
2272
2273 major = readb(ha->mem_ptr + IPS_REG_FLDP);
2274
2275
2276 writel(0x1FE, ha->mem_ptr + IPS_REG_FLAP);
2277 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2278 udelay(25);
2279 minor = readb(ha->mem_ptr + IPS_REG_FLDP);
2280
2281
2282 writel(0x1FD, ha->mem_ptr + IPS_REG_FLAP);
2283 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2284 udelay(25);
2285 subminor = readb(ha->mem_ptr + IPS_REG_FLDP);
2286
2287 } else {
2288
2289
2290
2291 outl(0, ha->io_addr + IPS_REG_FLAP);
2292 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2293 udelay(25);
2294
2295 if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55)
2296 return;
2297
2298 outl(1, ha->io_addr + IPS_REG_FLAP);
2299 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2300 udelay(25);
2301
2302 if (inb(ha->io_addr + IPS_REG_FLDP) != 0xAA)
2303 return;
2304
2305
2306 outl(0x1FF, ha->io_addr + IPS_REG_FLAP);
2307 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2308 udelay(25);
2309
2310 major = inb(ha->io_addr + IPS_REG_FLDP);
2311
2312
2313 outl(0x1FE, ha->io_addr + IPS_REG_FLAP);
2314 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2315 udelay(25);
2316
2317 minor = inb(ha->io_addr + IPS_REG_FLDP);
2318
2319
2320 outl(0x1FD, ha->io_addr + IPS_REG_FLAP);
2321 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2322 udelay(25);
2323
2324 subminor = inb(ha->io_addr + IPS_REG_FLDP);
2325
2326 }
2327 } else {
2328
2329
2330 buffer = ha->ioctl_data;
2331
2332 memset(buffer, 0, 0x1000);
2333
2334 scb = &ha->scbs[ha->max_cmds - 1];
2335
2336 ips_init_scb(ha, scb);
2337
2338 scb->timeout = ips_cmd_timeout;
2339 scb->cdb[0] = IPS_CMD_RW_BIOSFW;
2340
2341 scb->cmd.flashfw.op_code = IPS_CMD_RW_BIOSFW;
2342 scb->cmd.flashfw.command_id = IPS_COMMAND_ID(ha, scb);
2343 scb->cmd.flashfw.type = 1;
2344 scb->cmd.flashfw.direction = 0;
2345 scb->cmd.flashfw.count = cpu_to_le32(0x800);
2346 scb->cmd.flashfw.total_packets = 1;
2347 scb->cmd.flashfw.packet_num = 0;
2348 scb->data_len = 0x1000;
2349 scb->cmd.flashfw.buffer_addr = ha->ioctl_busaddr;
2350
2351
2352 if (((ret =
2353 ips_send_wait(ha, scb, ips_cmd_timeout,
2354 intr)) == IPS_FAILURE)
2355 || (ret == IPS_SUCCESS_IMM)
2356 || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) {
2357
2358
2359 return;
2360 }
2361
2362 if ((buffer[0xC0] == 0x55) && (buffer[0xC1] == 0xAA)) {
2363 major = buffer[0x1ff + 0xC0];
2364 minor = buffer[0x1fe + 0xC0];
2365 subminor = buffer[0x1fd + 0xC0];
2366 } else {
2367 return;
2368 }
2369 }
2370
2371 ha->bios_version[0] = hex_asc_upper_hi(major);
2372 ha->bios_version[1] = '.';
2373 ha->bios_version[2] = hex_asc_upper_lo(major);
2374 ha->bios_version[3] = hex_asc_upper_lo(subminor);
2375 ha->bios_version[4] = '.';
2376 ha->bios_version[5] = hex_asc_upper_hi(minor);
2377 ha->bios_version[6] = hex_asc_upper_lo(minor);
2378 ha->bios_version[7] = 0;
2379}
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392static int
2393ips_hainit(ips_ha_t * ha)
2394{
2395 int i;
2396
2397 METHOD_TRACE("ips_hainit", 1);
2398
2399 if (!ha)
2400 return (0);
2401
2402 if (ha->func.statinit)
2403 (*ha->func.statinit) (ha);
2404
2405 if (ha->func.enableint)
2406 (*ha->func.enableint) (ha);
2407
2408
2409 ha->reset_count = 1;
2410 ha->last_ffdc = ktime_get_real_seconds();
2411 ips_ffdc_reset(ha, IPS_INTR_IORL);
2412
2413 if (!ips_read_config(ha, IPS_INTR_IORL)) {
2414 IPS_PRINTK(KERN_WARNING, ha->pcidev,
2415 "unable to read config from controller.\n");
2416
2417 return (0);
2418 }
2419
2420 if (!ips_read_adapter_status(ha, IPS_INTR_IORL)) {
2421 IPS_PRINTK(KERN_WARNING, ha->pcidev,
2422 "unable to read controller status.\n");
2423
2424 return (0);
2425 }
2426
2427
2428 ips_identify_controller(ha);
2429
2430 if (!ips_read_subsystem_parameters(ha, IPS_INTR_IORL)) {
2431 IPS_PRINTK(KERN_WARNING, ha->pcidev,
2432 "unable to read subsystem parameters.\n");
2433
2434 return (0);
2435 }
2436
2437
2438 if (!ips_write_driver_status(ha, IPS_INTR_IORL)) {
2439 IPS_PRINTK(KERN_WARNING, ha->pcidev,
2440 "unable to write driver info to controller.\n");
2441
2442 return (0);
2443 }
2444
2445
2446 if ((ha->conf->ucLogDriveCount > 0) && (ha->requires_esl == 1))
2447 ips_clear_adapter(ha, IPS_INTR_IORL);
2448
2449
2450 ha->ntargets = IPS_MAX_TARGETS + 1;
2451 ha->nlun = 1;
2452 ha->nbus = (ha->enq->ucMaxPhysicalDevices / IPS_MAX_TARGETS) + 1;
2453
2454 switch (ha->conf->logical_drive[0].ucStripeSize) {
2455 case 4:
2456 ha->max_xfer = 0x10000;
2457 break;
2458
2459 case 5:
2460 ha->max_xfer = 0x20000;
2461 break;
2462
2463 case 6:
2464 ha->max_xfer = 0x40000;
2465 break;
2466
2467 case 7:
2468 default:
2469 ha->max_xfer = 0x80000;
2470 break;
2471 }
2472
2473
2474 if (le32_to_cpu(ha->subsys->param[4]) & 0x1) {
2475
2476 ha->max_cmds = ha->enq->ucConcurrentCmdCount;
2477 } else {
2478
2479 switch (ha->conf->logical_drive[0].ucStripeSize) {
2480 case 4:
2481 ha->max_cmds = 32;
2482 break;
2483
2484 case 5:
2485 ha->max_cmds = 16;
2486 break;
2487
2488 case 6:
2489 ha->max_cmds = 8;
2490 break;
2491
2492 case 7:
2493 default:
2494 ha->max_cmds = 4;
2495 break;
2496 }
2497 }
2498
2499
2500 if ((ha->ad_type == IPS_ADTYPE_SERVERAID3L) ||
2501 (ha->ad_type == IPS_ADTYPE_SERVERAID4L) ||
2502 (ha->ad_type == IPS_ADTYPE_SERVERAID4LX)) {
2503 if ((ha->max_cmds > MaxLiteCmds) && (MaxLiteCmds))
2504 ha->max_cmds = MaxLiteCmds;
2505 }
2506
2507
2508 ha->ha_id[0] = IPS_ADAPTER_ID;
2509 for (i = 1; i < ha->nbus; i++) {
2510 ha->ha_id[i] = ha->conf->init_id[i - 1] & 0x1f;
2511 ha->dcdb_active[i - 1] = 0;
2512 }
2513
2514 return (1);
2515}
2516
2517
2518
2519
2520
2521
2522
2523
2524
2525
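/*
 * ips_next - start commands queued to the adapter.
 *
 * Pulls passthru requests off the copp wait list and normal SCSI
 * commands off the SCB wait list, builds SCBs (including the
 * scatter/gather mapping) and issues them, moving successfully issued
 * SCBs onto the active list.  When the adapter is idle it also emits a
 * periodic FFDC time stamp.  When called with intr == IPS_INTR_ON the
 * host lock is taken here and dropped temporarily around the passthru
 * setup and the scatter/gather mapping.
 */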
2526static void
2527ips_next(ips_ha_t * ha, int intr)
2528{
2529 ips_scb_t *scb;
2530 struct scsi_cmnd *SC;
2531 struct scsi_cmnd *p;
2532 struct scsi_cmnd *q;
2533 ips_copp_wait_item_t *item;
2534 int ret;
2535 struct Scsi_Host *host;
2536 METHOD_TRACE("ips_next", 1);
2537
2538 if (!ha)
2539 return;
2540 host = ips_sh[ha->host_num];
2541
2542
2543
2544
2545 if (intr == IPS_INTR_ON)
2546 spin_lock(host->host_lock);
2547
2548 if ((ha->subsys->param[3] & 0x300000)
2549 && (ha->scb_activelist.count == 0)) {
2550 time64_t now = ktime_get_real_seconds();
2551 if (now - ha->last_ffdc > IPS_SECS_8HOURS) {
2552 ha->last_ffdc = now;
2553 ips_ffdc_time(ha);
2554 }
2555 }
2556
2557
2558
2559
2560
2561
2562
2563
2564 while ((ha->num_ioctl < IPS_MAX_IOCTL) &&
2565 (ha->copp_waitlist.head) && (scb = ips_getscb(ha))) {
2566
2567 item = ips_removeq_copp_head(&ha->copp_waitlist);
2568 ha->num_ioctl++;
2569 if (intr == IPS_INTR_ON)
2570 spin_unlock(host->host_lock);
2571 scb->scsi_cmd = item->scsi_cmd;
2572 kfree(item);
2573
2574 ret = ips_make_passthru(ha, scb->scsi_cmd, scb, intr);
2575
2576 if (intr == IPS_INTR_ON)
2577 spin_lock(host->host_lock);
2578 switch (ret) {
2579 case IPS_FAILURE:
2580 if (scb->scsi_cmd) {
2581 scb->scsi_cmd->result = DID_ERROR << 16;
2582 scb->scsi_cmd->scsi_done(scb->scsi_cmd);
2583 }
2584
2585 ips_freescb(ha, scb);
2586 break;
2587 case IPS_SUCCESS_IMM:
2588 if (scb->scsi_cmd) {
2589 scb->scsi_cmd->result = DID_OK << 16;
2590 scb->scsi_cmd->scsi_done(scb->scsi_cmd);
2591 }
2592
2593 ips_freescb(ha, scb);
2594 break;
2595 default:
2596 break;
2597 }
2598
2599 if (ret != IPS_SUCCESS) {
2600 ha->num_ioctl--;
2601 continue;
2602 }
2603
2604 ret = ips_send_cmd(ha, scb);
2605
2606 if (ret == IPS_SUCCESS)
2607 ips_putq_scb_head(&ha->scb_activelist, scb);
2608 else
2609 ha->num_ioctl--;
2610
2611 switch (ret) {
2612 case IPS_FAILURE:
2613 if (scb->scsi_cmd) {
2614 scb->scsi_cmd->result = DID_ERROR << 16;
2615 }
2616
2617 ips_freescb(ha, scb);
2618 break;
2619 case IPS_SUCCESS_IMM:
2620 ips_freescb(ha, scb);
2621 break;
2622 default:
2623 break;
2624 }
2625
2626 }
2627
2628
2629
2630
2631
2632 p = ha->scb_waitlist.head;
2633 while ((p) && (scb = ips_getscb(ha))) {
2634 if ((scmd_channel(p) > 0) &&
2635 (ha->dcdb_active[scmd_channel(p) - 1] & (1 << scmd_id(p)))) {
2638 ips_freescb(ha, scb);
2639 p = (struct scsi_cmnd *) p->host_scribble;
2640 continue;
2641 }
2642
2643 q = p;
2644 SC = ips_removeq_wait(&ha->scb_waitlist, q);
2645
2646 if (intr == IPS_INTR_ON)
2647 spin_unlock(host->host_lock);
2648
2649 SC->result = DID_OK;
2650 SC->host_scribble = NULL;
2651
2652 scb->target_id = SC->device->id;
2653 scb->lun = SC->device->lun;
2654 scb->bus = SC->device->channel;
2655 scb->scsi_cmd = SC;
2656 scb->breakup = 0;
2657 scb->data_len = 0;
2658 scb->callback = ipsintr_done;
2659 scb->timeout = ips_cmd_timeout;
2660 memset(&scb->cmd, 0, 16);
2661
2662
2663 memcpy(scb->cdb, SC->cmnd, SC->cmd_len);
2664
2665 scb->sg_count = scsi_dma_map(SC);
2666 BUG_ON(scb->sg_count < 0);
2667 if (scb->sg_count) {
2668 struct scatterlist *sg;
2669 int i;
2670
2671 scb->flags |= IPS_SCB_MAP_SG;
2672
2673 scsi_for_each_sg(SC, sg, scb->sg_count, i) {
2674 if (ips_fill_scb_sg_single
2675 (ha, sg_dma_address(sg), scb, i,
2676 sg_dma_len(sg)) < 0)
2677 break;
2678 }
2679 scb->dcdb.transfer_length = scb->data_len;
2680 } else {
2681 scb->data_busaddr = 0L;
2682 scb->sg_len = 0;
2683 scb->data_len = 0;
2684 scb->dcdb.transfer_length = 0;
2685 }
2686
2687 scb->dcdb.cmd_attribute =
2688 ips_command_direction[scb->scsi_cmd->cmnd[0]];
2689
2690
2691
2692 if ((scb->scsi_cmd->cmnd[0] == WRITE_BUFFER) &&
2693 (scb->data_len == 0))
2694 scb->dcdb.cmd_attribute = 0;
2695
2696 if (!(scb->dcdb.cmd_attribute & 0x3))
2697 scb->dcdb.transfer_length = 0;
2698
2699 if (scb->data_len >= IPS_MAX_XFER) {
2700 scb->dcdb.cmd_attribute |= IPS_TRANSFER64K;
2701 scb->dcdb.transfer_length = 0;
2702 }
2703 if (intr == IPS_INTR_ON)
2704 spin_lock(host->host_lock);
2705
2706 ret = ips_send_cmd(ha, scb);
2707
2708 switch (ret) {
2709 case IPS_SUCCESS:
2710 ips_putq_scb_head(&ha->scb_activelist, scb);
2711 break;
2712 case IPS_FAILURE:
2713 if (scb->scsi_cmd) {
2714 scb->scsi_cmd->result = DID_ERROR << 16;
2715 scb->scsi_cmd->scsi_done(scb->scsi_cmd);
2716 }
2717
2718 if (scb->bus)
2719 ha->dcdb_active[scb->bus - 1] &=
2720 ~(1 << scb->target_id);
2721
2722 ips_freescb(ha, scb);
2723 break;
2724 case IPS_SUCCESS_IMM:
2725 if (scb->scsi_cmd)
2726 scb->scsi_cmd->scsi_done(scb->scsi_cmd);
2727
2728 if (scb->bus)
2729 ha->dcdb_active[scb->bus - 1] &=
2730 ~(1 << scb->target_id);
2731
2732 ips_freescb(ha, scb);
2733 break;
2734 default:
2735 break;
2736 }
2737
2738 p = (struct scsi_cmnd *) p->host_scribble;
2739
2740 }
2741
2742 if (intr == IPS_INTR_ON)
2743 spin_unlock(host->host_lock);
2744}
2745
2746
2747
2748
2749
2750
2751
2752
2753
2754
2755
2756
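/*
 * SCB queue helpers.  ips_putq_scb_head() pushes an SCB onto the head
 * of a queue; ips_removeq_scb_head() pops the head and ips_removeq_scb()
 * unlinks an arbitrary element.  These routines do no locking of their
 * own; callers serialize access.
 */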
2757static void
2758ips_putq_scb_head(ips_scb_queue_t * queue, ips_scb_t * item)
2759{
2760 METHOD_TRACE("ips_putq_scb_head", 1);
2761
2762 if (!item)
2763 return;
2764
2765 item->q_next = queue->head;
2766 queue->head = item;
2767
2768 if (!queue->tail)
2769 queue->tail = item;
2770
2771 queue->count++;
2772}
2773
2774
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785static ips_scb_t *
2786ips_removeq_scb_head(ips_scb_queue_t * queue)
2787{
2788 ips_scb_t *item;
2789
2790 METHOD_TRACE("ips_removeq_scb_head", 1);
2791
2792 item = queue->head;
2793
2794 if (!item) {
2795 return (NULL);
2796 }
2797
2798 queue->head = item->q_next;
2799 item->q_next = NULL;
2800
2801 if (queue->tail == item)
2802 queue->tail = NULL;
2803
2804 queue->count--;
2805
2806 return (item);
2807}
2808
2809
2810
2811
2812
2813
2814
2815
2816
2817
2818
2819
2820static ips_scb_t *
2821ips_removeq_scb(ips_scb_queue_t * queue, ips_scb_t * item)
2822{
2823 ips_scb_t *p;
2824
2825 METHOD_TRACE("ips_removeq_scb", 1);
2826
2827 if (!item)
2828 return (NULL);
2829
2830 if (item == queue->head) {
2831 return (ips_removeq_scb_head(queue));
2832 }
2833
2834 p = queue->head;
2835
2836 while ((p) && (item != p->q_next))
2837 p = p->q_next;
2838
2839 if (p) {
2840
2841 p->q_next = item->q_next;
2842
2843 if (!item->q_next)
2844 queue->tail = p;
2845
2846 item->q_next = NULL;
2847 queue->count--;
2848
2849 return (item);
2850 }
2851
2852 return (NULL);
2853}
2854
2855
2856
2857
2858
2859
2860
2861
2862
2863
2864
2865
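/*
 * Wait queue helpers for queued struct scsi_cmnd requests, which are
 * chained through the host_scribble field.
 */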
2866static void ips_putq_wait_tail(ips_wait_queue_entry_t *queue, struct scsi_cmnd *item)
2867{
2868 METHOD_TRACE("ips_putq_wait_tail", 1);
2869
2870 if (!item)
2871 return;
2872
2873 item->host_scribble = NULL;
2874
2875 if (queue->tail)
2876 queue->tail->host_scribble = (char *) item;
2877
2878 queue->tail = item;
2879
2880 if (!queue->head)
2881 queue->head = item;
2882
2883 queue->count++;
2884}
2885
2886
2887
2888
2889
2890
2891
2892
2893
2894
2895
2896
2897static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_entry_t *queue)
2898{
2899 struct scsi_cmnd *item;
2900
2901 METHOD_TRACE("ips_removeq_wait_head", 1);
2902
2903 item = queue->head;
2904
2905 if (!item) {
2906 return (NULL);
2907 }
2908
2909 queue->head = (struct scsi_cmnd *) item->host_scribble;
2910 item->host_scribble = NULL;
2911
2912 if (queue->tail == item)
2913 queue->tail = NULL;
2914
2915 queue->count--;
2916
2917 return (item);
2918}
2919
2920
2921
2922
2923
2924
2925
2926
2927
2928
2929
2930
2931static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_entry_t *queue,
2932 struct scsi_cmnd *item)
2933{
2934 struct scsi_cmnd *p;
2935
2936 METHOD_TRACE("ips_removeq_wait", 1);
2937
2938 if (!item)
2939 return (NULL);
2940
2941 if (item == queue->head) {
2942 return (ips_removeq_wait_head(queue));
2943 }
2944
2945 p = queue->head;
2946
2947 while ((p) && (item != (struct scsi_cmnd *) p->host_scribble))
2948 p = (struct scsi_cmnd *) p->host_scribble;
2949
2950 if (p) {
2951
2952 p->host_scribble = item->host_scribble;
2953
2954 if (!item->host_scribble)
2955 queue->tail = p;
2956
2957 item->host_scribble = NULL;
2958 queue->count--;
2959
2960 return (item);
2961 }
2962
2963 return (NULL);
2964}
2965
2966
2967
2968
2969
2970
2971
2972
2973
2974
2975
2976
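/*
 * Wait queue helpers for passthru (ioctl) requests, chained through the
 * ips_copp_wait_item_t next pointers.
 */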
2977static void
2978ips_putq_copp_tail(ips_copp_queue_t * queue, ips_copp_wait_item_t * item)
2979{
2980 METHOD_TRACE("ips_putq_copp_tail", 1);
2981
2982 if (!item)
2983 return;
2984
2985 item->next = NULL;
2986
2987 if (queue->tail)
2988 queue->tail->next = item;
2989
2990 queue->tail = item;
2991
2992 if (!queue->head)
2993 queue->head = item;
2994
2995 queue->count++;
2996}
2997
2998
2999
3000
3001
3002
3003
3004
3005
3006
3007
3008
3009static ips_copp_wait_item_t *
3010ips_removeq_copp_head(ips_copp_queue_t * queue)
3011{
3012 ips_copp_wait_item_t *item;
3013
3014 METHOD_TRACE("ips_removeq_copp_head", 1);
3015
3016 item = queue->head;
3017
3018 if (!item) {
3019 return (NULL);
3020 }
3021
3022 queue->head = item->next;
3023 item->next = NULL;
3024
3025 if (queue->tail == item)
3026 queue->tail = NULL;
3027
3028 queue->count--;
3029
3030 return (item);
3031}
3032
3033
3034
3035
3036
3037
3038
3039
3040
3041
3042
3043
3044static ips_copp_wait_item_t *
3045ips_removeq_copp(ips_copp_queue_t * queue, ips_copp_wait_item_t * item)
3046{
3047 ips_copp_wait_item_t *p;
3048
3049 METHOD_TRACE("ips_removeq_copp", 1);
3050
3051 if (!item)
3052 return (NULL);
3053
3054 if (item == queue->head) {
3055 return (ips_removeq_copp_head(queue));
3056 }
3057
3058 p = queue->head;
3059
3060 while ((p) && (item != p->next))
3061 p = p->next;
3062
3063 if (p) {
3064
3065 p->next = item->next;
3066
3067 if (!item->next)
3068 queue->tail = p;
3069
3070 item->next = NULL;
3071 queue->count--;
3072
3073 return (item);
3074 }
3075
3076 return (NULL);
3077}
3078
3079
3080
3081
3082
3083
3084
3085
3086
3087
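/*
 * ipsintr_blocking - completion callback for polled internal commands:
 * returns the SCB to the free list and clears ha->waitflag so that
 * ips_wait() sees the command as finished.
 */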
3088static void
3089ipsintr_blocking(ips_ha_t * ha, ips_scb_t * scb)
3090{
3091 METHOD_TRACE("ipsintr_blocking", 2);
3092
3093 ips_freescb(ha, scb);
3094 if ((ha->waitflag == TRUE) && (ha->cmd_in_progress == scb->cdb[0])) {
3095 ha->waitflag = FALSE;
3096
3097 return;
3098 }
3099}
3100
3101
3102
3103
3104
3105
3106
3107
3108
3109
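/*
 * ipsintr_done - normal completion callback: sanity-checks the SCB and
 * hands it to ips_done().
 */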
3110static void
3111ipsintr_done(ips_ha_t * ha, ips_scb_t * scb)
3112{
3113 METHOD_TRACE("ipsintr_done", 2);
3114
3115 if (!scb) {
3116 IPS_PRINTK(KERN_WARNING, ha->pcidev,
3117 "Spurious interrupt; scb NULL.\n");
3118
3119 return;
3120 }
3121
3122 if (scb->scsi_cmd == NULL) {
3123
3124 IPS_PRINTK(KERN_WARNING, ha->pcidev,
3125 "Spurious interrupt; scsi_cmd not set.\n");
3126
3127 return;
3128 }
3129
3130 ips_done(ha, scb);
3131}
3132
3133
3134
3135
3136
3137
3138
3139
3140
3141
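/*
 * ips_done - finish processing a completed SCB.
 *
 * Passthru commands get their ioctl cleanup; commands that were broken
 * up because they exceeded the adapter's scatter/gather limit have the
 * remaining segments loaded and are reissued from here.  Everything
 * else has its DCDB-active bit cleared, scsi_done() is called and the
 * SCB is returned to the free list.
 */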
3142static void
3143ips_done(ips_ha_t * ha, ips_scb_t * scb)
3144{
3145 int ret;
3146
3147 METHOD_TRACE("ips_done", 1);
3148
3149 if (!scb)
3150 return;
3151
3152 if ((scb->scsi_cmd) && (ips_is_passthru(scb->scsi_cmd))) {
3153 ips_cleanup_passthru(ha, scb);
3154 ha->num_ioctl--;
3155 } else {
3156
3157
3158
3159
3160
3161 if ((scb->breakup) || (scb->sg_break)) {
3162 struct scatterlist *sg;
3163 int i, sg_dma_index, ips_sg_index = 0;
3164
3165
3166 scb->data_len = 0;
3167
3168 sg = scsi_sglist(scb->scsi_cmd);
3169
3170
3171 sg_dma_index = scb->breakup;
3172 for (i = 0; i < scb->breakup; i++)
3173 sg = sg_next(sg);
3174
3175
3176 ips_fill_scb_sg_single(ha,
3177 sg_dma_address(sg),
3178 scb, ips_sg_index++,
3179 sg_dma_len(sg));
3180
3181 for (; sg_dma_index < scsi_sg_count(scb->scsi_cmd);
3182 sg_dma_index++, sg = sg_next(sg)) {
3183 if (ips_fill_scb_sg_single
3184 (ha,
3185 sg_dma_address(sg),
3186 scb, ips_sg_index++,
3187 sg_dma_len(sg)) < 0)
3188 break;
3189 }
3190
3191 scb->dcdb.transfer_length = scb->data_len;
3192 scb->dcdb.cmd_attribute |=
3193 ips_command_direction[scb->scsi_cmd->cmnd[0]];
3194
3195 if (!(scb->dcdb.cmd_attribute & 0x3))
3196 scb->dcdb.transfer_length = 0;
3197
3198 if (scb->data_len >= IPS_MAX_XFER) {
3199 scb->dcdb.cmd_attribute |= IPS_TRANSFER64K;
3200 scb->dcdb.transfer_length = 0;
3201 }
3202
3203 ret = ips_send_cmd(ha, scb);
3204
3205 switch (ret) {
3206 case IPS_FAILURE:
3207 if (scb->scsi_cmd) {
3208 scb->scsi_cmd->result = DID_ERROR << 16;
3209 scb->scsi_cmd->scsi_done(scb->scsi_cmd);
3210 }
3211
3212 ips_freescb(ha, scb);
3213 break;
3214 case IPS_SUCCESS_IMM:
3215 if (scb->scsi_cmd) {
3216 scb->scsi_cmd->result = DID_ERROR << 16;
3217 scb->scsi_cmd->scsi_done(scb->scsi_cmd);
3218 }
3219
3220 ips_freescb(ha, scb);
3221 break;
3222 default:
3223 break;
3224 }
3225
3226 return;
3227 }
3228 }
3229
3230 if (scb->bus) {
3231 ha->dcdb_active[scb->bus - 1] &= ~(1 << scb->target_id);
3232 }
3233
3234 scb->scsi_cmd->scsi_done(scb->scsi_cmd);
3235
3236 ips_freescb(ha, scb);
3237}
3238
3239
3240
3241
3242
3243
3244
3245
3246
3247
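/*
 * ips_map_status - map the adapter basic/extended status of a completed
 * SCB onto a SCSI midlayer result (DID_*); for check conditions the
 * sense data from the (tape) DCDB is copied into the command's sense
 * buffer.
 */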
3248static int
3249ips_map_status(ips_ha_t * ha, ips_scb_t * scb, ips_stat_t * sp)
3250{
3251 int errcode;
3252 int device_error;
3253 uint32_t transfer_len;
3254 IPS_DCDB_TABLE_TAPE *tapeDCDB;
3255 IPS_SCSI_INQ_DATA inquiryData;
3256
3257 METHOD_TRACE("ips_map_status", 1);
3258
3259 if (scb->bus) {
3260 DEBUG_VAR(2,
3261 "(%s%d) Physical device error (%d %d %d): %x %x, Sense Key: %x, ASC: %x, ASCQ: %x",
3262 ips_name, ha->host_num,
3263 scb->scsi_cmd->device->channel,
3264 scb->scsi_cmd->device->id, scb->scsi_cmd->device->lun,
3265 scb->basic_status, scb->extended_status,
3266 scb->extended_status ==
3267 IPS_ERR_CKCOND ? scb->dcdb.sense_info[2] & 0xf : 0,
3268 scb->extended_status ==
3269 IPS_ERR_CKCOND ? scb->dcdb.sense_info[12] : 0,
3270 scb->extended_status ==
3271 IPS_ERR_CKCOND ? scb->dcdb.sense_info[13] : 0);
3272 }
3273
3274
3275 errcode = DID_ERROR;
3276 device_error = 0;
3277
3278 switch (scb->basic_status & IPS_GSC_STATUS_MASK) {
3279 case IPS_CMD_TIMEOUT:
3280 errcode = DID_TIME_OUT;
3281 break;
3282
3283 case IPS_INVAL_OPCO:
3284 case IPS_INVAL_CMD_BLK:
3285 case IPS_INVAL_PARM_BLK:
3286 case IPS_LD_ERROR:
3287 case IPS_CMD_CMPLT_WERROR:
3288 break;
3289
3290 case IPS_PHYS_DRV_ERROR:
3291 switch (scb->extended_status) {
3292 case IPS_ERR_SEL_TO:
3293 if (scb->bus)
3294 errcode = DID_NO_CONNECT;
3295
3296 break;
3297
3298 case IPS_ERR_OU_RUN:
3299 if ((scb->cmd.dcdb.op_code == IPS_CMD_EXTENDED_DCDB) ||
3300 (scb->cmd.dcdb.op_code ==
3301 IPS_CMD_EXTENDED_DCDB_SG)) {
3302 tapeDCDB = (IPS_DCDB_TABLE_TAPE *) & scb->dcdb;
3303 transfer_len = tapeDCDB->transfer_length;
3304 } else {
3305 transfer_len =
3306 (uint32_t) scb->dcdb.transfer_length;
3307 }
3308
3309 if ((scb->bus) && (transfer_len < scb->data_len)) {
3310
3311 errcode = DID_OK;
3312
3313
3314 if (scb->scsi_cmd->cmnd[0] == INQUIRY) {
3315 ips_scmd_buf_read(scb->scsi_cmd,
3316 &inquiryData, sizeof (inquiryData));
3317 if ((inquiryData.DeviceType & 0x1f) == TYPE_DISK) {
3318 errcode = DID_TIME_OUT;
3319 break;
3320 }
3321 }
3322 } else
3323 errcode = DID_ERROR;
3324
3325 break;
3326
3327 case IPS_ERR_RECOVERY:
3328
3329 if (scb->bus)
3330 errcode = DID_OK;
3331
3332 break;
3333
3334 case IPS_ERR_HOST_RESET:
3335 case IPS_ERR_DEV_RESET:
3336 errcode = DID_RESET;
3337 break;
3338
3339 case IPS_ERR_CKCOND:
3340 if (scb->bus) {
3341 if ((scb->cmd.dcdb.op_code ==
3342 IPS_CMD_EXTENDED_DCDB)
3343 || (scb->cmd.dcdb.op_code ==
3344 IPS_CMD_EXTENDED_DCDB_SG)) {
3345 tapeDCDB =
3346 (IPS_DCDB_TABLE_TAPE *) & scb->dcdb;
3347 memcpy(scb->scsi_cmd->sense_buffer,
3348 tapeDCDB->sense_info,
3349 SCSI_SENSE_BUFFERSIZE);
3350 } else {
3351 memcpy(scb->scsi_cmd->sense_buffer,
3352 scb->dcdb.sense_info,
3353 SCSI_SENSE_BUFFERSIZE);
3354 }
3355 device_error = 2;
3356 }
3357
3358 errcode = DID_OK;
3359
3360 break;
3361
3362 default:
3363 errcode = DID_ERROR;
3364 break;
3365
3366 }
3367 }
3368
3369 scb->scsi_cmd->result = device_error | (errcode << 16);
3370
3371 return (1);
3372}
3373
3374
3375
3376
3377
3378
3379
3380
3381
3382
3383
3384
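/*
 * ips_send_wait - issue an SCB and poll for its completion.
 *
 * Uses the blocking completion callback and, except for FFDC commands,
 * spins in ips_wait() until the controller finishes or the timeout
 * expires.
 */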
3385static int
3386ips_send_wait(ips_ha_t * ha, ips_scb_t * scb, int timeout, int intr)
3387{
3388 int ret;
3389
3390 METHOD_TRACE("ips_send_wait", 1);
3391
3392 if (intr != IPS_FFDC) {
3393 ha->waitflag = TRUE;
3394 ha->cmd_in_progress = scb->cdb[0];
3395 }
3396 scb->callback = ipsintr_blocking;
3397 ret = ips_send_cmd(ha, scb);
3398
3399 if ((ret == IPS_FAILURE) || (ret == IPS_SUCCESS_IMM))
3400 return (ret);
3401
3402 if (intr != IPS_FFDC)
3403 ret = ips_wait(ha, timeout, intr);
3404
3405 return (ret);
3406}
3407
3408
3409
3410
3411
3412
3413
3414
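/*
 * ips_scmd_buf_write - copy a flat driver buffer into a command's
 * scatter/gather buffers with interrupts disabled; ips_scmd_buf_read()
 * below does the reverse copy.
 */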
3415static void
3416ips_scmd_buf_write(struct scsi_cmnd *scmd, void *data, unsigned int count)
3417{
3418 unsigned long flags;
3419
3420 local_irq_save(flags);
3421 scsi_sg_copy_from_buffer(scmd, data, count);
3422 local_irq_restore(flags);
3423}
3424
3425
3426
3427
3428
3429
3430
3431
3432static void
3433ips_scmd_buf_read(struct scsi_cmnd *scmd, void *data, unsigned int count)
3434{
3435 unsigned long flags;
3436
3437 local_irq_save(flags);
3438 scsi_sg_copy_to_buffer(scmd, data, count);
3439 local_irq_restore(flags);
3440}
3441
3442
3443
3444
3445
3446
3447
3448
3449
3450
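/*
 * ips_send_cmd - turn an SCB into an adapter command and issue it.
 *
 * Logical drive (bus 0) SCSI commands are either completed immediately
 * (returning IPS_SUCCESS_IMM) or translated into the adapter's native
 * command set: READ/WRITE, ENQUIRY, GET_LD_INFO and so on.  Physical
 * (bus > 0) commands are wrapped in a DCDB or extended DCDB, with the
 * timeout attribute derived from the midlayer request timeout, and
 * handed to the controller through ha->func.issue.
 */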
3451static int
3452ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
3453{
3454 int ret;
3455 char *sp;
3456 int device_error;
3457 IPS_DCDB_TABLE_TAPE *tapeDCDB;
3458 int TimeOut;
3459
3460 METHOD_TRACE("ips_send_cmd", 1);
3461
3462 ret = IPS_SUCCESS;
3463
3464 if (!scb->scsi_cmd) {
3465
3466
3467 if (scb->bus > 0) {
3468
3469
3470 if ((ha->waitflag == TRUE) &&
3471 (ha->cmd_in_progress == scb->cdb[0])) {
3472 ha->waitflag = FALSE;
3473 }
3474
3475 return (1);
3476 }
3477 } else if ((scb->bus == 0) && (!ips_is_passthru(scb->scsi_cmd))) {
3478
3479 ret = IPS_SUCCESS_IMM;
3480
3481 switch (scb->scsi_cmd->cmnd[0]) {
3482 case ALLOW_MEDIUM_REMOVAL:
3483 case REZERO_UNIT:
3484 case ERASE:
3485 case WRITE_FILEMARKS:
3486 case SPACE:
3487 scb->scsi_cmd->result = DID_ERROR << 16;
3488 break;
3489
3490 case START_STOP:
3491 scb->scsi_cmd->result = DID_OK << 16;
3492 break;
3493
3494 case TEST_UNIT_READY:
3495 case INQUIRY:
3496 if (scb->target_id == IPS_ADAPTER_ID) {
3497
3498
3499
3500
3501 if (scb->scsi_cmd->cmnd[0] == TEST_UNIT_READY)
3502 scb->scsi_cmd->result = DID_OK << 16;
3503
3504 if (scb->scsi_cmd->cmnd[0] == INQUIRY) {
3505 IPS_SCSI_INQ_DATA inquiry;
3506
3507 memset(&inquiry, 0,
3508 sizeof (IPS_SCSI_INQ_DATA));
3509
3510 inquiry.DeviceType =
3511 IPS_SCSI_INQ_TYPE_PROCESSOR;
3512 inquiry.DeviceTypeQualifier =
3513 IPS_SCSI_INQ_LU_CONNECTED;
3514 inquiry.Version = IPS_SCSI_INQ_REV2;
3515 inquiry.ResponseDataFormat =
3516 IPS_SCSI_INQ_RD_REV2;
3517 inquiry.AdditionalLength = 31;
3518 inquiry.Flags[0] =
3519 IPS_SCSI_INQ_Address16;
3520 inquiry.Flags[1] =
3521 IPS_SCSI_INQ_WBus16 |
3522 IPS_SCSI_INQ_Sync;
3523 memcpy(inquiry.VendorId, "IBM     ",
3524 8);
3525 memcpy(inquiry.ProductId,
3526 "SERVERAID       ", 16);
3527 memcpy(inquiry.ProductRevisionLevel,
3528 "1.00", 4);
3529
3530 ips_scmd_buf_write(scb->scsi_cmd,
3531 &inquiry,
3532 sizeof (inquiry));
3533
3534 scb->scsi_cmd->result = DID_OK << 16;
3535 }
3536 } else {
3537 scb->cmd.logical_info.op_code = IPS_CMD_GET_LD_INFO;
3538 scb->cmd.logical_info.command_id = IPS_COMMAND_ID(ha, scb);
3539 scb->cmd.logical_info.reserved = 0;
3540 scb->cmd.logical_info.reserved2 = 0;
3541 scb->data_len = sizeof (IPS_LD_INFO);
3542 scb->data_busaddr = ha->logical_drive_info_dma_addr;
3543 scb->flags = 0;
3544 scb->cmd.logical_info.buffer_addr = scb->data_busaddr;
3545 ret = IPS_SUCCESS;
3546 }
3547
3548 break;
3549
3550 case REQUEST_SENSE:
3551 ips_reqsen(ha, scb);
3552 scb->scsi_cmd->result = DID_OK << 16;
3553 break;
3554
3555 case READ_6:
3556 case WRITE_6:
3557 if (!scb->sg_len) {
3558 scb->cmd.basic_io.op_code =
3559 (scb->scsi_cmd->cmnd[0] ==
3560 READ_6) ? IPS_CMD_READ : IPS_CMD_WRITE;
3561 scb->cmd.basic_io.enhanced_sg = 0;
3562 scb->cmd.basic_io.sg_addr =
3563 cpu_to_le32(scb->data_busaddr);
3564 } else {
3565 scb->cmd.basic_io.op_code =
3566 (scb->scsi_cmd->cmnd[0] ==
3567 READ_6) ? IPS_CMD_READ_SG :
3568 IPS_CMD_WRITE_SG;
3569 scb->cmd.basic_io.enhanced_sg =
3570 IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0;
3571 scb->cmd.basic_io.sg_addr =
3572 cpu_to_le32(scb->sg_busaddr);
3573 }
3574
3575 scb->cmd.basic_io.segment_4G = 0;
3576 scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
3577 scb->cmd.basic_io.log_drv = scb->target_id;
3578 scb->cmd.basic_io.sg_count = scb->sg_len;
3579
3580 if (scb->cmd.basic_io.lba)
3581 le32_add_cpu(&scb->cmd.basic_io.lba,
3582 le16_to_cpu(scb->cmd.basic_io.
3583 sector_count));
3584 else
3585 scb->cmd.basic_io.lba =
3586 (((scb->scsi_cmd->cmnd[1] & 0x1f) << 16) |
3587 (scb->scsi_cmd->cmnd[2] << 8) |
3588 (scb->scsi_cmd->cmnd[3]));
3590
3591 scb->cmd.basic_io.sector_count =
3592 cpu_to_le16(scb->data_len / IPS_BLKSIZE);
3593
3594 if (le16_to_cpu(scb->cmd.basic_io.sector_count) == 0)
3595 scb->cmd.basic_io.sector_count =
3596 cpu_to_le16(256);
3597
3598 ret = IPS_SUCCESS;
3599 break;
3600
3601 case READ_10:
3602 case WRITE_10:
3603 if (!scb->sg_len) {
3604 scb->cmd.basic_io.op_code =
3605 (scb->scsi_cmd->cmnd[0] ==
3606 READ_10) ? IPS_CMD_READ : IPS_CMD_WRITE;
3607 scb->cmd.basic_io.enhanced_sg = 0;
3608 scb->cmd.basic_io.sg_addr =
3609 cpu_to_le32(scb->data_busaddr);
3610 } else {
3611 scb->cmd.basic_io.op_code =
3612 (scb->scsi_cmd->cmnd[0] ==
3613 READ_10) ? IPS_CMD_READ_SG :
3614 IPS_CMD_WRITE_SG;
3615 scb->cmd.basic_io.enhanced_sg =
3616 IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0;
3617 scb->cmd.basic_io.sg_addr =
3618 cpu_to_le32(scb->sg_busaddr);
3619 }
3620
3621 scb->cmd.basic_io.segment_4G = 0;
3622 scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
3623 scb->cmd.basic_io.log_drv = scb->target_id;
3624 scb->cmd.basic_io.sg_count = scb->sg_len;
3625
3626 if (scb->cmd.basic_io.lba)
3627 le32_add_cpu(&scb->cmd.basic_io.lba,
3628 le16_to_cpu(scb->cmd.basic_io.
3629 sector_count));
3630 else
3631 scb->cmd.basic_io.lba =
3632 ((scb->scsi_cmd->cmnd[2] << 24) |
3633 (scb->scsi_cmd->cmnd[3] << 16) |
3634 (scb->scsi_cmd->cmnd[4] << 8) |
3635 scb->scsi_cmd->cmnd[5]);
3638
3639 scb->cmd.basic_io.sector_count =
3640 cpu_to_le16(scb->data_len / IPS_BLKSIZE);
3641
3642 if (cpu_to_le16(scb->cmd.basic_io.sector_count) == 0) {
3643
3644
3645
3646
3647
3648 scb->scsi_cmd->result = DID_OK << 16;
3649 } else
3650 ret = IPS_SUCCESS;
3651
3652 break;
3653
3654 case RESERVE:
3655 case RELEASE:
3656 scb->scsi_cmd->result = DID_OK << 16;
3657 break;
3658
3659 case MODE_SENSE:
3660 scb->cmd.basic_io.op_code = IPS_CMD_ENQUIRY;
3661 scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
3662 scb->cmd.basic_io.segment_4G = 0;
3663 scb->cmd.basic_io.enhanced_sg = 0;
3664 scb->data_len = sizeof (*ha->enq);
3665 scb->cmd.basic_io.sg_addr = ha->enq_busaddr;
3666 ret = IPS_SUCCESS;
3667 break;
3668
3669 case READ_CAPACITY:
3670 scb->cmd.logical_info.op_code = IPS_CMD_GET_LD_INFO;
3671 scb->cmd.logical_info.command_id = IPS_COMMAND_ID(ha, scb);
3672 scb->cmd.logical_info.reserved = 0;
3673 scb->cmd.logical_info.reserved2 = 0;
3674 scb->cmd.logical_info.reserved3 = 0;
3675 scb->data_len = sizeof (IPS_LD_INFO);
3676 scb->data_busaddr = ha->logical_drive_info_dma_addr;
3677 scb->flags = 0;
3678 scb->cmd.logical_info.buffer_addr = scb->data_busaddr;
3679 ret = IPS_SUCCESS;
3680 break;
3681
3682 case SEND_DIAGNOSTIC:
3683 case REASSIGN_BLOCKS:
3684 case FORMAT_UNIT:
3685 case SEEK_10:
3686 case VERIFY:
3687 case READ_DEFECT_DATA:
3688 case READ_BUFFER:
3689 case WRITE_BUFFER:
3690 scb->scsi_cmd->result = DID_OK << 16;
3691 break;
3692
3693 default:
3694
3695
3696
3697 sp = (char *) scb->scsi_cmd->sense_buffer;
3698
3699 sp[0] = 0x70;
3700 sp[2] = ILLEGAL_REQUEST;
3701 sp[7] = 0x0A;
3702 sp[12] = 0x20;
3703 sp[13] = 0x00;
3704
3705 device_error = 2;
3706 scb->scsi_cmd->result = device_error | (DID_OK << 16);
3707 break;
3708 }
3709 }
3710
3711 if (ret == IPS_SUCCESS_IMM)
3712 return (ret);
3713
3714
3715 if (scb->bus > 0) {
3716
3717
3718
3719 if (ha->conf->dev[scb->bus - 1][scb->target_id].ucState == 0) {
3720 scb->scsi_cmd->result = DID_NO_CONNECT << 16;
3721 return (IPS_SUCCESS_IMM);
3722 }
3723
3724 ha->dcdb_active[scb->bus - 1] |= (1 << scb->target_id);
3725 scb->cmd.dcdb.command_id = IPS_COMMAND_ID(ha, scb);
3726 scb->cmd.dcdb.dcdb_address = cpu_to_le32(scb->scb_busaddr +
3727 (unsigned long) &scb->dcdb -
3728 (unsigned long) scb);
3730 scb->cmd.dcdb.reserved = 0;
3731 scb->cmd.dcdb.reserved2 = 0;
3732 scb->cmd.dcdb.reserved3 = 0;
3733 scb->cmd.dcdb.segment_4G = 0;
3734 scb->cmd.dcdb.enhanced_sg = 0;
3735
3736 TimeOut = scb->scsi_cmd->request->timeout;
3737
3738 if (ha->subsys->param[4] & 0x00100000) {
3739 if (!scb->sg_len) {
3740 scb->cmd.dcdb.op_code = IPS_CMD_EXTENDED_DCDB;
3741 } else {
3742 scb->cmd.dcdb.op_code =
3743 IPS_CMD_EXTENDED_DCDB_SG;
3744 scb->cmd.dcdb.enhanced_sg =
3745 IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0;
3746 }
3747
3748 tapeDCDB = (IPS_DCDB_TABLE_TAPE *) & scb->dcdb;
3749 tapeDCDB->device_address =
3750 ((scb->bus - 1) << 4) | scb->target_id;
3751 tapeDCDB->cmd_attribute |= IPS_DISCONNECT_ALLOWED;
3752 tapeDCDB->cmd_attribute &= ~IPS_TRANSFER64K;
3753
3754 if (TimeOut) {
3755 if (TimeOut < (10 * HZ))
3756 tapeDCDB->cmd_attribute |= IPS_TIMEOUT10;
3757 else if (TimeOut < (60 * HZ))
3758 tapeDCDB->cmd_attribute |= IPS_TIMEOUT60;
3759 else if (TimeOut < (1200 * HZ))
3760 tapeDCDB->cmd_attribute |= IPS_TIMEOUT20M;
3761 }
3762
3763 tapeDCDB->cdb_length = scb->scsi_cmd->cmd_len;
3764 tapeDCDB->reserved_for_LUN = 0;
3765 tapeDCDB->transfer_length = scb->data_len;
3766 if (scb->cmd.dcdb.op_code == IPS_CMD_EXTENDED_DCDB_SG)
3767 tapeDCDB->buffer_pointer =
3768 cpu_to_le32(scb->sg_busaddr);
3769 else
3770 tapeDCDB->buffer_pointer =
3771 cpu_to_le32(scb->data_busaddr);
3772 tapeDCDB->sg_count = scb->sg_len;
3773 tapeDCDB->sense_length = sizeof (tapeDCDB->sense_info);
3774 tapeDCDB->scsi_status = 0;
3775 tapeDCDB->reserved = 0;
3776 memcpy(tapeDCDB->scsi_cdb, scb->scsi_cmd->cmnd,
3777 scb->scsi_cmd->cmd_len);
3778 } else {
3779 if (!scb->sg_len) {
3780 scb->cmd.dcdb.op_code = IPS_CMD_DCDB;
3781 } else {
3782 scb->cmd.dcdb.op_code = IPS_CMD_DCDB_SG;
3783 scb->cmd.dcdb.enhanced_sg =
3784 IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0;
3785 }
3786
3787 scb->dcdb.device_address =
3788 ((scb->bus - 1) << 4) | scb->target_id;
3789 scb->dcdb.cmd_attribute |= IPS_DISCONNECT_ALLOWED;
3790
3791 if (TimeOut) {
3792 if (TimeOut < (10 * HZ))
3793 scb->dcdb.cmd_attribute |= IPS_TIMEOUT10;
3794 else if (TimeOut < (60 * HZ))
3795 scb->dcdb.cmd_attribute |= IPS_TIMEOUT60;
3796 else if (TimeOut < (1200 * HZ))
3797 scb->dcdb.cmd_attribute |= IPS_TIMEOUT20M;
3798 }
3799
3800 scb->dcdb.transfer_length = scb->data_len;
3801 if (scb->dcdb.cmd_attribute & IPS_TRANSFER64K)
3802 scb->dcdb.transfer_length = 0;
3803 if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB_SG)
3804 scb->dcdb.buffer_pointer =
3805 cpu_to_le32(scb->sg_busaddr);
3806 else
3807 scb->dcdb.buffer_pointer =
3808 cpu_to_le32(scb->data_busaddr);
3809 scb->dcdb.cdb_length = scb->scsi_cmd->cmd_len;
3810 scb->dcdb.sense_length = sizeof (scb->dcdb.sense_info);
3811 scb->dcdb.sg_count = scb->sg_len;
3812 scb->dcdb.reserved = 0;
3813 memcpy(scb->dcdb.scsi_cdb, scb->scsi_cmd->cmnd,
3814 scb->scsi_cmd->cmd_len);
3815 scb->dcdb.scsi_status = 0;
3816 scb->dcdb.reserved2[0] = 0;
3817 scb->dcdb.reserved2[1] = 0;
3818 scb->dcdb.reserved2[2] = 0;
3819 }
3820 }
3821
3822 return ((*ha->func.issue) (ha, scb));
3823}
3824
3825
3826
3827
3828
3829
3830
3831
3832
3833
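/*
 * ips_chkstatus - process one status word returned by the adapter.
 *
 * Looks up the SCB by command id and removes it from the active list.
 * For non-passthru logical drive commands the emulated SCSI command
 * (inquiry, read capacity, mode sense, ...) is completed here; error
 * completions are translated by ips_map_status().
 */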
3834static void
3835ips_chkstatus(ips_ha_t * ha, IPS_STATUS * pstatus)
3836{
3837 ips_scb_t *scb;
3838 ips_stat_t *sp;
3839 uint8_t basic_status;
3840 uint8_t ext_status;
3841 int errcode;
3842 IPS_SCSI_INQ_DATA inquiryData;
3843
3844 METHOD_TRACE("ips_chkstatus", 1);
3845
3846 scb = &ha->scbs[pstatus->fields.command_id];
3847 scb->basic_status = basic_status =
3848 pstatus->fields.basic_status & IPS_BASIC_STATUS_MASK;
3849 scb->extended_status = ext_status = pstatus->fields.extended_status;
3850
3851 sp = &ha->sp;
3852 sp->residue_len = 0;
3853 sp->scb_addr = (void *) scb;
3854
3855
3856 ips_removeq_scb(&ha->scb_activelist, scb);
3857
3858 if (!scb->scsi_cmd)
3859
3860 return;
3861
3862 DEBUG_VAR(2, "(%s%d) ips_chkstatus: cmd 0x%X id %d (%d %d %d)",
3863 ips_name,
3864 ha->host_num,
3865 scb->cdb[0],
3866 scb->cmd.basic_io.command_id,
3867 scb->bus, scb->target_id, scb->lun);
3868
3869 if ((scb->scsi_cmd) && (ips_is_passthru(scb->scsi_cmd)))
3870
3871 return;
3872
3873 errcode = DID_OK;
3874
3875 if (((basic_status & IPS_GSC_STATUS_MASK) == IPS_CMD_SUCCESS) ||
3876 ((basic_status & IPS_GSC_STATUS_MASK) == IPS_CMD_RECOVERED_ERROR)) {
3877
3878 if (scb->bus == 0) {
3879 if ((basic_status & IPS_GSC_STATUS_MASK) ==
3880 IPS_CMD_RECOVERED_ERROR) {
3881 DEBUG_VAR(1,
3882 "(%s%d) Recovered Logical Drive Error OpCode: %x, BSB: %x, ESB: %x",
3883 ips_name, ha->host_num,
3884 scb->cmd.basic_io.op_code,
3885 basic_status, ext_status);
3886 }
3887
3888 switch (scb->scsi_cmd->cmnd[0]) {
3889 case ALLOW_MEDIUM_REMOVAL:
3890 case REZERO_UNIT:
3891 case ERASE:
3892 case WRITE_FILEMARKS:
3893 case SPACE:
3894 errcode = DID_ERROR;
3895 break;
3896
3897 case START_STOP:
3898 break;
3899
3900 case TEST_UNIT_READY:
3901 if (!ips_online(ha, scb)) {
3902 errcode = DID_TIME_OUT;
3903 }
3904 break;
3905
3906 case INQUIRY:
3907 if (ips_online(ha, scb)) {
3908 ips_inquiry(ha, scb);
3909 } else {
3910 errcode = DID_TIME_OUT;
3911 }
3912 break;
3913
3914 case REQUEST_SENSE:
3915 ips_reqsen(ha, scb);
3916 break;
3917
3918 case READ_6:
3919 case WRITE_6:
3920 case READ_10:
3921 case WRITE_10:
3922 case RESERVE:
3923 case RELEASE:
3924 break;
3925
3926 case MODE_SENSE:
3927 if (!ips_online(ha, scb)
3928 || !ips_msense(ha, scb)) {
3929 errcode = DID_ERROR;
3930 }
3931 break;
3932
3933 case READ_CAPACITY:
3934 if (ips_online(ha, scb))
3935 ips_rdcap(ha, scb);
3936 else {
3937 errcode = DID_TIME_OUT;
3938 }
3939 break;
3940
3941 case SEND_DIAGNOSTIC:
3942 case REASSIGN_BLOCKS:
3943 break;
3944
3945 case FORMAT_UNIT:
3946 errcode = DID_ERROR;
3947 break;
3948
3949 case SEEK_10:
3950 case VERIFY:
3951 case READ_DEFECT_DATA:
3952 case READ_BUFFER:
3953 case WRITE_BUFFER:
3954 break;
3955
3956 default:
3957 errcode = DID_ERROR;
3958 }
3959
3960 scb->scsi_cmd->result = errcode << 16;
3961 } else {
3962
3963 if (scb->scsi_cmd->cmnd[0] == INQUIRY) {
3964 ips_scmd_buf_read(scb->scsi_cmd,
3965 &inquiryData, sizeof (inquiryData));
3966 if ((inquiryData.DeviceType & 0x1f) == TYPE_DISK)
3967 scb->scsi_cmd->result = DID_TIME_OUT << 16;
3968 }
3969 }
3970 } else {
3971 if (scb->bus == 0) {
3972 DEBUG_VAR(1,
3973 "(%s%d) Unrecovered Logical Drive Error OpCode: %x, BSB: %x, ESB: %x",
3974 ips_name, ha->host_num,
3975 scb->cmd.basic_io.op_code, basic_status,
3976 ext_status);
3977 }
3978
3979 ips_map_status(ha, scb, sp);
3980 }
3981}
3982
3983
3984
3985
3986
3987
3988
3989
3990
3991
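/*
 * ips_online - return 1 if the logical drive addressed by the SCB is in
 * a usable state according to the cached logical drive info, else 0.
 */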
3992static int
3993ips_online(ips_ha_t * ha, ips_scb_t * scb)
3994{
3995 METHOD_TRACE("ips_online", 1);
3996
3997 if (scb->target_id >= IPS_MAX_LD)
3998 return (0);
3999
4000 if ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1) {
4001 memset(ha->logical_drive_info, 0, sizeof (IPS_LD_INFO));
4002 return (0);
4003 }
4004
4005 if (ha->logical_drive_info->drive_info[scb->target_id].state !=
4006 IPS_LD_OFFLINE
4007 && ha->logical_drive_info->drive_info[scb->target_id].state !=
4008 IPS_LD_FREE
4009 && ha->logical_drive_info->drive_info[scb->target_id].state !=
4010 IPS_LD_CRS
4011 && ha->logical_drive_info->drive_info[scb->target_id].state !=
4012 IPS_LD_SYS)
4013 return (1);
4014 else
4015 return (0);
4016}
4017
4018
4019
4020
4021
4022
4023
4024
4025
4026
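/*
 * ips_inquiry - build simulated SCSI INQUIRY data for a logical drive
 * and copy it into the command buffer.
 */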
4027static int
4028ips_inquiry(ips_ha_t * ha, ips_scb_t * scb)
4029{
4030 IPS_SCSI_INQ_DATA inquiry;
4031
4032 METHOD_TRACE("ips_inquiry", 1);
4033
4034 memset(&inquiry, 0, sizeof (IPS_SCSI_INQ_DATA));
4035
4036 inquiry.DeviceType = IPS_SCSI_INQ_TYPE_DASD;
4037 inquiry.DeviceTypeQualifier = IPS_SCSI_INQ_LU_CONNECTED;
4038 inquiry.Version = IPS_SCSI_INQ_REV2;
4039 inquiry.ResponseDataFormat = IPS_SCSI_INQ_RD_REV2;
4040 inquiry.AdditionalLength = 31;
4041 inquiry.Flags[0] = IPS_SCSI_INQ_Address16;
4042 inquiry.Flags[1] =
4043 IPS_SCSI_INQ_WBus16 | IPS_SCSI_INQ_Sync | IPS_SCSI_INQ_CmdQue;
4044 memcpy(inquiry.VendorId, "IBM     ", 8);
4045 memcpy(inquiry.ProductId, "SERVERAID       ", 16);
4046 memcpy(inquiry.ProductRevisionLevel, "1.00", 4);
4047
4048 ips_scmd_buf_write(scb->scsi_cmd, &inquiry, sizeof (inquiry));
4049
4050 return (1);
4051}
4052
4053
4054
4055
4056
4057
4058
4059
4060
4061
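/*
 * ips_rdcap - build simulated READ CAPACITY data (last LBA and block
 * size) for a logical drive from the cached logical drive info.
 */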
4062static int
4063ips_rdcap(ips_ha_t * ha, ips_scb_t * scb)
4064{
4065 IPS_SCSI_CAPACITY cap;
4066
4067 METHOD_TRACE("ips_rdcap", 1);
4068
4069 if (scsi_bufflen(scb->scsi_cmd) < 8)
4070 return (0);
4071
4072 cap.lba =
4073 cpu_to_be32(le32_to_cpu
4074 (ha->logical_drive_info->
4075 drive_info[scb->target_id].sector_count) - 1);
4076 cap.len = cpu_to_be32((uint32_t) IPS_BLKSIZE);
4077
4078 ips_scmd_buf_write(scb->scsi_cmd, &cap, sizeof (cap));
4079
4080 return (1);
4081}
4082
4083
4084
4085
4086
4087
4088
4089
4090
4091
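/*
 * ips_msense - simulate MODE SENSE pages 3, 4 and 8 for a logical
 * drive, using a geometry derived from the drive size.
 */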
4092static int
4093ips_msense(ips_ha_t * ha, ips_scb_t * scb)
4094{
4095 uint16_t heads;
4096 uint16_t sectors;
4097 uint32_t cylinders;
4098 IPS_SCSI_MODE_PAGE_DATA mdata;
4099
4100 METHOD_TRACE("ips_msense", 1);
4101
4102 if (le32_to_cpu(ha->enq->ulDriveSize[scb->target_id]) > 0x400000 &&
4103 (ha->enq->ucMiscFlag & 0x8) == 0) {
4104 heads = IPS_NORM_HEADS;
4105 sectors = IPS_NORM_SECTORS;
4106 } else {
4107 heads = IPS_COMP_HEADS;
4108 sectors = IPS_COMP_SECTORS;
4109 }
4110
4111 cylinders =
4112 (le32_to_cpu(ha->enq->ulDriveSize[scb->target_id]) -
4113 1) / (heads * sectors);
4114
4115 memset(&mdata, 0, sizeof (IPS_SCSI_MODE_PAGE_DATA));
4116
4117 mdata.hdr.BlockDescLength = 8;
4118
4119 switch (scb->scsi_cmd->cmnd[2] & 0x3f) {
4120 case 0x03:
4121 mdata.pdata.pg3.PageCode = 3;
4122 mdata.pdata.pg3.PageLength = sizeof (IPS_SCSI_MODE_PAGE3);
4123 mdata.hdr.DataLength =
4124 3 + mdata.hdr.BlockDescLength + mdata.pdata.pg3.PageLength;
4125 mdata.pdata.pg3.TracksPerZone = 0;
4126 mdata.pdata.pg3.AltSectorsPerZone = 0;
4127 mdata.pdata.pg3.AltTracksPerZone = 0;
4128 mdata.pdata.pg3.AltTracksPerVolume = 0;
4129 mdata.pdata.pg3.SectorsPerTrack = cpu_to_be16(sectors);
4130 mdata.pdata.pg3.BytesPerSector = cpu_to_be16(IPS_BLKSIZE);
4131 mdata.pdata.pg3.Interleave = cpu_to_be16(1);
4132 mdata.pdata.pg3.TrackSkew = 0;
4133 mdata.pdata.pg3.CylinderSkew = 0;
4134 mdata.pdata.pg3.flags = IPS_SCSI_MP3_SoftSector;
4135 break;
4136
4137 case 0x4:
4138 mdata.pdata.pg4.PageCode = 4;
4139 mdata.pdata.pg4.PageLength = sizeof (IPS_SCSI_MODE_PAGE4);
4140 mdata.hdr.DataLength =
4141 3 + mdata.hdr.BlockDescLength + mdata.pdata.pg4.PageLength;
4142 mdata.pdata.pg4.CylindersHigh =
4143 cpu_to_be16((cylinders >> 8) & 0xFFFF);
4144 mdata.pdata.pg4.CylindersLow = (cylinders & 0xFF);
4145 mdata.pdata.pg4.Heads = heads;
4146 mdata.pdata.pg4.WritePrecompHigh = 0;
4147 mdata.pdata.pg4.WritePrecompLow = 0;
4148 mdata.pdata.pg4.ReducedWriteCurrentHigh = 0;
4149 mdata.pdata.pg4.ReducedWriteCurrentLow = 0;
4150 mdata.pdata.pg4.StepRate = cpu_to_be16(1);
4151 mdata.pdata.pg4.LandingZoneHigh = 0;
4152 mdata.pdata.pg4.LandingZoneLow = 0;
4153 mdata.pdata.pg4.flags = 0;
4154 mdata.pdata.pg4.RotationalOffset = 0;
4155 mdata.pdata.pg4.MediumRotationRate = 0;
4156 break;
4157 case 0x8:
4158 mdata.pdata.pg8.PageCode = 8;
4159 mdata.pdata.pg8.PageLength = sizeof (IPS_SCSI_MODE_PAGE8);
4160 mdata.hdr.DataLength =
4161 3 + mdata.hdr.BlockDescLength + mdata.pdata.pg8.PageLength;
4162
4163 break;
4164
4165 default:
4166 return (0);
4167 }
4168
4169 ips_scmd_buf_write(scb->scsi_cmd, &mdata, sizeof (mdata));
4170
4171 return (1);
4172}
4173
4174
4175
4176
4177
4178
4179
4180
4181
4182
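/*
 * ips_reqsen - return "no sense" data for a simulated REQUEST SENSE.
 */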
4183static int
4184ips_reqsen(ips_ha_t * ha, ips_scb_t * scb)
4185{
4186 IPS_SCSI_REQSEN reqsen;
4187
4188 METHOD_TRACE("ips_reqsen", 1);
4189
4190 memset(&reqsen, 0, sizeof (IPS_SCSI_REQSEN));
4191
4192 reqsen.ResponseCode =
4193 IPS_SCSI_REQSEN_VALID | IPS_SCSI_REQSEN_CURRENT_ERR;
4194 reqsen.AdditionalLength = 10;
4195 reqsen.AdditionalSenseCode = IPS_SCSI_REQSEN_NO_SENSE;
4196 reqsen.AdditionalSenseCodeQual = IPS_SCSI_REQSEN_NO_SENSE;
4197
4198 ips_scmd_buf_write(scb->scsi_cmd, &reqsen, sizeof (reqsen));
4199
4200 return (1);
4201}
4202
4203
4204
4205
4206
4207
4208
4209
4210
4211
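/*
 * ips_free - release everything owned by a host adapter structure:
 * coherent DMA buffers, the ioctl buffer, the SCB pool and the register
 * mapping.
 */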
4212static void
4213ips_free(ips_ha_t * ha)
4214{
4215
4216 METHOD_TRACE("ips_free", 1);
4217
4218 if (ha) {
4219 if (ha->enq) {
4220 dma_free_coherent(&ha->pcidev->dev, sizeof(IPS_ENQ),
4221 ha->enq, ha->enq_busaddr);
4222 ha->enq = NULL;
4223 }
4224
4225 kfree(ha->conf);
4226 ha->conf = NULL;
4227
4228 if (ha->adapt) {
4229 dma_free_coherent(&ha->pcidev->dev,
4230 sizeof (IPS_ADAPTER) +
4231 sizeof (IPS_IO_CMD), ha->adapt,
4232 ha->adapt->hw_status_start);
4233 ha->adapt = NULL;
4234 }
4235
4236 if (ha->logical_drive_info) {
4237 dma_free_coherent(&ha->pcidev->dev,
4238 sizeof (IPS_LD_INFO),
4239 ha->logical_drive_info,
4240 ha->logical_drive_info_dma_addr);
4241 ha->logical_drive_info = NULL;
4242 }
4243
4244 kfree(ha->nvram);
4245 ha->nvram = NULL;
4246
4247 kfree(ha->subsys);
4248 ha->subsys = NULL;
4249
4250 if (ha->ioctl_data) {
4251 dma_free_coherent(&ha->pcidev->dev, ha->ioctl_len,
4252 ha->ioctl_data, ha->ioctl_busaddr);
4253 ha->ioctl_data = NULL;
4254 ha->ioctl_datasize = 0;
4255 ha->ioctl_len = 0;
4256 }
4257 ips_deallocatescbs(ha, ha->max_cmds);
4258
4259
4260 if (ha->mem_ptr) {
4261 iounmap(ha->ioremap_ptr);
4262 ha->ioremap_ptr = NULL;
4263 ha->mem_ptr = NULL;
4264 }
4265
4266 ha->mem_addr = 0;
4267
4268 }
4269}
4270
4271
4272
4273
4274
4275
4276
4277
4278
4279
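/*
 * ips_deallocatescbs - free the SCB array and its scatter/gather lists.
 */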
4280static int
4281ips_deallocatescbs(ips_ha_t * ha, int cmds)
4282{
4283 if (ha->scbs) {
4284 dma_free_coherent(&ha->pcidev->dev,
4285 IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * cmds,
4286 ha->scbs->sg_list.list,
4287 ha->scbs->sg_busaddr);
4288 dma_free_coherent(&ha->pcidev->dev, sizeof (ips_scb_t) * cmds,
4289 ha->scbs, ha->scbs->scb_busaddr);
4290 ha->scbs = NULL;
4291 }
4292 return 1;
4293}
4294
4295
4296
4297
4298
4299
4300
4301
4302
4303
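/*
 * ips_allocatescbs - allocate the SCB array and per-SCB scatter/gather
 * lists from coherent DMA memory and build the free list.  The last SCB
 * is kept off the free list and reserved for internal commands.
 */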
4304static int
4305ips_allocatescbs(ips_ha_t * ha)
4306{
4307 ips_scb_t *scb_p;
4308 IPS_SG_LIST ips_sg;
4309 int i;
4310 dma_addr_t command_dma, sg_dma;
4311
4312 METHOD_TRACE("ips_allocatescbs", 1);
4313
4314
4315 ha->scbs = dma_alloc_coherent(&ha->pcidev->dev,
4316 ha->max_cmds * sizeof (ips_scb_t),
4317 &command_dma, GFP_KERNEL);
4318 if (ha->scbs == NULL)
4319 return 0;
4320 ips_sg.list = dma_alloc_coherent(&ha->pcidev->dev,
4321 IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * ha->max_cmds,
4322 &sg_dma, GFP_KERNEL);
4323 if (ips_sg.list == NULL) {
4324 dma_free_coherent(&ha->pcidev->dev,
4325 ha->max_cmds * sizeof (ips_scb_t), ha->scbs,
4326 command_dma);
4327 return 0;
4328 }
4329
4330 memset(ha->scbs, 0, ha->max_cmds * sizeof (ips_scb_t));
4331
4332 for (i = 0; i < ha->max_cmds; i++) {
4333 scb_p = &ha->scbs[i];
4334 scb_p->scb_busaddr = command_dma + sizeof (ips_scb_t) * i;
4335
4336 if (IPS_USE_ENH_SGLIST(ha)) {
4337 scb_p->sg_list.enh_list =
4338 ips_sg.enh_list + i * IPS_MAX_SG;
4339 scb_p->sg_busaddr =
4340 sg_dma + IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * i;
4341 } else {
4342 scb_p->sg_list.std_list =
4343 ips_sg.std_list + i * IPS_MAX_SG;
4344 scb_p->sg_busaddr =
4345 sg_dma + IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * i;
4346 }
4347
4348
4349 if (i < ha->max_cmds - 1) {
4350 scb_p->q_next = ha->scb_freelist;
4351 ha->scb_freelist = scb_p;
4352 }
4353 }
4354
4355
4356 return (1);
4357}
4358
4359
4360
4361
4362
4363
4364
4365
4366
4367
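/*
 * ips_init_scb - reset an SCB to a known state while preserving its bus
 * addresses and scatter/gather list pointer, and reinitialize the dummy
 * command block (ha->dummy) used by the adapter.
 */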
4368static void
4369ips_init_scb(ips_ha_t * ha, ips_scb_t * scb)
4370{
4371 IPS_SG_LIST sg_list;
4372 uint32_t cmd_busaddr, sg_busaddr;
4373 METHOD_TRACE("ips_init_scb", 1);
4374
4375 if (scb == NULL)
4376 return;
4377
4378 sg_list.list = scb->sg_list.list;
4379 cmd_busaddr = scb->scb_busaddr;
4380 sg_busaddr = scb->sg_busaddr;
4381
4382 memset(scb, 0, sizeof (ips_scb_t));
4383 memset(ha->dummy, 0, sizeof (IPS_IO_CMD));
4384
4385
4386 ha->dummy->op_code = 0xFF;
4387 ha->dummy->ccsar = cpu_to_le32(ha->adapt->hw_status_start
4388 + sizeof (IPS_ADAPTER));
4389 ha->dummy->command_id = IPS_MAX_CMDS;
4390
4391
4392 scb->scb_busaddr = cmd_busaddr;
4393 scb->sg_busaddr = sg_busaddr;
4394 scb->sg_list.list = sg_list.list;
4395
4396
4397 scb->cmd.basic_io.cccr = cpu_to_le32((uint32_t) IPS_BIT_ILE);
4398 scb->cmd.basic_io.ccsar = cpu_to_le32(ha->adapt->hw_status_start
4399 + sizeof (IPS_ADAPTER));
4400}
4401
4402
4403
4404
4405
4406
4407
4408
4409
4410
4411
4412
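/*
 * ips_getscb - take an SCB off the free list and initialize it; returns
 * NULL if none are available.
 */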
4413static ips_scb_t *
4414ips_getscb(ips_ha_t * ha)
4415{
4416 ips_scb_t *scb;
4417
4418 METHOD_TRACE("ips_getscb", 1);
4419
4420 if ((scb = ha->scb_freelist) == NULL) {
4421
4422 return (NULL);
4423 }
4424
4425 ha->scb_freelist = scb->q_next;
4426 scb->flags = 0;
4427 scb->q_next = NULL;
4428
4429 ips_init_scb(ha, scb);
4430
4431 return (scb);
4432}
4433
4434
4435
4436
4437
4438
4439
4440
4441
4442
4443
4444
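/*
 * ips_freescb - undo any DMA mapping held by an SCB and return it to
 * the free list (the reserved internal SCB is never requeued).
 */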
4445static void
4446ips_freescb(ips_ha_t * ha, ips_scb_t * scb)
4447{
4448
4449 METHOD_TRACE("ips_freescb", 1);
4450 if (scb->flags & IPS_SCB_MAP_SG)
4451 scsi_dma_unmap(scb->scsi_cmd);
4452 else if (scb->flags & IPS_SCB_MAP_SINGLE)
4453 dma_unmap_single(&ha->pcidev->dev, scb->data_busaddr,
4454 scb->data_len, IPS_DMA_DIR(scb));
4455
4456
4457 if (IPS_COMMAND_ID(ha, scb) < (ha->max_cmds - 1)) {
4458 scb->q_next = ha->scb_freelist;
4459 ha->scb_freelist = scb;
4460 }
4461}
4462
4463
4464
4465
4466
4467
4468
4469
4470
4471
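/*
 * ips_isinit_copperhead / _memio - report whether a copperhead-family
 * controller is already initialized, based on the interrupt-enable and
 * bus-master-enable bits.
 */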
4472static int
4473ips_isinit_copperhead(ips_ha_t * ha)
4474{
4475 uint8_t scpr;
4476 uint8_t isr;
4477
4478 METHOD_TRACE("ips_isinit_copperhead", 1);
4479
4480 isr = inb(ha->io_addr + IPS_REG_HISR);
4481 scpr = inb(ha->io_addr + IPS_REG_SCPR);
4482
4483 if (((isr & IPS_BIT_EI) == 0) && ((scpr & IPS_BIT_EBM) == 0))
4484 return (0);
4485 else
4486 return (1);
4487}
4488
4489
4490
4491
4492
4493
4494
4495
4496
4497
4498static int
4499ips_isinit_copperhead_memio(ips_ha_t * ha)
4500{
4501 uint8_t isr = 0;
4502 uint8_t scpr;
4503
4504 METHOD_TRACE("ips_is_init_copperhead_memio", 1);
4505
4506 isr = readb(ha->mem_ptr + IPS_REG_HISR);
4507 scpr = readb(ha->mem_ptr + IPS_REG_SCPR);
4508
4509 if (((isr & IPS_BIT_EI) == 0) && ((scpr & IPS_BIT_EBM) == 0))
4510 return (0);
4511 else
4512 return (1);
4513}
4514
4515
4516
4517
4518
4519
4520
4521
4522
4523
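/*
 * ips_isinit_morpheus - report whether a morpheus-family controller is
 * initialized; a controller found with an interrupt pending is flushed
 * and reset first.
 */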
4524static int
4525ips_isinit_morpheus(ips_ha_t * ha)
4526{
4527 uint32_t post;
4528 uint32_t bits;
4529
4530 METHOD_TRACE("ips_is_init_morpheus", 1);
4531
4532 if (ips_isintr_morpheus(ha))
4533 ips_flush_and_reset(ha);
4534
4535 post = readl(ha->mem_ptr + IPS_REG_I960_MSG0);
4536 bits = readl(ha->mem_ptr + IPS_REG_I2O_HIR);
4537
4538 if (post == 0)
4539 return (0);
4540 else if (bits & 0x3)
4541 return (0);
4542 else
4543 return (1);
4544}
4545
4546
4547
4548
4549
4550
4551
4552
4553
4554
4555
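/*
 * ips_flush_and_reset - issue a cache flush through a temporary SCB in
 * coherent DMA memory, poll for its completion, then reset the adapter.
 */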
4556static void
4557ips_flush_and_reset(ips_ha_t *ha)
4558{
4559 ips_scb_t *scb;
4560 int ret;
4561 int time;
4562 int done;
4563 dma_addr_t command_dma;
4564
4565
4566 scb = dma_alloc_coherent(&ha->pcidev->dev, sizeof(ips_scb_t),
4567 &command_dma, GFP_KERNEL);
4568 if (scb) {
4569 memset(scb, 0, sizeof(ips_scb_t));
4570 ips_init_scb(ha, scb);
4571 scb->scb_busaddr = command_dma;
4572
4573 scb->timeout = ips_cmd_timeout;
4574 scb->cdb[0] = IPS_CMD_FLUSH;
4575
4576 scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH;
4577 scb->cmd.flush_cache.command_id = IPS_MAX_CMDS;
4578 scb->cmd.flush_cache.state = IPS_NORM_STATE;
4579 scb->cmd.flush_cache.reserved = 0;
4580 scb->cmd.flush_cache.reserved2 = 0;
4581 scb->cmd.flush_cache.reserved3 = 0;
4582 scb->cmd.flush_cache.reserved4 = 0;
4583
4584 ret = ips_send_cmd(ha, scb);
4585
4586 if (ret == IPS_SUCCESS) {
4587 time = 60 * IPS_ONE_SEC;
4588 done = 0;
4589
4590 while ((time > 0) && (!done)) {
4591 done = ips_poll_for_flush_complete(ha);
4592
4593 udelay(1000);
4594 time--;
4595 }
4596 }
4597 }
4598
4599
4600 (*ha->func.reset) (ha);
4601
4602 if (scb)	/* free only if the allocation above succeeded */
dma_free_coherent(&ha->pcidev->dev, sizeof(ips_scb_t), scb, command_dma);
4603 return;
4604}
4605
4606
4607
4608
4609
4610
4611
4612
4613
4614
4615
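/*
 * ips_poll_for_flush_complete - drain the status queue, returning 1
 * when the flush command (reserved id IPS_MAX_CMDS) completes and 0
 * when no further status is available.
 */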
4616static int
4617ips_poll_for_flush_complete(ips_ha_t * ha)
4618{
4619 IPS_STATUS cstatus;
4620
4621 while (TRUE) {
4622 cstatus.value = (*ha->func.statupd) (ha);
4623
4624 if (cstatus.value == 0xffffffff)
4625 break;
4626
4627
4628 if (cstatus.fields.command_id == IPS_MAX_CMDS)
4629 return 1;
4630 }
4631
4632 return 0;
4633}
4634
4635
4636
4637
4638
4639
4640
4641
4642
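/*
 * Interrupt enable helpers for the copperhead (port and memory mapped)
 * and morpheus register interfaces.  The read back after each write
 * flushes PCI posting.
 */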
4643static void
4644ips_enable_int_copperhead(ips_ha_t * ha)
4645{
4646 METHOD_TRACE("ips_enable_int_copperhead", 1);
4647
4648 outb(IPS_BIT_EI, ha->io_addr + IPS_REG_HISR);
4649 inb(ha->io_addr + IPS_REG_HISR);
4650}
4651
4652
4653
4654
4655
4656
4657
4658
4659
4660static void
4661ips_enable_int_copperhead_memio(ips_ha_t * ha)
4662{
4663 METHOD_TRACE("ips_enable_int_copperhead_memio", 1);
4664
4665 writeb(IPS_BIT_EI, ha->mem_ptr + IPS_REG_HISR);
4666 readb(ha->mem_ptr + IPS_REG_HISR);
4667}
4668
4669
4670
4671
4672
4673
4674
4675
4676
4677static void
4678ips_enable_int_morpheus(ips_ha_t * ha)
4679{
4680 uint32_t Oimr;
4681
4682 METHOD_TRACE("ips_enable_int_morpheus", 1);
4683
4684 Oimr = readl(ha->mem_ptr + IPS_REG_I960_OIMR);
4685 Oimr &= ~0x08;
4686 writel(Oimr, ha->mem_ptr + IPS_REG_I960_OIMR);
4687 readl(ha->mem_ptr + IPS_REG_I960_OIMR);
4688}
4689
4690
4691
4692
4693
4694
4695
4696
4697
4698
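/*
 * ips_init_copperhead - wait for a copperhead controller to finish POST
 * and configuration, validate the POST status, then enable bus
 * mastering and interrupts.  ips_init_copperhead_memio below is the
 * memory-mapped equivalent.
 */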
4699static int
4700ips_init_copperhead(ips_ha_t * ha)
4701{
4702 uint8_t Isr;
4703 uint8_t Cbsp;
4704 uint8_t PostByte[IPS_MAX_POST_BYTES];
4705 int i, j;
4706
4707 METHOD_TRACE("ips_init_copperhead", 1);
4708
4709 for (i = 0; i < IPS_MAX_POST_BYTES; i++) {
4710 for (j = 0; j < 45; j++) {
4711 Isr = inb(ha->io_addr + IPS_REG_HISR);
4712 if (Isr & IPS_BIT_GHI)
4713 break;
4714
4715
4716 MDELAY(IPS_ONE_SEC);
4717 }
4718
4719 if (j >= 45)
4720
4721 return (0);
4722
4723 PostByte[i] = inb(ha->io_addr + IPS_REG_ISPR);
4724 outb(Isr, ha->io_addr + IPS_REG_HISR);
4725 }
4726
4727 if (PostByte[0] < IPS_GOOD_POST_STATUS) {
4728 IPS_PRINTK(KERN_WARNING, ha->pcidev,
4729 "reset controller fails (post status %x %x).\n",
4730 PostByte[0], PostByte[1]);
4731
4732 return (0);
4733 }
4734
4735 for (i = 0; i < IPS_MAX_CONFIG_BYTES; i++) {
4736 for (j = 0; j < 240; j++) {
4737 Isr = inb(ha->io_addr + IPS_REG_HISR);
4738 if (Isr & IPS_BIT_GHI)
4739 break;
4740
4741
4742 MDELAY(IPS_ONE_SEC);
4743 }
4744
4745 if (j >= 240)
4746
4747 return (0);
4748
4749 inb(ha->io_addr + IPS_REG_ISPR);
4750 outb(Isr, ha->io_addr + IPS_REG_HISR);
4751 }
4752
4753 for (i = 0; i < 240; i++) {
4754 Cbsp = inb(ha->io_addr + IPS_REG_CBSP);
4755
4756 if ((Cbsp & IPS_BIT_OP) == 0)
4757 break;
4758
4759
4760 MDELAY(IPS_ONE_SEC);
4761 }
4762
4763 if (i >= 240)
4764
4765 return (0);
4766
4767
4768 outl(0x1010, ha->io_addr + IPS_REG_CCCR);
4769
4770
4771 outb(IPS_BIT_EBM, ha->io_addr + IPS_REG_SCPR);
4772
4773 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
4774
4775 outl(0, ha->io_addr + IPS_REG_NDAE);
4776
4777
4778 outb(IPS_BIT_EI, ha->io_addr + IPS_REG_HISR);
4779
4780 return (1);
4781}
4782
4783
4784
4785
4786
4787
4788
4789
4790
4791
4792static int
4793ips_init_copperhead_memio(ips_ha_t * ha)
4794{
4795 uint8_t Isr = 0;
4796 uint8_t Cbsp;
4797 uint8_t PostByte[IPS_MAX_POST_BYTES];
4798 int i, j;
4799
4800 METHOD_TRACE("ips_init_copperhead_memio", 1);
4801
4802 for (i = 0; i < IPS_MAX_POST_BYTES; i++) {
4803 for (j = 0; j < 45; j++) {
4804 Isr = readb(ha->mem_ptr + IPS_REG_HISR);
4805 if (Isr & IPS_BIT_GHI)
4806 break;
4807
4808
4809 MDELAY(IPS_ONE_SEC);
4810 }
4811
4812 if (j >= 45)
4813
4814 return (0);
4815
4816 PostByte[i] = readb(ha->mem_ptr + IPS_REG_ISPR);
4817 writeb(Isr, ha->mem_ptr + IPS_REG_HISR);
4818 }
4819
4820 if (PostByte[0] < IPS_GOOD_POST_STATUS) {
4821 IPS_PRINTK(KERN_WARNING, ha->pcidev,
4822 "reset controller fails (post status %x %x).\n",
4823 PostByte[0], PostByte[1]);
4824
4825 return (0);
4826 }
4827
4828 for (i = 0; i < IPS_MAX_CONFIG_BYTES; i++) {
4829 for (j = 0; j < 240; j++) {
4830 Isr = readb(ha->mem_ptr + IPS_REG_HISR);
4831 if (Isr & IPS_BIT_GHI)
4832 break;
4833
4834
4835 MDELAY(IPS_ONE_SEC);
4836 }
4837
4838 if (j >= 240)
4839
4840 return (0);
4841
4842 readb(ha->mem_ptr + IPS_REG_ISPR);
4843 writeb(Isr, ha->mem_ptr + IPS_REG_HISR);
4844 }
4845
4846 for (i = 0; i < 240; i++) {
4847 Cbsp = readb(ha->mem_ptr + IPS_REG_CBSP);
4848
4849 if ((Cbsp & IPS_BIT_OP) == 0)
4850 break;
4851
4852
4853 MDELAY(IPS_ONE_SEC);
4854 }
4855
4856 if (i >= 240)
4857
4858 return (0);
4859
4860
4861 writel(0x1010, ha->mem_ptr + IPS_REG_CCCR);
4862
4863
4864 writeb(IPS_BIT_EBM, ha->mem_ptr + IPS_REG_SCPR);
4865
4866 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
4867
4868 writel(0, ha->mem_ptr + IPS_REG_NDAE);
4869
4870
4871 writeb(IPS_BIT_EI, ha->mem_ptr + IPS_REG_HISR);
4872
4873
4874 return (1);
4875}
4876
4877
4878
4879
4880
4881
4882
4883
4884
4885
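/*
 * ips_init_morpheus - wait for the POST and configuration values from a
 * morpheus controller (allowing extra time while the battery PIC is
 * being flashed), then unmask its outbound interrupt.
 */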
4886static int
4887ips_init_morpheus(ips_ha_t * ha)
4888{
4889 uint32_t Post;
4890 uint32_t Config;
4891 uint32_t Isr;
4892 uint32_t Oimr;
4893 int i;
4894
4895 METHOD_TRACE("ips_init_morpheus", 1);
4896
4897
4898 for (i = 0; i < 45; i++) {
4899 Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR);
4900
4901 if (Isr & IPS_BIT_I960_MSG0I)
4902 break;
4903
4904
4905 MDELAY(IPS_ONE_SEC);
4906 }
4907
4908 if (i >= 45) {
4909
4910 IPS_PRINTK(KERN_WARNING, ha->pcidev,
4911 "timeout waiting for post.\n");
4912
4913 return (0);
4914 }
4915
4916 Post = readl(ha->mem_ptr + IPS_REG_I960_MSG0);
4917
4918 if (Post == 0x4F00) {
4919 IPS_PRINTK(KERN_WARNING, ha->pcidev,
4920 "Flashing Battery PIC, Please wait ...\n");
4921
4922
4923 Isr = (uint32_t) IPS_BIT_I960_MSG0I;
4924 writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR);
4925
4926 for (i = 0; i < 120; i++) {
4927 Post = readl(ha->mem_ptr + IPS_REG_I960_MSG0);
4928 if (Post != 0x4F00)
4929 break;
4930
4931 MDELAY(IPS_ONE_SEC);
4932 }
4933
4934 if (i >= 120) {
4935 IPS_PRINTK(KERN_WARNING, ha->pcidev,
4936 "timeout waiting for Battery PIC Flash\n");
4937 return (0);
4938 }
4939
4940 }
4941
4942
4943 Isr = (uint32_t) IPS_BIT_I960_MSG0I;
4944 writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR);
4945
4946 if (Post < (IPS_GOOD_POST_STATUS << 8)) {
4947 IPS_PRINTK(KERN_WARNING, ha->pcidev,
4948 "reset controller fails (post status %x).\n", Post);
4949
4950 return (0);
4951 }
4952
4953
4954 for (i = 0; i < 240; i++) {
4955 Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR);
4956
4957 if (Isr & IPS_BIT_I960_MSG1I)
4958 break;
4959
4960
4961 MDELAY(IPS_ONE_SEC);
4962 }
4963
4964 if (i >= 240) {
4965
4966 IPS_PRINTK(KERN_WARNING, ha->pcidev,
4967 "timeout waiting for config.\n");
4968
4969 return (0);
4970 }
4971
4972 Config = readl(ha->mem_ptr + IPS_REG_I960_MSG1);
4973
4974
4975 Isr = (uint32_t) IPS_BIT_I960_MSG1I;
4976 writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR);
4977
4978
4979 Oimr = readl(ha->mem_ptr + IPS_REG_I960_OIMR);
4980 Oimr &= ~0x8;
4981 writel(Oimr, ha->mem_ptr + IPS_REG_I960_OIMR);
4982
4983
4984
4985
4986 if (Post == 0xEF10) {
4987 if ((Config == 0x000F) || (Config == 0x0009))
4988 ha->requires_esl = 1;
4989 }
4990
4991 return (1);
4992}
4993
4994
4995
4996
4997
4998
4999
5000
5001
5002
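/*
 * ips_reset_copperhead / _memio and ips_reset_morpheus - reset the
 * controller (up to two attempts) and re-run the adapter init routine.
 */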
5003static int
5004ips_reset_copperhead(ips_ha_t * ha)
5005{
5006 int reset_counter;
5007
5008 METHOD_TRACE("ips_reset_copperhead", 1);
5009
5010 DEBUG_VAR(1, "(%s%d) ips_reset_copperhead: io addr: %x, irq: %d",
5011 ips_name, ha->host_num, ha->io_addr, ha->pcidev->irq);
5012
5013 reset_counter = 0;
5014
5015 while (reset_counter < 2) {
5016 reset_counter++;
5017
5018 outb(IPS_BIT_RST, ha->io_addr + IPS_REG_SCPR);
5019
5020
5021 MDELAY(IPS_ONE_SEC);
5022
5023 outb(0, ha->io_addr + IPS_REG_SCPR);
5024
5025
5026 MDELAY(IPS_ONE_SEC);
5027
5028 if ((*ha->func.init) (ha))
5029 break;
5030 else if (reset_counter >= 2) {
5031
5032 return (0);
5033 }
5034 }
5035
5036 return (1);
5037}
5038
5039
5040
5041
5042
5043
5044
5045
5046
5047
5048static int
5049ips_reset_copperhead_memio(ips_ha_t * ha)
5050{
5051 int reset_counter;
5052
5053 METHOD_TRACE("ips_reset_copperhead_memio", 1);
5054
5055 DEBUG_VAR(1, "(%s%d) ips_reset_copperhead_memio: mem addr: %x, irq: %d",
5056 ips_name, ha->host_num, ha->mem_addr, ha->pcidev->irq);
5057
5058 reset_counter = 0;
5059
5060 while (reset_counter < 2) {
5061 reset_counter++;
5062
5063 writeb(IPS_BIT_RST, ha->mem_ptr + IPS_REG_SCPR);
5064
5065
5066 MDELAY(IPS_ONE_SEC);
5067
5068 writeb(0, ha->mem_ptr + IPS_REG_SCPR);
5069
5070
5071 MDELAY(IPS_ONE_SEC);
5072
5073 if ((*ha->func.init) (ha))
5074 break;
5075 else if (reset_counter >= 2) {
5076
5077 return (0);
5078 }
5079 }
5080
5081 return (1);
5082}
5083
5084
5085
5086
5087
5088
5089
5090
5091
5092
5093static int
5094ips_reset_morpheus(ips_ha_t * ha)
5095{
5096 int reset_counter;
5097 uint8_t junk;
5098
5099 METHOD_TRACE("ips_reset_morpheus", 1);
5100
5101 DEBUG_VAR(1, "(%s%d) ips_reset_morpheus: mem addr: %x, irq: %d",
5102 ips_name, ha->host_num, ha->mem_addr, ha->pcidev->irq);
5103
5104 reset_counter = 0;
5105
5106 while (reset_counter < 2) {
5107 reset_counter++;
5108
5109 writel(0x80000000, ha->mem_ptr + IPS_REG_I960_IDR);
5110
5111
5112 MDELAY(5 * IPS_ONE_SEC);
5113
5114
5115 pci_read_config_byte(ha->pcidev, 4, &junk);
5116
5117 if ((*ha->func.init) (ha))
5118 break;
5119 else if (reset_counter >= 2) {
5120
5121 return (0);
5122 }
5123 }
5124
5125 return (1);
5126}
5127
5128
5129
5130
5131
5132
5133
5134
5135
5136
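/*
 * ips_statinit / _memio - program the adapter's status queue start,
 * end, head and tail registers and initialize the driver-side status
 * queue pointers.
 */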
5137static void
5138ips_statinit(ips_ha_t * ha)
5139{
5140 uint32_t phys_status_start;
5141
5142 METHOD_TRACE("ips_statinit", 1);
5143
5144 ha->adapt->p_status_start = ha->adapt->status;
5145 ha->adapt->p_status_end = ha->adapt->status + IPS_MAX_CMDS;
5146 ha->adapt->p_status_tail = ha->adapt->status;
5147
5148 phys_status_start = ha->adapt->hw_status_start;
5149 outl(phys_status_start, ha->io_addr + IPS_REG_SQSR);
5150 outl(phys_status_start + IPS_STATUS_Q_SIZE,
5151 ha->io_addr + IPS_REG_SQER);
5152 outl(phys_status_start + IPS_STATUS_SIZE,
5153 ha->io_addr + IPS_REG_SQHR);
5154 outl(phys_status_start, ha->io_addr + IPS_REG_SQTR);
5155
5156 ha->adapt->hw_status_tail = phys_status_start;
5157}
5158
5159
5160
5161
5162
5163
5164
5165
5166
5167
5168static void
5169ips_statinit_memio(ips_ha_t * ha)
5170{
5171 uint32_t phys_status_start;
5172
5173 METHOD_TRACE("ips_statinit_memio", 1);
5174
5175 ha->adapt->p_status_start = ha->adapt->status;
5176 ha->adapt->p_status_end = ha->adapt->status + IPS_MAX_CMDS;
5177 ha->adapt->p_status_tail = ha->adapt->status;
5178
5179 phys_status_start = ha->adapt->hw_status_start;
5180 writel(phys_status_start, ha->mem_ptr + IPS_REG_SQSR);
5181 writel(phys_status_start + IPS_STATUS_Q_SIZE,
5182 ha->mem_ptr + IPS_REG_SQER);
5183 writel(phys_status_start + IPS_STATUS_SIZE, ha->mem_ptr + IPS_REG_SQHR);
5184 writel(phys_status_start, ha->mem_ptr + IPS_REG_SQTR);
5185
5186 ha->adapt->hw_status_tail = phys_status_start;
5187}
5188
5189
5190
5191
5192
5193
5194
5195
5196
5197
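/*
 * ips_statupd_copperhead / _memio - advance the status queue tail
 * (wrapping at the end), write the new tail address back to the adapter
 * and return the status word at the tail.
 */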
5198static uint32_t
5199ips_statupd_copperhead(ips_ha_t * ha)
5200{
5201 METHOD_TRACE("ips_statupd_copperhead", 1);
5202
5203 if (ha->adapt->p_status_tail != ha->adapt->p_status_end) {
5204 ha->adapt->p_status_tail++;
5205 ha->adapt->hw_status_tail += sizeof (IPS_STATUS);
5206 } else {
5207 ha->adapt->p_status_tail = ha->adapt->p_status_start;
5208 ha->adapt->hw_status_tail = ha->adapt->hw_status_start;
5209 }
5210
5211 outl(ha->adapt->hw_status_tail,
5212 ha->io_addr + IPS_REG_SQTR);
5213
5214 return (ha->adapt->p_status_tail->value);
5215}
5216
5217
5218
5219
5220
5221
5222
5223
5224
5225
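/*
 * ips_statupd_copperhead_memio - advance the status-queue tail (wrapping at
 * the end of the queue), update the SQ tail register via memory-mapped I/O,
 * and return the newly completed status value.
 */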
5226static uint32_t
5227ips_statupd_copperhead_memio(ips_ha_t * ha)
5228{
5229 METHOD_TRACE("ips_statupd_copperhead_memio", 1);
5230
5231 if (ha->adapt->p_status_tail != ha->adapt->p_status_end) {
5232 ha->adapt->p_status_tail++;
5233 ha->adapt->hw_status_tail += sizeof (IPS_STATUS);
5234 } else {
5235 ha->adapt->p_status_tail = ha->adapt->p_status_start;
5236 ha->adapt->hw_status_tail = ha->adapt->hw_status_start;
5237 }
5238
5239 writel(ha->adapt->hw_status_tail, ha->mem_ptr + IPS_REG_SQTR);
5240
5241 return (ha->adapt->p_status_tail->value);
5242}
5243
5244
5245
5246
5247
5248
5249
5250
5251
5252
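/*
 * ips_statupd_morpheus - fetch the next completed-command status from the
 * I2O outbound message queue register.
 */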
5253static uint32_t
5254ips_statupd_morpheus(ips_ha_t * ha)
5255{
5256 uint32_t val;
5257
5258 METHOD_TRACE("ips_statupd_morpheus", 1);
5259
5260 val = readl(ha->mem_ptr + IPS_REG_I2O_OUTMSGQ);
5261
5262 return (val);
5263}
5264
5265
5266
5267
5268
5269
5270
5271
5272
5273
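/*
 * ips_issue_copperhead - send a command to a Copperhead adapter via port
 * I/O: wait for the CCCR semaphore bit to clear, then post the SCB bus
 * address and start the command.  Returns IPS_SUCCESS, or IPS_FAILURE on a
 * semaphore timeout.
 */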
5274static int
5275ips_issue_copperhead(ips_ha_t * ha, ips_scb_t * scb)
5276{
5277 uint32_t TimeOut;
5278 uint32_t val;
5279
5280 METHOD_TRACE("ips_issue_copperhead", 1);
5281
5282 if (scb->scsi_cmd) {
5283 DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)",
5284 ips_name,
5285 ha->host_num,
5286 scb->cdb[0],
5287 scb->cmd.basic_io.command_id,
5288 scb->bus, scb->target_id, scb->lun);
5289 } else {
		DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d",
			  ips_name, ha->host_num, scb->cmd.basic_io.command_id);
5292 }
5293
5294 TimeOut = 0;
5295
5296 while ((val =
5297 le32_to_cpu(inl(ha->io_addr + IPS_REG_CCCR))) & IPS_BIT_SEM) {
5298 udelay(1000);
5299
5300 if (++TimeOut >= IPS_SEM_TIMEOUT) {
5301 if (!(val & IPS_BIT_START_STOP))
5302 break;
5303
5304 IPS_PRINTK(KERN_WARNING, ha->pcidev,
5305 "ips_issue val [0x%x].\n", val);
5306 IPS_PRINTK(KERN_WARNING, ha->pcidev,
5307 "ips_issue semaphore chk timeout.\n");
5308
5309 return (IPS_FAILURE);
5310 }
5311 }
5312
5313 outl(scb->scb_busaddr, ha->io_addr + IPS_REG_CCSAR);
5314 outw(IPS_BIT_START_CMD, ha->io_addr + IPS_REG_CCCR);
5315
5316 return (IPS_SUCCESS);
5317}
5318
5319
5320
5321
5322
5323
5324
5325
5326
5327
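/*
 * ips_issue_copperhead_memio - send a command to a Copperhead adapter via
 * memory-mapped I/O: wait for the CCCR semaphore bit to clear, then post
 * the SCB bus address and start the command.  Returns IPS_SUCCESS, or
 * IPS_FAILURE on a semaphore timeout.
 */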
5328static int
5329ips_issue_copperhead_memio(ips_ha_t * ha, ips_scb_t * scb)
5330{
5331 uint32_t TimeOut;
5332 uint32_t val;
5333
5334 METHOD_TRACE("ips_issue_copperhead_memio", 1);
5335
5336 if (scb->scsi_cmd) {
5337 DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)",
5338 ips_name,
5339 ha->host_num,
5340 scb->cdb[0],
5341 scb->cmd.basic_io.command_id,
5342 scb->bus, scb->target_id, scb->lun);
5343 } else {
5344 DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d",
5345 ips_name, ha->host_num, scb->cmd.basic_io.command_id);
5346 }
5347
5348 TimeOut = 0;
5349
5350 while ((val = readl(ha->mem_ptr + IPS_REG_CCCR)) & IPS_BIT_SEM) {
5351 udelay(1000);
5352
5353 if (++TimeOut >= IPS_SEM_TIMEOUT) {
5354 if (!(val & IPS_BIT_START_STOP))
5355 break;
5356
5357 IPS_PRINTK(KERN_WARNING, ha->pcidev,
5358 "ips_issue val [0x%x].\n", val);
5359 IPS_PRINTK(KERN_WARNING, ha->pcidev,
5360 "ips_issue semaphore chk timeout.\n");
5361
5362 return (IPS_FAILURE);
5363 }
5364 }
5365
5366 writel(scb->scb_busaddr, ha->mem_ptr + IPS_REG_CCSAR);
5367 writel(IPS_BIT_START_CMD, ha->mem_ptr + IPS_REG_CCCR);
5368
5369 return (IPS_SUCCESS);
5370}
5371
5372
5373
5374
5375
5376
5377
5378
5379
5380
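/*
 * ips_issue_i2o - send a command to an I2O-style adapter by posting the SCB
 * bus address to the inbound message queue via port I/O.
 */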
5381static int
5382ips_issue_i2o(ips_ha_t * ha, ips_scb_t * scb)
5383{
5384
5385 METHOD_TRACE("ips_issue_i2o", 1);
5386
5387 if (scb->scsi_cmd) {
5388 DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)",
5389 ips_name,
5390 ha->host_num,
5391 scb->cdb[0],
5392 scb->cmd.basic_io.command_id,
5393 scb->bus, scb->target_id, scb->lun);
5394 } else {
5395 DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d",
5396 ips_name, ha->host_num, scb->cmd.basic_io.command_id);
5397 }
5398
5399 outl(scb->scb_busaddr, ha->io_addr + IPS_REG_I2O_INMSGQ);
5400
5401 return (IPS_SUCCESS);
5402}
5403
5404
5405
5406
5407
5408
5409
5410
5411
5412
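/*
 * ips_issue_i2o_memio - send a command to an I2O-style adapter by posting
 * the SCB bus address to the inbound message queue via memory-mapped I/O.
 */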
5413static int
5414ips_issue_i2o_memio(ips_ha_t * ha, ips_scb_t * scb)
5415{
5416
5417 METHOD_TRACE("ips_issue_i2o_memio", 1);
5418
5419 if (scb->scsi_cmd) {
5420 DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)",
5421 ips_name,
5422 ha->host_num,
5423 scb->cdb[0],
5424 scb->cmd.basic_io.command_id,
5425 scb->bus, scb->target_id, scb->lun);
5426 } else {
5427 DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d",
5428 ips_name, ha->host_num, scb->cmd.basic_io.command_id);
5429 }
5430
5431 writel(scb->scb_busaddr, ha->mem_ptr + IPS_REG_I2O_INMSGQ);
5432
5433 return (IPS_SUCCESS);
5434}
5435
5436
5437
5438
5439
5440
5441
5442
5443
5444
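/*
 * ips_isintr_copperhead - test whether a Copperhead adapter is interrupting.
 * A HISR value of 0xFF means nothing is there; IPS_BIT_SCE means a command
 * completed (return 1); other status bits are simply acknowledged.
 */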
5445static int
5446ips_isintr_copperhead(ips_ha_t * ha)
5447{
5448 uint8_t Isr;
5449
5450 METHOD_TRACE("ips_isintr_copperhead", 2);
5451
5452 Isr = inb(ha->io_addr + IPS_REG_HISR);
5453
	if (Isr == 0xFF)
		/* register reads back all ones -- nothing out there */
		return (0);
5457
5458 if (Isr & IPS_BIT_SCE)
5459 return (1);
5460 else if (Isr & (IPS_BIT_SQO | IPS_BIT_GHI)) {
5461
5462
5463 outb(Isr, ha->io_addr + IPS_REG_HISR);
5464 }
5465
5466 return (0);
5467}
5468
5469
5470
5471
5472
5473
5474
5475
5476
5477
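/*
 * ips_isintr_copperhead_memio - memory-mapped variant of the Copperhead
 * interrupt test: 0xFF means nothing is there, IPS_BIT_SCE means a command
 * completed, other status bits are acknowledged.
 */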
5478static int
5479ips_isintr_copperhead_memio(ips_ha_t * ha)
5480{
5481 uint8_t Isr;
5482
5483 METHOD_TRACE("ips_isintr_memio", 2);
5484
5485 Isr = readb(ha->mem_ptr + IPS_REG_HISR);
5486
	if (Isr == 0xFF)
		/* register reads back all ones -- nothing out there */
		return (0);
5490
5491 if (Isr & IPS_BIT_SCE)
5492 return (1);
5493 else if (Isr & (IPS_BIT_SQO | IPS_BIT_GHI)) {
5494
5495
5496 writeb(Isr, ha->mem_ptr + IPS_REG_HISR);
5497 }
5498
5499 return (0);
5500}
5501
5502
5503
5504
5505
5506
5507
5508
5509
5510
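/*
 * ips_isintr_morpheus - test whether a Morpheus adapter is interrupting by
 * checking the outbound post-queue bit (IPS_BIT_I2O_OPQI) in the I2O host
 * interrupt register.
 */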
5511static int
5512ips_isintr_morpheus(ips_ha_t * ha)
5513{
5514 uint32_t Isr;
5515
5516 METHOD_TRACE("ips_isintr_morpheus", 2);
5517
5518 Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR);
5519
5520 if (Isr & IPS_BIT_I2O_OPQI)
5521 return (1);
5522 else
5523 return (0);
5524}
5525
5526
5527
5528
5529
5530
5531
5532
5533
5534
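/*
 * ips_wait - poll for up to 'time' seconds for the outstanding command to
 * finish (ha->waitflag cleared).  In IPS_INTR_IORL mode the adapter's
 * interrupt handler is called directly on each pass.  Returns IPS_SUCCESS
 * or IPS_FAILURE.
 */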
5535static int
5536ips_wait(ips_ha_t * ha, int time, int intr)
5537{
5538 int ret;
5539 int done;
5540
5541 METHOD_TRACE("ips_wait", 1);
5542
5543 ret = IPS_FAILURE;
5544 done = FALSE;
5545
5546 time *= IPS_ONE_SEC;
5547
5548 while ((time > 0) && (!done)) {
5549 if (intr == IPS_INTR_ON) {
5550 if (ha->waitflag == FALSE) {
5551 ret = IPS_SUCCESS;
5552 done = TRUE;
5553 break;
5554 }
5555 } else if (intr == IPS_INTR_IORL) {
			if (ha->waitflag == FALSE) {
				/* the command completed and the interrupt
				   handler cleared the wait flag */
				ret = IPS_SUCCESS;
				done = TRUE;
				break;
			}

			/* no completion yet -- poll the adapter's
			   interrupt handler ourselves */
			(*ha->func.intr) (ha);
		}

		udelay(1000);
5578 time--;
5579 }
5580
5581 return (ret);
5582}
5583
5584
5585
5586
5587
5588
5589
5590
5591
5592
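/*
 * ips_write_driver_status - read NVRAM page 5, repair the signature if it
 * is invalid, record the OS, adapter type and driver/BIOS versions, and
 * write the page back.  Returns 1 on success, 0 on failure.
 */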
5593static int
5594ips_write_driver_status(ips_ha_t * ha, int intr)
5595{
5596 METHOD_TRACE("ips_write_driver_status", 1);
5597
5598 if (!ips_readwrite_page5(ha, FALSE, intr)) {
5599 IPS_PRINTK(KERN_WARNING, ha->pcidev,
5600 "unable to read NVRAM page 5.\n");
5601
5602 return (0);
5603 }
5604
5605
5606
5607 if (le32_to_cpu(ha->nvram->signature) != IPS_NVRAM_P5_SIG) {
5608 DEBUG_VAR(1,
5609 "(%s%d) NVRAM page 5 has an invalid signature: %X.",
5610 ips_name, ha->host_num, ha->nvram->signature);
5611 ha->nvram->signature = IPS_NVRAM_P5_SIG;
5612 }
5613
5614 DEBUG_VAR(2,
5615 "(%s%d) Ad Type: %d, Ad Slot: %d, BIOS: %c%c%c%c %c%c%c%c.",
5616 ips_name, ha->host_num, le16_to_cpu(ha->nvram->adapter_type),
5617 ha->nvram->adapter_slot, ha->nvram->bios_high[0],
5618 ha->nvram->bios_high[1], ha->nvram->bios_high[2],
5619 ha->nvram->bios_high[3], ha->nvram->bios_low[0],
5620 ha->nvram->bios_low[1], ha->nvram->bios_low[2],
5621 ha->nvram->bios_low[3]);
5622
5623 ips_get_bios_version(ha, intr);
5624
5625
5626 ha->nvram->operating_system = IPS_OS_LINUX;
5627 ha->nvram->adapter_type = ha->ad_type;
5628 memcpy((char *) ha->nvram->driver_high, IPS_VERSION_HIGH, 4);
5629 memcpy((char *) ha->nvram->driver_low, IPS_VERSION_LOW, 4);
5630 memcpy((char *) ha->nvram->bios_high, ha->bios_version, 4);
5631 memcpy((char *) ha->nvram->bios_low, ha->bios_version + 4, 4);
5632
5633 ha->nvram->versioning = 0;
5634
5635
5636 if (!ips_readwrite_page5(ha, TRUE, intr)) {
5637 IPS_PRINTK(KERN_WARNING, ha->pcidev,
5638 "unable to write NVRAM page 5.\n");
5639
5640 return (0);
5641 }
5642
5643
5644 ha->slot_num = ha->nvram->adapter_slot;
5645
5646 return (1);
5647}
5648
5649
5650
5651
5652
5653
5654
5655
5656
5657
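/*
 * ips_read_adapter_status - issue an ENQUIRY command (using the reserved
 * last SCB) and wait for it; the data lands in ha->enq.  Returns 1 on
 * success, 0 on failure.
 */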
5658static int
5659ips_read_adapter_status(ips_ha_t * ha, int intr)
5660{
5661 ips_scb_t *scb;
5662 int ret;
5663
5664 METHOD_TRACE("ips_read_adapter_status", 1);
5665
5666 scb = &ha->scbs[ha->max_cmds - 1];
5667
5668 ips_init_scb(ha, scb);
5669
5670 scb->timeout = ips_cmd_timeout;
5671 scb->cdb[0] = IPS_CMD_ENQUIRY;
5672
5673 scb->cmd.basic_io.op_code = IPS_CMD_ENQUIRY;
5674 scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
5675 scb->cmd.basic_io.sg_count = 0;
5676 scb->cmd.basic_io.lba = 0;
5677 scb->cmd.basic_io.sector_count = 0;
5678 scb->cmd.basic_io.log_drv = 0;
5679 scb->data_len = sizeof (*ha->enq);
5680 scb->cmd.basic_io.sg_addr = ha->enq_busaddr;
5681
5682
5683 if (((ret =
5684 ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE)
5685 || (ret == IPS_SUCCESS_IMM)
5686 || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1))
5687 return (0);
5688
5689 return (1);
5690}
5691
5692
5693
5694
5695
5696
5697
5698
5699
5700
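/*
 * ips_read_subsystem_parameters - issue a GET_SUBSYS command and copy the
 * result from the ioctl DMA buffer into ha->subsys.  Returns 1 on success,
 * 0 on failure.
 */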
5701static int
5702ips_read_subsystem_parameters(ips_ha_t * ha, int intr)
5703{
5704 ips_scb_t *scb;
5705 int ret;
5706
5707 METHOD_TRACE("ips_read_subsystem_parameters", 1);
5708
5709 scb = &ha->scbs[ha->max_cmds - 1];
5710
5711 ips_init_scb(ha, scb);
5712
5713 scb->timeout = ips_cmd_timeout;
5714 scb->cdb[0] = IPS_CMD_GET_SUBSYS;
5715
5716 scb->cmd.basic_io.op_code = IPS_CMD_GET_SUBSYS;
5717 scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
5718 scb->cmd.basic_io.sg_count = 0;
5719 scb->cmd.basic_io.lba = 0;
5720 scb->cmd.basic_io.sector_count = 0;
5721 scb->cmd.basic_io.log_drv = 0;
5722 scb->data_len = sizeof (*ha->subsys);
5723 scb->cmd.basic_io.sg_addr = ha->ioctl_busaddr;
5724
5725
5726 if (((ret =
5727 ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE)
5728 || (ret == IPS_SUCCESS_IMM)
5729 || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1))
5730 return (0);
5731
5732 memcpy(ha->subsys, ha->ioctl_data, sizeof(*ha->subsys));
5733 return (1);
5734}
5735
5736
5737
5738
5739
5740
5741
5742
5743
5744
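/*
 * ips_read_config - read the adapter configuration into ha->conf.  On
 * failure the configuration is zeroed and the initiator IDs default to 7;
 * a "completed with error" status is still treated as success.
 */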
5745static int
5746ips_read_config(ips_ha_t * ha, int intr)
5747{
5748 ips_scb_t *scb;
5749 int i;
5750 int ret;
5751
5752 METHOD_TRACE("ips_read_config", 1);
5753
5754
5755 for (i = 0; i < 4; i++)
5756 ha->conf->init_id[i] = 7;
5757
5758 scb = &ha->scbs[ha->max_cmds - 1];
5759
5760 ips_init_scb(ha, scb);
5761
5762 scb->timeout = ips_cmd_timeout;
5763 scb->cdb[0] = IPS_CMD_READ_CONF;
5764
5765 scb->cmd.basic_io.op_code = IPS_CMD_READ_CONF;
5766 scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
5767 scb->data_len = sizeof (*ha->conf);
5768 scb->cmd.basic_io.sg_addr = ha->ioctl_busaddr;
5769
5770
5771 if (((ret =
5772 ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE)
5773 || (ret == IPS_SUCCESS_IMM)
5774 || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) {
5775
5776 memset(ha->conf, 0, sizeof (IPS_CONF));
5777
5778
5779 for (i = 0; i < 4; i++)
5780 ha->conf->init_id[i] = 7;
5781
5782
5783 if ((scb->basic_status & IPS_GSC_STATUS_MASK) ==
5784 IPS_CMD_CMPLT_WERROR)
5785 return (1);
5786
5787 return (0);
5788 }
5789
5790 memcpy(ha->conf, ha->ioctl_data, sizeof(*ha->conf));
5791 return (1);
5792}
5793
5794
5795
5796
5797
5798
5799
5800
5801
5802
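/*
 * ips_readwrite_page5 - read or write NVRAM page 5 through the ioctl DMA
 * buffer, with ha->nvram as the in-memory copy.  Returns 1 on success,
 * 0 on failure (in which case the in-memory copy is zeroed).
 */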
5803static int
5804ips_readwrite_page5(ips_ha_t * ha, int write, int intr)
5805{
5806 ips_scb_t *scb;
5807 int ret;
5808
5809 METHOD_TRACE("ips_readwrite_page5", 1);
5810
5811 scb = &ha->scbs[ha->max_cmds - 1];
5812
5813 ips_init_scb(ha, scb);
5814
5815 scb->timeout = ips_cmd_timeout;
5816 scb->cdb[0] = IPS_CMD_RW_NVRAM_PAGE;
5817
5818 scb->cmd.nvram.op_code = IPS_CMD_RW_NVRAM_PAGE;
5819 scb->cmd.nvram.command_id = IPS_COMMAND_ID(ha, scb);
5820 scb->cmd.nvram.page = 5;
5821 scb->cmd.nvram.write = write;
5822 scb->cmd.nvram.reserved = 0;
5823 scb->cmd.nvram.reserved2 = 0;
5824 scb->data_len = sizeof (*ha->nvram);
5825 scb->cmd.nvram.buffer_addr = ha->ioctl_busaddr;
5826 if (write)
5827 memcpy(ha->ioctl_data, ha->nvram, sizeof(*ha->nvram));
5828
5829
5830 if (((ret =
5831 ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE)
5832 || (ret == IPS_SUCCESS_IMM)
5833 || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) {
5834
5835 memset(ha->nvram, 0, sizeof (IPS_NVRAM_P5));
5836
5837 return (0);
5838 }
5839 if (!write)
5840 memcpy(ha->nvram, ha->ioctl_data, sizeof(*ha->nvram));
5841 return (1);
5842}
5843
5844
5845
5846
5847
5848
5849
5850
5851
5852
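/*
 * ips_clear_adapter - send a CONFIG SYNC command followed by an ERROR TABLE
 * (unlock stripe) command to clear the adapter.  Returns 1 on success,
 * 0 on failure.
 */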
5853static int
5854ips_clear_adapter(ips_ha_t * ha, int intr)
5855{
5856 ips_scb_t *scb;
5857 int ret;
5858
5859 METHOD_TRACE("ips_clear_adapter", 1);
5860
5861 scb = &ha->scbs[ha->max_cmds - 1];
5862
5863 ips_init_scb(ha, scb);
5864
5865 scb->timeout = ips_reset_timeout;
5866 scb->cdb[0] = IPS_CMD_CONFIG_SYNC;
5867
5868 scb->cmd.config_sync.op_code = IPS_CMD_CONFIG_SYNC;
5869 scb->cmd.config_sync.command_id = IPS_COMMAND_ID(ha, scb);
5870 scb->cmd.config_sync.channel = 0;
5871 scb->cmd.config_sync.source_target = IPS_POCL;
5872 scb->cmd.config_sync.reserved = 0;
5873 scb->cmd.config_sync.reserved2 = 0;
5874 scb->cmd.config_sync.reserved3 = 0;
5875
5876
5877 if (((ret =
5878 ips_send_wait(ha, scb, ips_reset_timeout, intr)) == IPS_FAILURE)
5879 || (ret == IPS_SUCCESS_IMM)
5880 || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1))
5881 return (0);
5882
5883
5884 ips_init_scb(ha, scb);
5885
5886 scb->cdb[0] = IPS_CMD_ERROR_TABLE;
5887 scb->timeout = ips_reset_timeout;
5888
5889 scb->cmd.unlock_stripe.op_code = IPS_CMD_ERROR_TABLE;
5890 scb->cmd.unlock_stripe.command_id = IPS_COMMAND_ID(ha, scb);
5891 scb->cmd.unlock_stripe.log_drv = 0;
5892 scb->cmd.unlock_stripe.control = IPS_CSL;
5893 scb->cmd.unlock_stripe.reserved = 0;
5894 scb->cmd.unlock_stripe.reserved2 = 0;
5895 scb->cmd.unlock_stripe.reserved3 = 0;
5896
5897
5898 if (((ret =
5899 ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE)
5900 || (ret == IPS_SUCCESS_IMM)
5901 || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1))
5902 return (0);
5903
5904 return (1);
5905}
5906
5907
5908
5909
5910
5911
5912
5913
5914
5915
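/*
 * ips_ffdc_reset - send a first-failure-data-capture (FFDC) command that
 * records the reset count and the current time.
 */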
5916static void
5917ips_ffdc_reset(ips_ha_t * ha, int intr)
5918{
5919 ips_scb_t *scb;
5920
5921 METHOD_TRACE("ips_ffdc_reset", 1);
5922
5923 scb = &ha->scbs[ha->max_cmds - 1];
5924
5925 ips_init_scb(ha, scb);
5926
5927 scb->timeout = ips_cmd_timeout;
5928 scb->cdb[0] = IPS_CMD_FFDC;
5929 scb->cmd.ffdc.op_code = IPS_CMD_FFDC;
5930 scb->cmd.ffdc.command_id = IPS_COMMAND_ID(ha, scb);
5931 scb->cmd.ffdc.reset_count = ha->reset_count;
5932 scb->cmd.ffdc.reset_type = 0x80;
5933
5934
5935 ips_fix_ffdc_time(ha, scb, ha->last_ffdc);
5936
5937
5938 ips_send_wait(ha, scb, ips_cmd_timeout, intr);
5939}
5940
5941
5942
5943
5944
5945
5946
5947
5948
5949
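/*
 * ips_ffdc_time - send an FFDC command that simply updates the adapter's
 * notion of the current time.
 */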
5950static void
5951ips_ffdc_time(ips_ha_t * ha)
5952{
5953 ips_scb_t *scb;
5954
5955 METHOD_TRACE("ips_ffdc_time", 1);
5956
5957 DEBUG_VAR(1, "(%s%d) Sending time update.", ips_name, ha->host_num);
5958
5959 scb = &ha->scbs[ha->max_cmds - 1];
5960
5961 ips_init_scb(ha, scb);
5962
5963 scb->timeout = ips_cmd_timeout;
5964 scb->cdb[0] = IPS_CMD_FFDC;
5965 scb->cmd.ffdc.op_code = IPS_CMD_FFDC;
5966 scb->cmd.ffdc.command_id = IPS_COMMAND_ID(ha, scb);
5967 scb->cmd.ffdc.reset_count = 0;
5968 scb->cmd.ffdc.reset_type = 0;
5969
5970
5971 ips_fix_ffdc_time(ha, scb, ha->last_ffdc);
5972
5973
5974 ips_send_wait(ha, scb, ips_cmd_timeout, IPS_FFDC);
5975}
5976
5977
5978
5979
5980
5981
5982
5983
5984
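/*
 * ips_fix_ffdc_time - break a time64_t down into the date/time fields of
 * the FFDC command.
 */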
5985static void
5986ips_fix_ffdc_time(ips_ha_t * ha, ips_scb_t * scb, time64_t current_time)
5987{
5988 struct tm tm;
5989
5990 METHOD_TRACE("ips_fix_ffdc_time", 1);
5991
5992 time64_to_tm(current_time, 0, &tm);
5993
5994 scb->cmd.ffdc.hour = tm.tm_hour;
5995 scb->cmd.ffdc.minute = tm.tm_min;
5996 scb->cmd.ffdc.second = tm.tm_sec;
5997 scb->cmd.ffdc.yearH = (tm.tm_year + 1900) / 100;
5998 scb->cmd.ffdc.yearL = tm.tm_year % 100;
5999 scb->cmd.ffdc.month = tm.tm_mon + 1;
6000 scb->cmd.ffdc.day = tm.tm_mday;
6001}
6002
6003
6004
6005
6006
6007
6008
6009
6010
6011
6012
6013
6014
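/*
 * ips_erase_bios - erase the flash BIOS via port I/O: issue the erase
 * command sequence, poll the flash status register, and try to suspend the
 * erase if it times out.  Returns 0 on success, 1 on failure.
 */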
6015static int
6016ips_erase_bios(ips_ha_t * ha)
6017{
6018 int timeout;
6019 uint8_t status = 0;
6020
6021 METHOD_TRACE("ips_erase_bios", 1);
6022
6023 status = 0;
6024
6025
6026 outl(0, ha->io_addr + IPS_REG_FLAP);
6027 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6028 udelay(25);
6029
6030 outb(0x50, ha->io_addr + IPS_REG_FLDP);
6031 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6032 udelay(25);
6033
6034
6035 outb(0x20, ha->io_addr + IPS_REG_FLDP);
6036 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6037 udelay(25);
6038
6039
6040 outb(0xD0, ha->io_addr + IPS_REG_FLDP);
6041 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6042 udelay(25);
6043
6044
6045 outb(0x70, ha->io_addr + IPS_REG_FLDP);
6046 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6047 udelay(25);
6048
6049 timeout = 80000;
6050
6051 while (timeout > 0) {
6052 if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
6053 outl(0, ha->io_addr + IPS_REG_FLAP);
6054 udelay(25);
6055 }
6056
6057 status = inb(ha->io_addr + IPS_REG_FLDP);
6058
6059 if (status & 0x80)
6060 break;
6061
6062 MDELAY(1);
6063 timeout--;
6064 }
6065
6066
6067 if (timeout <= 0) {
6068
6069
6070
6071 outb(0xB0, ha->io_addr + IPS_REG_FLDP);
6072 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6073 udelay(25);
6074
6075
6076 timeout = 10000;
6077 while (timeout > 0) {
6078 if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
6079 outl(0, ha->io_addr + IPS_REG_FLAP);
6080 udelay(25);
6081 }
6082
6083 status = inb(ha->io_addr + IPS_REG_FLDP);
6084
6085 if (status & 0xC0)
6086 break;
6087
6088 MDELAY(1);
6089 timeout--;
6090 }
6091
6092 return (1);
6093 }
6094
6095
	/* any error bit set in the flash status means the erase failed */
	if (status & 0x08)
		return (1);

	if (status & 0x30)
		return (1);
6104
6105
6106
6107 outb(0x50, ha->io_addr + IPS_REG_FLDP);
6108 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6109 udelay(25);
6110
6111
6112 outb(0xFF, ha->io_addr + IPS_REG_FLDP);
6113 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6114 udelay(25);
6115
6116 return (0);
6117}
6118
6119
6120
6121
6122
6123
6124
6125
6126
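/*
 * ips_erase_bios_memio - memory-mapped variant of the flash BIOS erase:
 * issue the erase command sequence, poll the flash status register, and try
 * to suspend the erase if it times out.  Returns 0 on success, 1 on failure.
 */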
6127static int
6128ips_erase_bios_memio(ips_ha_t * ha)
6129{
6130 int timeout;
6131 uint8_t status;
6132
6133 METHOD_TRACE("ips_erase_bios_memio", 1);
6134
6135 status = 0;
6136
6137
6138 writel(0, ha->mem_ptr + IPS_REG_FLAP);
6139 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6140 udelay(25);
6141
6142 writeb(0x50, ha->mem_ptr + IPS_REG_FLDP);
6143 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6144 udelay(25);
6145
6146
6147 writeb(0x20, ha->mem_ptr + IPS_REG_FLDP);
6148 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6149 udelay(25);
6150
6151
6152 writeb(0xD0, ha->mem_ptr + IPS_REG_FLDP);
6153 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6154 udelay(25);
6155
6156
6157 writeb(0x70, ha->mem_ptr + IPS_REG_FLDP);
6158 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6159 udelay(25);
6160
6161 timeout = 80000;
6162
6163 while (timeout > 0) {
6164 if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
6165 writel(0, ha->mem_ptr + IPS_REG_FLAP);
6166 udelay(25);
6167 }
6168
6169 status = readb(ha->mem_ptr + IPS_REG_FLDP);
6170
6171 if (status & 0x80)
6172 break;
6173
6174 MDELAY(1);
6175 timeout--;
6176 }
6177
6178
6179 if (timeout <= 0) {
6180
6181
6182
6183 writeb(0xB0, ha->mem_ptr + IPS_REG_FLDP);
6184 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6185 udelay(25);
6186
6187
6188 timeout = 10000;
6189 while (timeout > 0) {
6190 if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
6191 writel(0, ha->mem_ptr + IPS_REG_FLAP);
6192 udelay(25);
6193 }
6194
6195 status = readb(ha->mem_ptr + IPS_REG_FLDP);
6196
6197 if (status & 0xC0)
6198 break;
6199
6200 MDELAY(1);
6201 timeout--;
6202 }
6203
6204 return (1);
6205 }
6206
6207
	/* any error bit set in the flash status means the erase failed */
	if (status & 0x08)
		return (1);

	if (status & 0x30)
		return (1);
6216
6217
6218
6219 writeb(0x50, ha->mem_ptr + IPS_REG_FLDP);
6220 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6221 udelay(25);
6222
6223
6224 writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
6225 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6226 udelay(25);
6227
6228 return (0);
6229}
6230
6231
6232
6233
6234
6235
6236
6237
6238
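/*
 * ips_program_bios - write 'buffer' into the flash one byte at a time via
 * port I/O, polling the flash status after each byte.  Returns 0 on
 * success, 1 on a timeout or programming error.
 */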
6239static int
6240ips_program_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize,
6241 uint32_t offset)
6242{
6243 int i;
6244 int timeout;
6245 uint8_t status = 0;
6246
6247 METHOD_TRACE("ips_program_bios", 1);
6248
6249 status = 0;
6250
6251 for (i = 0; i < buffersize; i++) {
6252
6253 outl(i + offset, ha->io_addr + IPS_REG_FLAP);
6254 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6255 udelay(25);
6256
6257 outb(0x40, ha->io_addr + IPS_REG_FLDP);
6258 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6259 udelay(25);
6260
6261 outb(buffer[i], ha->io_addr + IPS_REG_FLDP);
6262 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6263 udelay(25);
6264
6265
6266 timeout = 1000;
6267 while (timeout > 0) {
6268 if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
6269 outl(0, ha->io_addr + IPS_REG_FLAP);
6270 udelay(25);
6271 }
6272
6273 status = inb(ha->io_addr + IPS_REG_FLDP);
6274
6275 if (status & 0x80)
6276 break;
6277
6278 MDELAY(1);
6279 timeout--;
6280 }
6281
6282 if (timeout == 0) {
6283
6284 outl(0, ha->io_addr + IPS_REG_FLAP);
6285 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6286 udelay(25);
6287
6288 outb(0xFF, ha->io_addr + IPS_REG_FLDP);
6289 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6290 udelay(25);
6291
6292 return (1);
6293 }
6294
6295
6296 if (status & 0x18) {
6297
6298 outl(0, ha->io_addr + IPS_REG_FLAP);
6299 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6300 udelay(25);
6301
6302 outb(0xFF, ha->io_addr + IPS_REG_FLDP);
6303 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6304 udelay(25);
6305
6306 return (1);
6307 }
6308 }
6309
6310
6311 outl(0, ha->io_addr + IPS_REG_FLAP);
6312 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6313 udelay(25);
6314
6315 outb(0xFF, ha->io_addr + IPS_REG_FLDP);
6316 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6317 udelay(25);
6318
6319 return (0);
6320}
6321
6322
6323
6324
6325
6326
6327
6328
6329
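/*
 * ips_program_bios_memio - write 'buffer' into the flash one byte at a time
 * via memory-mapped I/O, polling the flash status after each byte.  Returns
 * 0 on success, 1 on a timeout or programming error.
 */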
6330static int
6331ips_program_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize,
6332 uint32_t offset)
6333{
6334 int i;
6335 int timeout;
6336 uint8_t status = 0;
6337
6338 METHOD_TRACE("ips_program_bios_memio", 1);
6339
6340 status = 0;
6341
6342 for (i = 0; i < buffersize; i++) {
6343
6344 writel(i + offset, ha->mem_ptr + IPS_REG_FLAP);
6345 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6346 udelay(25);
6347
6348 writeb(0x40, ha->mem_ptr + IPS_REG_FLDP);
6349 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6350 udelay(25);
6351
6352 writeb(buffer[i], ha->mem_ptr + IPS_REG_FLDP);
6353 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6354 udelay(25);
6355
6356
6357 timeout = 1000;
6358 while (timeout > 0) {
6359 if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
6360 writel(0, ha->mem_ptr + IPS_REG_FLAP);
6361 udelay(25);
6362 }
6363
6364 status = readb(ha->mem_ptr + IPS_REG_FLDP);
6365
6366 if (status & 0x80)
6367 break;
6368
6369 MDELAY(1);
6370 timeout--;
6371 }
6372
6373 if (timeout == 0) {
6374
6375 writel(0, ha->mem_ptr + IPS_REG_FLAP);
6376 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6377 udelay(25);
6378
6379 writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
6380 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6381 udelay(25);
6382
6383 return (1);
6384 }
6385
6386
6387 if (status & 0x18) {
6388
6389 writel(0, ha->mem_ptr + IPS_REG_FLAP);
6390 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6391 udelay(25);
6392
6393 writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
6394 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6395 udelay(25);
6396
6397 return (1);
6398 }
6399 }
6400
6401
6402 writel(0, ha->mem_ptr + IPS_REG_FLAP);
6403 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6404 udelay(25);
6405
6406 writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
6407 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6408 udelay(25);
6409
6410 return (0);
6411}
6412
6413
6414
6415
6416
6417
6418
6419
6420
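/*
 * ips_verify_bios - verify the flashed image via port I/O: check the
 * 0x55 0xAA signature bytes and then the image checksum.  Returns 0 if the
 * image is good, 1 otherwise.
 */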
6421static int
6422ips_verify_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize,
6423 uint32_t offset)
6424{
6425 uint8_t checksum;
6426 int i;
6427
6428 METHOD_TRACE("ips_verify_bios", 1);
6429
6430
6431 outl(0, ha->io_addr + IPS_REG_FLAP);
6432 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6433 udelay(25);
6434
6435 if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55)
6436 return (1);
6437
6438 outl(1, ha->io_addr + IPS_REG_FLAP);
6439 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6440 udelay(25);
6441 if (inb(ha->io_addr + IPS_REG_FLDP) != 0xAA)
6442 return (1);
6443
6444 checksum = 0xff;
6445 for (i = 2; i < buffersize; i++) {
6446
6447 outl(i + offset, ha->io_addr + IPS_REG_FLAP);
6448 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6449 udelay(25);
6450
6451 checksum = (uint8_t) checksum + inb(ha->io_addr + IPS_REG_FLDP);
6452 }
6453
	if (checksum != 0)
		return (1);
	else
		return (0);
6460}
6461
6462
6463
6464
6465
6466
6467
6468
6469
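/*
 * ips_verify_bios_memio - verify the flashed image via memory-mapped I/O:
 * check the 0x55 0xAA signature bytes and then the image checksum.  Returns
 * 0 if the image is good, 1 otherwise.
 */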
6470static int
6471ips_verify_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize,
6472 uint32_t offset)
6473{
6474 uint8_t checksum;
6475 int i;
6476
6477 METHOD_TRACE("ips_verify_bios_memio", 1);
6478
6479
6480 writel(0, ha->mem_ptr + IPS_REG_FLAP);
6481 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6482 udelay(25);
6483
6484 if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0x55)
6485 return (1);
6486
6487 writel(1, ha->mem_ptr + IPS_REG_FLAP);
6488 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6489 udelay(25);
6490 if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0xAA)
6491 return (1);
6492
6493 checksum = 0xff;
6494 for (i = 2; i < buffersize; i++) {
6495
6496 writel(i + offset, ha->mem_ptr + IPS_REG_FLAP);
6497 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6498 udelay(25);
6499
6500 checksum =
6501 (uint8_t) checksum + readb(ha->mem_ptr + IPS_REG_FLDP);
6502 }
6503
	if (checksum != 0)
		return (1);
	else
		return (0);
6510}
6511
6512
6513
6514
6515
6516
6517
6518
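/*
 * ips_abort_init - tear down a partially initialized controller: mark it
 * inactive, free its resources and clear its slot in the global tables.
 * Always returns -1 so callers can propagate the failure.
 */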
6519static int
6520ips_abort_init(ips_ha_t * ha, int index)
6521{
6522 ha->active = 0;
6523 ips_free(ha);
6524 ips_ha[index] = NULL;
6525 ips_sh[index] = NULL;
6526 return -1;
6527}
6528
6529
6530
6531
6532
6533
6534
6535
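/*
 * ips_shift_controllers - move the controller at 'highindex' down to
 * 'lowindex', shifting the intervening entries up by one slot and
 * renumbering their host_num fields.
 */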
6536static void
6537ips_shift_controllers(int lowindex, int highindex)
6538{
6539 ips_ha_t *ha_sav = ips_ha[highindex];
6540 struct Scsi_Host *sh_sav = ips_sh[highindex];
6541 int i;
6542
6543 for (i = highindex; i > lowindex; i--) {
6544 ips_ha[i] = ips_ha[i - 1];
6545 ips_sh[i] = ips_sh[i - 1];
6546 ips_ha[i]->host_num = i;
6547 }
6548 ha_sav->host_num = lowindex;
6549 ips_ha[lowindex] = ha_sav;
6550 ips_sh[lowindex] = sh_sav;
6551}
6552
6553
6554
6555
6556
6557
6558
6559
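/*
 * ips_order_controllers - order the detected controllers according to the
 * adapter_order list in NVRAM page 5 of the first adapter; if no order is
 * stored, fall back to placing the onboard (5i) adapters first, followed by
 * the ServeRAID 4 family.
 */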
6560static void
6561ips_order_controllers(void)
6562{
6563 int i, j, tmp, position = 0;
6564 IPS_NVRAM_P5 *nvram;
6565 if (!ips_ha[0])
6566 return;
6567 nvram = ips_ha[0]->nvram;
6568
6569 if (nvram->adapter_order[0]) {
6570 for (i = 1; i <= nvram->adapter_order[0]; i++) {
6571 for (j = position; j < ips_num_controllers; j++) {
6572 switch (ips_ha[j]->ad_type) {
6573 case IPS_ADTYPE_SERVERAID6M:
6574 case IPS_ADTYPE_SERVERAID7M:
6575 if (nvram->adapter_order[i] == 'M') {
6576 ips_shift_controllers(position,
6577 j);
6578 position++;
6579 }
6580 break;
6581 case IPS_ADTYPE_SERVERAID4L:
6582 case IPS_ADTYPE_SERVERAID4M:
6583 case IPS_ADTYPE_SERVERAID4MX:
6584 case IPS_ADTYPE_SERVERAID4LX:
6585 if (nvram->adapter_order[i] == 'N') {
6586 ips_shift_controllers(position,
6587 j);
6588 position++;
6589 }
6590 break;
6591 case IPS_ADTYPE_SERVERAID6I:
6592 case IPS_ADTYPE_SERVERAID5I2:
6593 case IPS_ADTYPE_SERVERAID5I1:
6594 case IPS_ADTYPE_SERVERAID7k:
6595 if (nvram->adapter_order[i] == 'S') {
6596 ips_shift_controllers(position,
6597 j);
6598 position++;
6599 }
6600 break;
6601 case IPS_ADTYPE_SERVERAID:
6602 case IPS_ADTYPE_SERVERAID2:
6603 case IPS_ADTYPE_NAVAJO:
6604 case IPS_ADTYPE_KIOWA:
6605 case IPS_ADTYPE_SERVERAID3L:
6606 case IPS_ADTYPE_SERVERAID3:
6607 case IPS_ADTYPE_SERVERAID4H:
6608 if (nvram->adapter_order[i] == 'A') {
6609 ips_shift_controllers(position,
6610 j);
6611 position++;
6612 }
6613 break;
6614 default:
6615 break;
6616 }
6617 }
6618 }
6619
6620 return;
6621 }
6622
6623 tmp = 0;
6624 for (i = position; i < ips_num_controllers; i++) {
6625 if (ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID5I2 ||
6626 ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID5I1) {
6627 ips_shift_controllers(position, i);
6628 position++;
6629 tmp = 1;
6630 }
6631 }
6632
6633 if (!tmp)
6634 return;
6635 for (i = position; i < ips_num_controllers; i++) {
6636 if (ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4L ||
6637 ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4M ||
6638 ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4LX ||
6639 ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4MX) {
6640 ips_shift_controllers(position, i);
6641 position++;
6642 }
6643 }
6644
6645 return;
6646}
6647
6648
6649
6650
6651
6652
6653
6654
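/*
 * ips_register_scsi - allocate a Scsi_Host for controller 'index', copy the
 * temporary ha structure into its hostdata, re-request the IRQ, fill in the
 * host limits and register/scan the host.  Returns 0 on success, -1 on
 * failure.
 */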
6655static int
6656ips_register_scsi(int index)
6657{
6658 struct Scsi_Host *sh;
6659 ips_ha_t *ha, *oldha = ips_ha[index];
6660 sh = scsi_host_alloc(&ips_driver_template, sizeof (ips_ha_t));
6661 if (!sh) {
6662 IPS_PRINTK(KERN_WARNING, oldha->pcidev,
6663 "Unable to register controller with SCSI subsystem\n");
6664 return -1;
6665 }
6666 ha = IPS_HA(sh);
6667 memcpy(ha, oldha, sizeof (ips_ha_t));
6668 free_irq(oldha->pcidev->irq, oldha);
6669
6670 if (request_irq(ha->pcidev->irq, do_ipsintr, IRQF_SHARED, ips_name, ha)) {
6671 IPS_PRINTK(KERN_WARNING, ha->pcidev,
6672 "Unable to install interrupt handler\n");
6673 goto err_out_sh;
6674 }
6675
6676 kfree(oldha);
6677
6678
6679 sh->unique_id = (ha->io_addr) ? ha->io_addr : ha->mem_addr;
6680 sh->sg_tablesize = sh->hostt->sg_tablesize;
6681 sh->can_queue = sh->hostt->can_queue;
6682 sh->cmd_per_lun = sh->hostt->cmd_per_lun;
6683 sh->max_sectors = 128;
6684
6685 sh->max_id = ha->ntargets;
6686 sh->max_lun = ha->nlun;
6687 sh->max_channel = ha->nbus - 1;
6688 sh->can_queue = ha->max_cmds - 1;
6689
6690 if (scsi_add_host(sh, &ha->pcidev->dev))
6691 goto err_out;
6692
6693 ips_sh[index] = sh;
6694 ips_ha[index] = ha;
6695
6696 scsi_scan_host(sh);
6697
6698 return 0;
6699
6700err_out:
6701 free_irq(ha->pcidev->irq, ha);
6702err_out_sh:
6703 scsi_host_put(sh);
6704 return -1;
6705}
6706
6707
6708
6709
6710
6711
6712
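/*
 * ips_remove_device - PCI remove callback: release the SCSI host and give
 * back the PCI regions and device.
 */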
6713static void
6714ips_remove_device(struct pci_dev *pci_dev)
6715{
6716 struct Scsi_Host *sh = pci_get_drvdata(pci_dev);
6717
6718 pci_set_drvdata(pci_dev, NULL);
6719
6720 ips_release(sh);
6721
6722 pci_release_regions(pci_dev);
6723 pci_disable_device(pci_dev);
6724}
6725
6726
6727
6728
6729
6730
6731
6732
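/*
 * ips_module_init - module entry point: register the PCI driver, run the
 * legacy detect routine and hook the reboot notifier.
 */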
6733static int __init
6734ips_module_init(void)
6735{
6736#if !defined(__i386__) && !defined(__ia64__) && !defined(__x86_64__)
6737 printk(KERN_ERR "ips: This driver has only been tested on the x86/ia64/x86_64 platforms\n");
6738 add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
6739#endif
6740
6741 if (pci_register_driver(&ips_pci_driver) < 0)
6742 return -ENODEV;
6743 ips_driver_template.module = THIS_MODULE;
6744 ips_order_controllers();
6745 if (!ips_detect(&ips_driver_template)) {
6746 pci_unregister_driver(&ips_pci_driver);
6747 return -ENODEV;
6748 }
6749 register_reboot_notifier(&ips_notifier);
6750 return 0;
6751}
6752
6753
6754
6755
6756
6757
6758
6759
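/*
 * ips_module_exit - module exit point: unregister the PCI driver and the
 * reboot notifier.
 */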
6760static void __exit
6761ips_module_exit(void)
6762{
6763 pci_unregister_driver(&ips_pci_driver);
6764 unregister_reboot_notifier(&ips_notifier);
6765}
6766
6767module_init(ips_module_init);
6768module_exit(ips_module_exit);
6769
6770
6771
6772
6773
6774
6775
6776
6777
6778
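/*
 * ips_insert_device - PCI probe callback: enable the device, claim its
 * regions, run the two-phase controller init and, when hot-plug support is
 * enabled, register the controller with the SCSI midlayer.
 */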
6779static int
6780ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent)
6781{
6782 int index = -1;
6783 int rc;
6784
6785 METHOD_TRACE("ips_insert_device", 1);
6786 rc = pci_enable_device(pci_dev);
6787 if (rc)
6788 return rc;
6789
6790 rc = pci_request_regions(pci_dev, "ips");
6791 if (rc)
6792 goto err_out;
6793
6794 rc = ips_init_phase1(pci_dev, &index);
6795 if (rc == SUCCESS)
6796 rc = ips_init_phase2(index);
6797
6798 if (ips_hotplug)
6799 if (ips_register_scsi(index)) {
6800 ips_free(ips_ha[index]);
6801 rc = -1;
6802 }
6803
6804 if (rc == SUCCESS)
6805 ips_num_controllers++;
6806
6807 ips_next_controller = ips_num_controllers;
6808
6809 if (rc < 0) {
6810 rc = -ENODEV;
6811 goto err_out_regions;
6812 }
6813
6814 pci_set_drvdata(pci_dev, ips_sh[index]);
6815 return 0;
6816
6817err_out_regions:
6818 pci_release_regions(pci_dev);
6819err_out:
6820 pci_disable_device(pci_dev);
6821 return rc;
6822}
6823
6824
6825
6826
6827
6828
6829
6830
6831
6832
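/*
 * ips_init_phase1 - first-stage controller init: find a free adapter slot,
 * map the register space, allocate the ha structure, DMA buffers and config
 * structures, set the DMA mask, and make sure the controller itself is
 * initialized (resetting Morpheus-class adapters that report 0xDEADBEEF).
 */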
6833static int
6834ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
6835{
6836 ips_ha_t *ha;
6837 uint32_t io_addr;
6838 uint32_t mem_addr;
6839 uint32_t io_len;
6840 uint32_t mem_len;
6841 int j;
6842 int index;
6843 dma_addr_t dma_address;
6844 char __iomem *ioremap_ptr;
6845 char __iomem *mem_ptr;
6846 uint32_t IsDead;
6847
6848 METHOD_TRACE("ips_init_phase1", 1);
6849 index = IPS_MAX_ADAPTERS;
6850 for (j = 0; j < IPS_MAX_ADAPTERS; j++) {
6851 if (ips_ha[j] == NULL) {
6852 index = j;
6853 break;
6854 }
6855 }
6856
6857 if (index >= IPS_MAX_ADAPTERS)
6858 return -1;
6859
6860
6861 mem_addr = 0;
6862 io_addr = 0;
6863 mem_len = 0;
6864 io_len = 0;
6865
6866 for (j = 0; j < 2; j++) {
6867 if (!pci_resource_start(pci_dev, j))
6868 break;
6869
6870 if (pci_resource_flags(pci_dev, j) & IORESOURCE_IO) {
6871 io_addr = pci_resource_start(pci_dev, j);
6872 io_len = pci_resource_len(pci_dev, j);
6873 } else {
6874 mem_addr = pci_resource_start(pci_dev, j);
6875 mem_len = pci_resource_len(pci_dev, j);
6876 }
6877 }
6878
6879
6880 if (mem_addr) {
6881 uint32_t base;
6882 uint32_t offs;
6883
6884 base = mem_addr & PAGE_MASK;
6885 offs = mem_addr - base;
6886 ioremap_ptr = ioremap(base, PAGE_SIZE);
6887 if (!ioremap_ptr)
6888 return -1;
6889 mem_ptr = ioremap_ptr + offs;
6890 } else {
6891 ioremap_ptr = NULL;
6892 mem_ptr = NULL;
6893 }
6894
6895
6896 ha = kzalloc(sizeof (ips_ha_t), GFP_KERNEL);
6897 if (ha == NULL) {
6898 IPS_PRINTK(KERN_WARNING, pci_dev,
6899 "Unable to allocate temporary ha struct\n");
6900 return -1;
6901 }
6902
6903 ips_sh[index] = NULL;
6904 ips_ha[index] = ha;
6905 ha->active = 1;
6906
6907
6908 ha->io_addr = io_addr;
6909 ha->io_len = io_len;
6910 ha->mem_addr = mem_addr;
6911 ha->mem_len = mem_len;
6912 ha->mem_ptr = mem_ptr;
6913 ha->ioremap_ptr = ioremap_ptr;
6914 ha->host_num = (uint32_t) index;
6915 ha->slot_num = PCI_SLOT(pci_dev->devfn);
6916 ha->pcidev = pci_dev;
6917
6918
6919
6920
6921
6922
6923
6924 if (sizeof(dma_addr_t) > 4 && IPS_HAS_ENH_SGLIST(ha) &&
6925 !dma_set_mask(&ha->pcidev->dev, DMA_BIT_MASK(64))) {
6926 (ha)->flags |= IPS_HA_ENH_SG;
6927 } else {
6928 if (dma_set_mask(&ha->pcidev->dev, DMA_BIT_MASK(32)) != 0) {
6929 printk(KERN_WARNING "Unable to set DMA Mask\n");
6930 return ips_abort_init(ha, index);
6931 }
6932 }
6933 if(ips_cd_boot && !ips_FlashData){
6934 ips_FlashData = dma_alloc_coherent(&pci_dev->dev,
6935 PAGE_SIZE << 7, &ips_flashbusaddr, GFP_KERNEL);
6936 }
6937
6938 ha->enq = dma_alloc_coherent(&pci_dev->dev, sizeof (IPS_ENQ),
6939 &ha->enq_busaddr, GFP_KERNEL);
6940 if (!ha->enq) {
6941 IPS_PRINTK(KERN_WARNING, pci_dev,
6942 "Unable to allocate host inquiry structure\n");
6943 return ips_abort_init(ha, index);
6944 }
6945
6946 ha->adapt = dma_alloc_coherent(&pci_dev->dev,
6947 sizeof (IPS_ADAPTER) + sizeof (IPS_IO_CMD),
6948 &dma_address, GFP_KERNEL);
6949 if (!ha->adapt) {
6950 IPS_PRINTK(KERN_WARNING, pci_dev,
6951 "Unable to allocate host adapt & dummy structures\n");
6952 return ips_abort_init(ha, index);
6953 }
6954 ha->adapt->hw_status_start = dma_address;
6955 ha->dummy = (void *) (ha->adapt + 1);
6956
6957
6958
6959 ha->logical_drive_info = dma_alloc_coherent(&pci_dev->dev,
6960 sizeof (IPS_LD_INFO), &dma_address, GFP_KERNEL);
6961 if (!ha->logical_drive_info) {
6962 IPS_PRINTK(KERN_WARNING, pci_dev,
6963 "Unable to allocate logical drive info structure\n");
6964 return ips_abort_init(ha, index);
6965 }
6966 ha->logical_drive_info_dma_addr = dma_address;
6967
6968
6969 ha->conf = kmalloc(sizeof (IPS_CONF), GFP_KERNEL);
6970
6971 if (!ha->conf) {
6972 IPS_PRINTK(KERN_WARNING, pci_dev,
6973 "Unable to allocate host conf structure\n");
6974 return ips_abort_init(ha, index);
6975 }
6976
6977 ha->nvram = kmalloc(sizeof (IPS_NVRAM_P5), GFP_KERNEL);
6978
6979 if (!ha->nvram) {
6980 IPS_PRINTK(KERN_WARNING, pci_dev,
6981 "Unable to allocate host NVRAM structure\n");
6982 return ips_abort_init(ha, index);
6983 }
6984
6985 ha->subsys = kmalloc(sizeof (IPS_SUBSYS), GFP_KERNEL);
6986
6987 if (!ha->subsys) {
6988 IPS_PRINTK(KERN_WARNING, pci_dev,
6989 "Unable to allocate host subsystem structure\n");
6990 return ips_abort_init(ha, index);
6991 }
6992
6993
6994
6995 if (ips_ioctlsize < PAGE_SIZE)
6996 ips_ioctlsize = PAGE_SIZE;
6997
6998 ha->ioctl_data = dma_alloc_coherent(&pci_dev->dev, ips_ioctlsize,
6999 &ha->ioctl_busaddr, GFP_KERNEL);
7000 ha->ioctl_len = ips_ioctlsize;
7001 if (!ha->ioctl_data) {
7002 IPS_PRINTK(KERN_WARNING, pci_dev,
7003 "Unable to allocate IOCTL data\n");
7004 return ips_abort_init(ha, index);
7005 }
7006
7007
7008
7009
7010 ips_setup_funclist(ha);
7011
7012 if ((IPS_IS_MORPHEUS(ha)) || (IPS_IS_MARCO(ha))) {
7013
7014 IsDead = readl(ha->mem_ptr + IPS_REG_I960_MSG1);
7015 if (IsDead == 0xDEADBEEF) {
7016 ips_reset_morpheus(ha);
7017 }
7018 }
7019
7020
7021
7022
7023
7024 if (!(*ha->func.isinit) (ha)) {
7025 if (!(*ha->func.init) (ha)) {
7026
7027
7028
7029 IPS_PRINTK(KERN_WARNING, pci_dev,
7030 "Unable to initialize controller\n");
7031 return ips_abort_init(ha, index);
7032 }
7033 }
7034
7035 *indexPtr = index;
7036 return SUCCESS;
7037}
7038
7039
7040
7041
7042
7043
7044
7045
7046
7047
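/*
 * ips_init_phase2 - second-stage controller init: install the interrupt
 * handler, allocate a single SCB to bring the adapter up, then allocate the
 * full set of SCBs.  Returns SUCCESS, or aborts the init on failure.
 */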
7048static int
7049ips_init_phase2(int index)
7050{
7051 ips_ha_t *ha;
7052
7053 ha = ips_ha[index];
7054
7055 METHOD_TRACE("ips_init_phase2", 1);
7056 if (!ha->active) {
7057 ips_ha[index] = NULL;
7058 return -1;
7059 }
7060
7061
7062 if (request_irq(ha->pcidev->irq, do_ipsintr, IRQF_SHARED, ips_name, ha)) {
7063 IPS_PRINTK(KERN_WARNING, ha->pcidev,
7064 "Unable to install interrupt handler\n");
7065 return ips_abort_init(ha, index);
7066 }
7067
7068
7069
7070
7071 ha->max_cmds = 1;
7072 if (!ips_allocatescbs(ha)) {
7073 IPS_PRINTK(KERN_WARNING, ha->pcidev,
7074 "Unable to allocate a CCB\n");
7075 free_irq(ha->pcidev->irq, ha);
7076 return ips_abort_init(ha, index);
7077 }
7078
7079 if (!ips_hainit(ha)) {
7080 IPS_PRINTK(KERN_WARNING, ha->pcidev,
7081 "Unable to initialize controller\n");
7082 free_irq(ha->pcidev->irq, ha);
7083 return ips_abort_init(ha, index);
7084 }
7085
7086 ips_deallocatescbs(ha, 1);
7087
7088
7089 if (!ips_allocatescbs(ha)) {
7090 IPS_PRINTK(KERN_WARNING, ha->pcidev,
7091 "Unable to allocate CCBs\n");
7092 free_irq(ha->pcidev->irq, ha);
7093 return ips_abort_init(ha, index);
7094 }
7095
7096 return SUCCESS;
7097}
7098
7099MODULE_LICENSE("GPL");
7100MODULE_DESCRIPTION("IBM ServeRAID Adapter Driver " IPS_VER_STRING);
7101MODULE_VERSION(IPS_VER_STRING);